/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE      CKSEG0

#define cache_op(op, addr)                                      \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
        "       .set    noreorder                       \n"     \
        "       .set    arch=r4000                      \n"     \
        "       cache   %0, %1                          \n"     \
        "       .set    pop                             \n"     \
        :                                                       \
        : "i" (op), "R" (*(unsigned char *)(addr)))
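
/*
 * Usage sketch (illustrative only, not part of this header's API):
 * write back and invalidate the primary D-cache line, if any, that
 * currently holds a kernel virtual address.  Hit_Writeback_Inv_D comes
 * from <asm/cacheops.h>; example_flush_one_line() is a hypothetical name.
 */
#if 0
static inline void example_flush_one_line(void *buf)
{
        cache_op(Hit_Writeback_Inv_D, (unsigned long)buf);
}
#endif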

#ifdef CONFIG_MIPS_MT

/*
 * Optionally force single-threaded execution during I-cache flushes.
 */
#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT                                          \
        unsigned long flags = 0;                                \
        unsigned long mtflags = 0;                              \
        if (mt_protiflush) {                                    \
                local_irq_save(flags);                          \
                ehb();                                          \
                mtflags = dvpe();                               \
                mt_cflush_lockdown();                           \
        }

#define END_MT_IPROT                                            \
        if (mt_protiflush) {                                    \
                mt_cflush_release();                            \
                evpe(mtflags);                                  \
                local_irq_restore(flags);                       \
        }

#define BEGIN_MT_DPROT                                          \
        unsigned long flags = 0;                                \
        unsigned long mtflags = 0;                              \
        if (mt_protdflush) {                                    \
                local_irq_save(flags);                          \
                ehb();                                          \
                mtflags = dvpe();                               \
                mt_cflush_lockdown();                           \
        }

#define END_MT_DPROT                                            \
        if (mt_protdflush) {                                    \
                mt_cflush_release();                            \
                evpe(mtflags);                                  \
                local_irq_restore(flags);                       \
        }

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

#define __iflush_prologue                                       \
        unsigned long redundance;                               \
        extern int mt_n_iflushes;                               \
        BEGIN_MT_IPROT                                          \
        for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue                                       \
        END_MT_IPROT                                            \
        }

#define __dflush_prologue                                       \
        unsigned long redundance;                               \
        extern int mt_n_dflushes;                               \
        BEGIN_MT_DPROT                                          \
        for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue                                       \
        END_MT_DPROT                                            \
        }

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */
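
/*
 * For reference, a sketch of what the MT flavour of these bracketing
 * macros expands to: with CONFIG_MIPS_MT and PROTECT_CACHE_FLUSHES,
 *
 *      __dflush_prologue
 *      cache_op(Hit_Writeback_Inv_D, addr);
 *      __dflush_epilogue
 *
 * becomes roughly
 *
 *      unsigned long redundance;
 *      extern int mt_n_dflushes;
 *      BEGIN_MT_DPROT
 *      for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
 *              cache_op(Hit_Writeback_Inv_D, addr);
 *              END_MT_DPROT
 *      }
 *
 * i.e. the operation is repeated mt_n_dflushes times.  Without
 * CONFIG_MIPS_MT the pair degenerates to a plain brace-delimited block.
 */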

static inline void flush_icache_line_indexed(unsigned long addr)
{
        __iflush_prologue
        cache_op(Index_Invalidate_I, addr);
        __iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
        __dflush_prologue
        cache_op(Index_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
        __iflush_prologue
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
                cache_op(Hit_Invalidate_I, addr);
                break;
        }
        __iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Invalidate_D, addr);
        __dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op, addr)                            \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
        "       .set    noreorder                       \n"     \
        "       .set    arch=r4000                      \n"     \
        "1:     cache   %0, (%1)                        \n"     \
        "2:     .set    pop                             \n"     \
        "       .section __ex_table,\"a\"               \n"     \
        "       "STR(PTR)" 1b, 2b                       \n"     \
        "       .previous"                                      \
        :                                                       \
        : "i" (op), "r" (addr))

#define protected_cachee_op(op, addr)                           \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
        "       .set    noreorder                       \n"     \
        "       .set    mips0                           \n"     \
        "       .set    eva                             \n"     \
        "1:     cachee  %0, (%1)                        \n"     \
        "2:     .set    pop                             \n"     \
        "       .section __ex_table,\"a\"               \n"     \
        "       "STR(PTR)" 1b, 2b                       \n"     \
        "       .previous"                                      \
        :                                                       \
        : "i" (op), "r" (addr))

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
#ifdef CONFIG_EVA
                protected_cachee_op(Hit_Invalidate_I, addr);
#else
                protected_cache_op(Hit_Invalidate_I, addr);
#endif
                break;
        }
}

/*
 * R10000 / R12000 hazard - these processors don't support the
 * Hit_Writeback_D cacheop, so we use Hit_Writeback_Inv_D, which is
 * supported by all R4000-style caches.  The cost is one cache line
 * getting unnecessarily invalidated, so the penalty is minor.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
        protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
        protected_cache_op(Hit_Writeback_Inv_SD, addr);
}
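
/*
 * Usage sketch (illustrative only): making a signal trampoline written
 * to a user stack visible to instruction fetch.  The address may fault,
 * hence the protected variants; the real callers live in the arch
 * signal-handling code.  example_flush_trampoline() is a hypothetical name.
 */
#if 0
static inline void example_flush_trampoline(unsigned long uaddr)
{
        protected_writeback_dcache_line(uaddr); /* push stores to memory */
        protected_flush_icache_line(uaddr);     /* discard stale I-line */
}
#endif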

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
        cache_op(Page_Invalidate_T, addr);
}

#define cache16_unroll32(base, op)                              \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
        "       .set    noreorder                       \n"     \
        "       .set    mips3                           \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x010(%0)\n"     \
        "       cache %1, 0x020(%0); cache %1, 0x030(%0)\n"     \
        "       cache %1, 0x040(%0); cache %1, 0x050(%0)\n"     \
        "       cache %1, 0x060(%0); cache %1, 0x070(%0)\n"     \
        "       cache %1, 0x080(%0); cache %1, 0x090(%0)\n"     \
        "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"     \
        "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"     \
        "       cache %1, 0x100(%0); cache %1, 0x110(%0)\n"     \
        "       cache %1, 0x120(%0); cache %1, 0x130(%0)\n"     \
        "       cache %1, 0x140(%0); cache %1, 0x150(%0)\n"     \
        "       cache %1, 0x160(%0); cache %1, 0x170(%0)\n"     \
        "       cache %1, 0x180(%0); cache %1, 0x190(%0)\n"     \
        "       cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)\n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)\n"     \
        "       cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)\n"     \
        "       .set    pop                             \n"     \
        :                                                       \
        : "r" (base),                                           \
          "i" (op));

#define cache32_unroll32(base, op)                              \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
        "       .set    noreorder                       \n"     \
        "       .set    mips3                           \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x020(%0)\n"     \
        "       cache %1, 0x040(%0); cache %1, 0x060(%0)\n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"     \
        "       cache %1, 0x100(%0); cache %1, 0x120(%0)\n"     \
        "       cache %1, 0x140(%0); cache %1, 0x160(%0)\n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1a0(%0)\n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)\n"     \
        "       cache %1, 0x200(%0); cache %1, 0x220(%0)\n"     \
        "       cache %1, 0x240(%0); cache %1, 0x260(%0)\n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2a0(%0)\n"     \
        "       cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)\n"     \
        "       cache %1, 0x300(%0); cache %1, 0x320(%0)\n"     \
        "       cache %1, 0x340(%0); cache %1, 0x360(%0)\n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3a0(%0)\n"     \
        "       cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)\n"     \
        "       .set    pop                             \n"     \
        :                                                       \
        : "r" (base),                                           \
          "i" (op));

#define cache64_unroll32(base, op)                              \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
        "       .set    noreorder                       \n"     \
        "       .set    mips3                           \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x040(%0)\n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"     \
        "       cache %1, 0x100(%0); cache %1, 0x140(%0)\n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1c0(%0)\n"     \
        "       cache %1, 0x200(%0); cache %1, 0x240(%0)\n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2c0(%0)\n"     \
        "       cache %1, 0x300(%0); cache %1, 0x340(%0)\n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3c0(%0)\n"     \
        "       cache %1, 0x400(%0); cache %1, 0x440(%0)\n"     \
        "       cache %1, 0x480(%0); cache %1, 0x4c0(%0)\n"     \
        "       cache %1, 0x500(%0); cache %1, 0x540(%0)\n"     \
        "       cache %1, 0x580(%0); cache %1, 0x5c0(%0)\n"     \
        "       cache %1, 0x600(%0); cache %1, 0x640(%0)\n"     \
        "       cache %1, 0x680(%0); cache %1, 0x6c0(%0)\n"     \
        "       cache %1, 0x700(%0); cache %1, 0x740(%0)\n"     \
        "       cache %1, 0x780(%0); cache %1, 0x7c0(%0)\n"     \
        "       .set    pop                             \n"     \
        :                                                       \
        : "r" (base),                                           \
          "i" (op));

#define cache128_unroll32(base, op)                             \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
        "       .set    noreorder                       \n"     \
        "       .set    mips3                           \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
        "       cache %1, 0x100(%0); cache %1, 0x180(%0)\n"     \
        "       cache %1, 0x200(%0); cache %1, 0x280(%0)\n"     \
        "       cache %1, 0x300(%0); cache %1, 0x380(%0)\n"     \
        "       cache %1, 0x400(%0); cache %1, 0x480(%0)\n"     \
        "       cache %1, 0x500(%0); cache %1, 0x580(%0)\n"     \
        "       cache %1, 0x600(%0); cache %1, 0x680(%0)\n"     \
        "       cache %1, 0x700(%0); cache %1, 0x780(%0)\n"     \
        "       cache %1, 0x800(%0); cache %1, 0x880(%0)\n"     \
        "       cache %1, 0x900(%0); cache %1, 0x980(%0)\n"     \
        "       cache %1, 0xa00(%0); cache %1, 0xa80(%0)\n"     \
        "       cache %1, 0xb00(%0); cache %1, 0xb80(%0)\n"     \
        "       cache %1, 0xc00(%0); cache %1, 0xc80(%0)\n"     \
        "       cache %1, 0xd00(%0); cache %1, 0xd80(%0)\n"     \
        "       cache %1, 0xe00(%0); cache %1, 0xe80(%0)\n"     \
        "       cache %1, 0xf00(%0); cache %1, 0xf80(%0)\n"     \
        "       .set    pop                             \n"     \
        :                                                       \
        : "r" (base),                                           \
          "i" (op));
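
/*
 * Each cacheNN_unroll32 variant issues 32 cache ops at a stride of NN
 * bytes, so one invocation covers 32 * NN bytes: 0x200 for 16-byte
 * lines, 0x400 for 32, 0x800 for 64 and 0x1000 for 128-byte lines.
 * Sketch (illustrative only) of stepping through a 4 KB page with
 * 32-byte lines, as the blast_*_page helpers below do:
 */
#if 0
static inline void example_writeback_page(unsigned long page)
{
        unsigned long addr = page;
        unsigned long end = page + PAGE_SIZE;

        do {
                cache32_unroll32(addr, Hit_Writeback_Inv_D);
                addr += 32 * 32;        /* 0x400 bytes per invocation */
        } while (addr < end);
}
#endif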

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
        "       .set    mips0                                   \n"     \
        "       .set    eva                                     \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x010(%0)      \n"     \
        "       cachee %1, 0x020(%0); cachee %1, 0x030(%0)      \n"     \
        "       cachee %1, 0x040(%0); cachee %1, 0x050(%0)      \n"     \
        "       cachee %1, 0x060(%0); cachee %1, 0x070(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x090(%0)      \n"     \
        "       cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)      \n"     \
        "       cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)      \n"     \
        "       cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x110(%0)      \n"     \
        "       cachee %1, 0x120(%0); cachee %1, 0x130(%0)      \n"     \
        "       cachee %1, 0x140(%0); cachee %1, 0x150(%0)      \n"     \
        "       cachee %1, 0x160(%0); cachee %1, 0x170(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x190(%0)      \n"     \
        "       cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)      \n"     \
        "       cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)      \n"     \
        "       cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)      \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
        : "r" (base),                                                   \
          "i" (op));

#define cache32_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
        "       .set    mips0                                   \n"     \
        "       .set    eva                                     \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x020(%0)      \n"     \
        "       cachee %1, 0x040(%0); cachee %1, 0x060(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)      \n"     \
        "       cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x120(%0)      \n"     \
        "       cachee %1, 0x140(%0); cachee %1, 0x160(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)      \n"     \
        "       cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)      \n"     \
        "       cachee %1, 0x200(%0); cachee %1, 0x220(%0)      \n"     \
        "       cachee %1, 0x240(%0); cachee %1, 0x260(%0)      \n"     \
        "       cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)      \n"     \
        "       cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)      \n"     \
        "       cachee %1, 0x300(%0); cachee %1, 0x320(%0)      \n"     \
        "       cachee %1, 0x340(%0); cachee %1, 0x360(%0)      \n"     \
        "       cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)      \n"     \
        "       cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)      \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
        : "r" (base),                                                   \
          "i" (op));

#define cache64_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
        "       .set    mips0                                   \n"     \
        "       .set    eva                                     \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x040(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x140(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)      \n"     \
        "       cachee %1, 0x200(%0); cachee %1, 0x240(%0)      \n"     \
        "       cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)      \n"     \
        "       cachee %1, 0x300(%0); cachee %1, 0x340(%0)      \n"     \
        "       cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)      \n"     \
        "       cachee %1, 0x400(%0); cachee %1, 0x440(%0)      \n"     \
        "       cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)      \n"     \
        "       cachee %1, 0x500(%0); cachee %1, 0x540(%0)      \n"     \
        "       cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)      \n"     \
        "       cachee %1, 0x600(%0); cachee %1, 0x640(%0)      \n"     \
        "       cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)      \n"     \
        "       cachee %1, 0x700(%0); cachee %1, 0x740(%0)      \n"     \
        "       cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)      \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
        : "r" (base),                                                   \
          "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)    \
static inline void extra##blast_##pfx##cache##lsize(void)               \
{                                                                       \
        unsigned long start = INDEX_BASE;                               \
        unsigned long end = start + current_cpu_data.desc.waysize;      \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws, indexop);      \
                                                                        \
        __##pfx##flush_epilogue                                         \
}                                                                       \
                                                                        \
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{                                                                       \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        do {                                                            \
                cache##lsize##_unroll32(start, hitop);                  \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
                                                                        \
        __##pfx##flush_epilogue                                         \
}                                                                       \
                                                                        \
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{                                                                       \
        unsigned long indexmask = current_cpu_data.desc.waysize - 1;    \
        unsigned long start = INDEX_BASE + (page & indexmask);          \
        unsigned long end = start + PAGE_SIZE;                          \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws, indexop);      \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
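
/*
 * The builders above generate, per line size, a whole-cache flush by
 * index, a per-page flush by hit and a per-page flush by index, plus
 * invalidate-only inv_ variants.  Sketch (illustrative only) of the
 * three call styles, using functions instantiated above:
 */
#if 0
static inline void example_blast(unsigned long page)
{
        blast_dcache32();               /* whole D-cache, by index */
        blast_icache32_page(page);      /* one page, by hit */
        blast_inv_dcache32_page(page);  /* one page, invalidate only */
}
#endif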

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize)      \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{                                                                       \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        do {                                                            \
                cache##lsize##_unroll32_user(start, hitop);             \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
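
/*
 * Sketch (illustrative only): the _user_page variants use cachee to
 * operate on a user virtual address while in kernel mode (see the EVA
 * comment above the cacheNN_unroll32_user macros), e.g. after the
 * kernel has written instructions into user memory:
 */
#if 0
static inline void example_flush_user_page(unsigned long uvaddr)
{
        blast_dcache32_user_page(uvaddr);       /* write back user D-lines */
        blast_icache32_user_page(uvaddr);       /* invalidate user I-lines */
}
#endif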

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)        \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
                                                           unsigned long end) \
{                                                                       \
        unsigned long lsize = cpu_##desc##_line_size();                 \
        unsigned long addr = start & ~(lsize - 1);                      \
        unsigned long aend = (end - 1) & ~(lsize - 1);                  \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        while (1) {                                                     \
                prot##cache_op(hitop, addr);                            \
                if (addr == aend)                                       \
                        break;                                          \
                addr += lsize;                                          \
        }                                                               \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)                \
static inline void protected_blast_##pfx##cache##_range(unsigned long start, \
                                                        unsigned long end) \
{                                                                       \
        unsigned long lsize = cpu_##desc##_line_size();                 \
        unsigned long addr = start & ~(lsize - 1);                      \
        unsigned long aend = (end - 1) & ~(lsize - 1);                  \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        if (segment_eq(get_fs(), USER_DS)) {                            \
                while (1) {                                             \
                        protected_cachee_op(hitop, addr);               \
                        if (addr == aend)                               \
                                break;                                  \
                        addr += lsize;                                  \
                }                                                       \
        } else {                                                        \
                while (1) {                                             \
                        protected_cache_op(hitop, addr);                \
                        if (addr == aend)                               \
                                break;                                  \
                        addr += lsize;                                  \
                }                                                       \
        }                                                               \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif

__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
        protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )

/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
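
/*
 * Sketch (illustrative only) of typical range usage: write back before
 * a device reads a buffer; invalidate only after a device has written
 * into it.  The protected_ variants additionally tolerate faulting
 * addresses via the exception table.  example_dma_sync() is a
 * hypothetical name.
 */
#if 0
static inline void example_dma_sync(unsigned long buf, unsigned long size)
{
        blast_dcache_range(buf, buf + size);            /* CPU -> device */
        blast_inv_dcache_range(buf, buf + size);        /* device -> CPU */
}
#endif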

#endif /* _ASM_R4KCACHE_H */