/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
#include <asm/mips-cm.h>

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func)(void *info), void *info)
{
	preempt_disable();

	/*
	 * The Coherent Manager propagates address-based cache ops to other
	 * cores but not index-based ops. However, r4k_on_each_cpu is used
	 * in both cases so there is no easy way to tell what kind of op is
	 * executed to the other cores. The best we can probably do is
	 * to restrict that call when a CM is not present because both
	 * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
	 */
	if (!mips_cm_present())
		smp_call_function_many(&cpu_foreign_map, func, info, 1);
	func(info);
	preempt_enable();
}
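/*
 * Index-based cache ops are not broadcast by the Coherent Manager, so
 * under the CM-based SMP schemes (CMP/CPS) they cannot be treated as
 * safe to run on one CPU on behalf of all of them.
 */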
#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
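/*
 * Work around the R4600 Hit cache op errata: v2.x parts get a dummy
 * uncached (CKSEG1) load issued before the Hit op, while v1.x parts
 * are padded with nops (the workaround selectors live in <asm/war.h>).
 */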
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	blast_dcache64_page(addr);
}

static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
	blast_dcache128_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	switch (dc_lsize) {
	case 0:
		r4k_blast_dcache_page = (void *)cache_noop;
		break;
	case 16:
		r4k_blast_dcache_page = blast_dcache16_page;
		break;
	case 32:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
		break;
	case 64:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
		break;
	case 128:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
		break;
	default:
		break;
	}
}
#ifndef CONFIG_EVA
#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
#else

static void (*r4k_blast_dcache_user_page)(unsigned long addr);

static void r4k_blast_dcache_user_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_user_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_user_page = blast_dcache16_user_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_user_page = blast_dcache32_user_page;
	else if (dc_lsize == 64)
		r4k_blast_dcache_user_page = blast_dcache64_user_page;
}

#endif

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
	else if (dc_lsize == 128)
		r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
}

void (*r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
	else if (dc_lsize == 128)
		r4k_blast_dcache = blast_dcache128;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
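/*
 * R4600 v1.x index cache op erratum: run the icache blast with
 * interrupts disabled so nothing can touch the cache mid-sequence.
 */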
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
		r4k_blast_icache_page = loongson2_blast_icache32_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
	else if (ic_lsize == 128)
		r4k_blast_icache_page = blast_icache128_page;
}

#ifndef CONFIG_EVA
#define r4k_blast_icache_user_page  r4k_blast_icache_page
#else

static void (*r4k_blast_icache_user_page)(unsigned long addr);

static void r4k_blast_icache_user_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_user_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_user_page = blast_icache16_user_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_user_page = blast_icache32_user_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_user_page = blast_icache64_user_page;
}

#endif

static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache_page_indexed =
				loongson2_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (*r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache = loongson2_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
	else if (ic_lsize == 128)
		r4k_blast_icache = blast_icache128;
}
static void (*r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void *args)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		/*
		 * These caches are inclusive caches, that is, if something
		 * is not cached in the S-cache, we know it also won't be
		 * in one of the primary caches.
		 */
		r4k_blast_scache();
		break;

	default:
		r4k_blast_dcache();
		r4k_blast_icache();
		break;
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}
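/*
 * Does @mm currently own a live ASID on any CPU?  If not, none of its
 * pages can be in the caches.  Under MT SMP every online CPU has to be
 * checked because contexts are tracked per CPU.
 */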
static inline int has_valid_asid(const struct mm_struct *mm)
{
#ifdef CONFIG_MIPS_MT_SMP
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
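/*
 * vmap/vunmap ranges can alias the linear mapping in a virtually
 * indexed dcache, so the conservative thing is to blast the whole
 * dcache on both paths.
 */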
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!has_valid_asid(vma->vm_mm))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while the R1x000s behave sanely.
	 * R4000SC and R4400SC indexed S-cache ops also invalidate the primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};
static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapcount(page) &&
				!Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		vaddr ? r4k_blast_dcache_page(addr) :
			r4k_blast_dcache_user_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			vaddr ? r4k_blast_icache_page(addr) :
				r4k_blast_icache_user_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}
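/*
 * flush_data_cache_page() may be called from atomic context, where
 * cross-CPU function calls must not be issued; fall back to a purely
 * local flush there.
 */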
static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long)addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *)addr);
}
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		switch (boot_cpu_type()) {
		case CPU_LOONGSON2:
			protected_loongson2_blast_icache_range(start, end);
			break;

		default:
			protected_blast_icache_range(start, end);
			break;
		}
	}
#ifdef CONFIG_EVA
	/*
	 * Due to all the possible segment mappings, there might be cache
	 * aliases caused by the bootloader being in non-EVA mode and the
	 * CPU switching to EVA during early kernel init.  It's best to
	 * flush the scache to avoid secondary cores fetching stale data,
	 * which leads to kernel crashes.
	 */
	bc_wback_inv(start, (end - start));
	__sync();
#endif
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
	instruction_hazard();
}
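/*
 * DMA cache maintenance for non-coherent platforms: writeback+invalidate
 * before a device reads from memory, pure invalidate before the CPU
 * reads data a device has written.
 */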
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		preempt_enable();
		__sync();
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_wback_inv(addr, size);
	__sync();
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors will throw an address error for cache
			 * hit ops with insufficient alignment.	 Solved by
			 * aligning the address to cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		preempt_enable();
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long)arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set "MIPS_ISA_LEVEL"\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *)addr);
}
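/*
 * A virtually tagged icache can't be hit by physical address, so the
 * only way to flush it is to blast the whole thing; physically tagged
 * icaches need nothing here.
 */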
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
	unsigned long	vaddr;
	int		size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long)vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
	unsigned int imp = c->processor_id & PRID_IMP_MASK;
	unsigned int rev = c->processor_id & PRID_REV_MASK;
	int present = 0;

	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.  Also disable the synonym tag update feature
	 * where available.  In this case no opportunistic tag update will
	 * happen where a load causes a virtual address miss but a physical
	 * address hit during a D-cache look-up.
	 */
	switch (imp) {
	case PRID_IMP_74K:
		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
			present = 1;
		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		break;
	case PRID_IMP_1074K:
		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
			present = 1;
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		}
		break;
	default:
		BUG();
	}

	return present;
}

static void b5k_instruction_hazard(void)
{
	__sync();
	__sync();
	__asm__ __volatile__(
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	: : : "memory");
}

static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
	"9-way", "10-way", "11-way", "12-way",
	"13-way", "14-way", "15-way", "16-way",
};
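/*
 * Probe the primary caches: decode size, line size and associativity
 * from the config registers (with per-family quirks and errata
 * handling) and derive the alias/VTAG flags the rest of the cache
 * code depends on.
 */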
static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	int has_74k_erratum = 0;
	unsigned long config1;
	unsigned int lsize;

	switch (current_cpu_type()) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit= __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	case CPU_LOONGSON3:
		config1 = read_c0_config1();
		lsize = (config1 >> 19) & 7;
		if (lsize)
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = 0;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = 0;

		lsize = (config1 >> 10) & 7;
		if (lsize)
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = 0;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);
		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = 0;
		break;

	case CPU_CAVIUM_OCTEON3:
		/* For now lie about the number of ways. */
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 8;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 8;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		lsize = (config1 >> 19) & 7;

		/* IL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid icache line size");

		c->icache.linesz = lsize ? 2 << lsize : 0;

		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		lsize = (config1 >> 10) & 7;

		/* DL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid dcache line size");

		c->dcache.linesz = lsize ? 2 << lsize : 0;

		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.	With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R1x000 P-caches are odd in a positive way.  They're 32kB 2-way
	 * virtually indexed, so normally they'd suffer from aliases, but
	 * magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (current_cpu_type()) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		break;

	case CPU_74K:
	case CPU_1074K:
		has_74k_erratum = alias_74k_erratum(c);
		/* Fall through. */
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_INTERAPTIV:
	case CPU_P5600:
	case CPU_PROAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
	case CPU_I6400:
		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
		    (c->icache.waysize > PAGE_SIZE))
			c->icache.flags |= MIPS_CACHE_ALIASES;
		if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
			/*
			 * Effectively physically indexed dcache,
			 * thus no virtual aliases.
			 */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
	switch (current_cpu_type()) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;

	case CPU_LOONGSON2:
		/*
		 * LOONGSON2 has a 4-way icache, but when using the indexed
		 * cache op, one op will act on all 4 ways.
		 */
		c->icache.ways = 1;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

static void __init loongson3_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

	config2 = read_c0_config2();
	lsize = (config2 >> 4) & 15;
	if (lsize)
		c->scache.linesz = 2 << lsize;
	else
		c->scache.linesz = 0;
	c->scache.sets = 64 << ((config2 >> 8) & 15);
	c->scache.ways = 1 + (config2 & 15);

	scache_size = c->scache.sets *
		      c->scache.ways *
		      c->scache.linesz;
	/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
	scache_size *= 4;
	c->scache.waybit = 0;
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
	if (scache_size)
		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
	return;
}
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit= 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;

	case CPU_LOONGSON3:
		loongson3_sc_init();
		return;

	case CPU_CAVIUM_OCTEON3:
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
				    MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata documentation is actually silent about this
	 * bit, so we set it just in case for those revisions that require
	 * it to be set according to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}
/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}
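/*
 * "cca=<n>" on the kernel command line overrides the cache coherency
 * attribute programmed into c0_config and used as the default page
 * cacheability; out-of-range values fall back to the hardware default.
 */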
static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);

static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, and others, like Toshiba, had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only co_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}
static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}
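/*
 * Main entry point: probe all caches, wire up the blast helpers and
 * the generic flush hooks, then apply per-CPU overrides.
 */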
void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
#ifdef CONFIG_EVA
	r4k_blast_dcache_user_page_setup();
	r4k_blast_icache_user_page_setup();
#endif

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE-1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on core with and without coherent
	 * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;

	/*
	 * Per-CPU overrides
	 */
	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		/* No IPI is needed because all CPUs share the same D$ */
		flush_data_cache_page = r4k_blast_dcache_page;
		break;
	case CPU_BMIPS5000:
		/* We lose our superpowers if L2 is disabled */
		if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
			break;

		/* I$ fills from D$ just by emptying the write buffers */
		flush_cache_page = (void *)b5k_instruction_hazard;
		flush_cache_range = (void *)b5k_instruction_hazard;
		flush_cache_sigtramp = (void *)b5k_instruction_hazard;
		local_flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_icache_range = (void *)b5k_instruction_hazard;
		local_flush_icache_range = (void *)b5k_instruction_hazard;

		/* Cache aliases are handled in hardware; allow HIGHMEM */
		current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES;

		/* Optimization: an L2 flush implicitly flushes the L1 */
		current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
		break;
	}
}
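/*
 * Re-run coherency_setup() when a CPU comes back from a power-management
 * state (or fails to enter one), since c0_config may have been lost or
 * reset while the core was powered down.
 */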
static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		coherency_setup();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_cache_pm_notifier_block = {
	.notifier_call = r4k_cache_pm_notifier,
};

int __init r4k_cache_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
arch_initcall(r4k_cache_init_pm);