cache-b15-rac.c

/*
 * Broadcom Brahma-B15 CPU read-ahead cache management functions
 *
 * Copyright (C) 2015-2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/of_address.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-b15-rac.h>

extern void v7_flush_kern_cache_all(void);
/* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */
#define RAC_CONFIG0_REG		(0x78)
#define RACENPREF_MASK		(0x3)
#define RACPREFINST_SHIFT	(0)
#define RACENINST_SHIFT		(2)
#define RACPREFDATA_SHIFT	(4)
#define RACENDATA_SHIFT		(6)
#define RAC_CPU_SHIFT		(8)
#define RACCFG_MASK		(0xff)
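/* Each possible CPU gets its own RACCFG_MASK-wide (8-bit) configuration field
 * within RAC_CONFIG0_REG, at bit offset cpu * RAC_CPU_SHIFT; see how
 * b15_rac_enable() builds the per-CPU enable mask below.
 */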
#define RAC_CONFIG1_REG		(0x7c)
#define RAC_FLUSH_REG		(0x80)
#define FLUSH_RAC		(1 << 0)

/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
#define RAC_DATA_INST_EN_MASK	(1 << RACPREFINST_SHIFT | \
				 RACENPREF_MASK << RACENINST_SHIFT | \
				 1 << RACPREFDATA_SHIFT | \
				 RACENPREF_MASK << RACENDATA_SHIFT)

#define RAC_ENABLED		0
/* Special state where we want to bypass the spinlock and call directly
 * into the v7 cache maintenance operations during suspend/resume
 */
#define RAC_SUSPENDED		1
static void __iomem *b15_rac_base;
static DEFINE_SPINLOCK(rac_lock);
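/* Saved copy of RAC_CONFIG0_REG, restored after CPU hotplug and S3 resume */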
static u32 rac_config0_reg;

/* Initialization flag to avoid checking for b15_rac_base, and to prevent
 * multi-platform kernels from crashing here as well.
 */
static unsigned long b15_rac_flags;
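/* Disable the read-ahead cache by clearing RAC_CONFIG0_REG and return the
 * previous configuration so the caller can restore it later.
 */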
static inline u32 __b15_rac_disable(void)
{
	u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);

	__raw_writel(0, b15_rac_base + RAC_CONFIG0_REG);
	dmb();

	return val;
}
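/* Request a flush of the read-ahead cache and poll until it completes */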
static inline void __b15_rac_flush(void)
{
	u32 reg;

	__raw_writel(FLUSH_RAC, b15_rac_base + RAC_FLUSH_REG);
	do {
		/* This dmb() is required to force the Bus Interface Unit
		 * to clean outstanding writes, and forces an idle cycle
		 * to be inserted.
		 */
		dmb();
		reg = __raw_readl(b15_rac_base + RAC_FLUSH_REG);
	} while (reg & FLUSH_RAC);
}
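/* Disable the read-ahead cache, flush it, and return the saved configuration */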
static inline u32 b15_rac_disable_and_flush(void)
{
	u32 reg;

	reg = __b15_rac_disable();
	__b15_rac_flush();
	return reg;
}
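/* Re-enable the read-ahead cache with a previously saved RAC_CONFIG0_REG value */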
static inline void __b15_rac_enable(u32 val)
{
	__raw_writel(val, b15_rac_base + RAC_CONFIG0_REG);
	/* dsb() is required here to be consistent with __flush_icache_all() */
	dsb();
}
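/* Generate a b15_flush_<name>() wrapper around the corresponding
 * v7_flush_<name>() routine: when the RAC is suspended the ARMv7 operation is
 * called directly, otherwise the RAC is disabled and flushed around the ARMv7
 * flush under rac_lock and re-enabled afterwards. "bar" is an optional
 * trailing barrier statement.
 */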
#define BUILD_RAC_CACHE_OP(name, bar)				\
void b15_flush_##name(void)					\
{								\
	unsigned int do_flush;					\
	u32 val = 0;						\
								\
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) {		\
		v7_flush_##name();				\
		bar;						\
		return;						\
	}							\
								\
	spin_lock(&rac_lock);					\
	do_flush = test_bit(RAC_ENABLED, &b15_rac_flags);	\
	if (do_flush)						\
		val = b15_rac_disable_and_flush();		\
	v7_flush_##name();					\
	if (!do_flush)						\
		bar;						\
	else							\
		__b15_rac_enable(val);				\
	spin_unlock(&rac_lock);					\
}

#define nobarrier

/* The readahead cache present in the Brahma-B15 CPU is a special piece of
 * hardware after the integrated L2 cache of the B15 CPU complex whose purpose
 * is to prefetch instruction and/or data with a line size of either 64 bytes
 * or 256 bytes. The rationale is that the data-bus of the CPU interface is
 * optimized for 256-byte transactions, so enabling the readahead cache
 * provides a significant performance boost that we want enabled (typically
 * twice the performance for a memcpy benchmark application).
 *
 * The readahead cache is transparent for Modified Virtual Addresses
 * cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and
 * DCCIMVAC.
 *
 * It is however not transparent for the following cache maintenance
 * operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU, which is precisely
 * what we are patching here with our BUILD_RAC_CACHE_OP macro.
 */
BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier);
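/* Program RAC_CONFIG0_REG with the instruction/data prefetch enable bits for
 * every possible CPU; the RAC is disabled and flushed before the new value is
 * written.
 */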
static void b15_rac_enable(void)
{
	unsigned int cpu;
	u32 enable = 0;

	for_each_possible_cpu(cpu)
		enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));

	b15_rac_disable_and_flush();
	__b15_rac_enable(enable);
}
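/* Reboot/kexec notifier: disable and flush the read-ahead cache before
 * restart so the next kernel starts with it turned off.
 */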
static int b15_rac_reboot_notifier(struct notifier_block *nb,
				   unsigned long action,
				   void *data)
{
	/* During kexec, we are not yet migrated to the boot CPU, so we need to
	 * make sure we are SMP safe here. Once the RAC is disabled, flag it as
	 * suspended such that the hotplug notifier returns early.
	 */
	if (action == SYS_RESTART) {
		spin_lock(&rac_lock);
		b15_rac_disable_and_flush();
		clear_bit(RAC_ENABLED, &b15_rac_flags);
		set_bit(RAC_SUSPENDED, &b15_rac_flags);
		spin_unlock(&rac_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block b15_rac_reboot_nb = {
	.notifier_call	= b15_rac_reboot_notifier,
};
/* The CPU hotplug case is the most interesting one, we basically need to make
 * sure that the RAC is disabled for the entire system prior to having a CPU
 * die, in particular prior to this dying CPU having exited the coherency
 * domain.
 *
 * Once this CPU is marked dead, we can safely re-enable the RAC for the
 * remaining CPUs in the system which are still online.
 *
 * Offlining a CPU is the problematic case, onlining a CPU is not much of an
 * issue since the CPU and its cache-level hierarchy will start filling with
 * the RAC disabled, so L1 and L2 only.
 *
 * In this function, we should NOT have to verify any unsafe setting/condition:
 *
 * b15_rac_base:
 *
 *   It is protected by the RAC_ENABLED flag which is cleared by default, and
 *   only set once the initialization procedure is done, by which time
 *   b15_rac_base has already been set.
 *
 * RAC_ENABLED:
 *   There is a small timing window, in b15_rac_init(), between
 *	cpuhp_setup_state_*()
 *	...
 *	set RAC_ENABLED
 *   However, there is no hotplug activity based on the Linux booting procedure.
 *
 * Since we have to disable the RAC for all cores, we keep the RAC on as long
 * as possible (disable it as late as possible) to gain the cache benefit.
 *
 * Thus, the dying/dead states are chosen here.
 *
 * We are choosing not to disable the RAC on a per-CPU basis; if we did, we
 * would want to consider disabling it as early as possible to benefit the
 * other active CPUs.
 */
/* Running on the dying CPU */
static int b15_rac_dying_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* Indicate that we are starting a hotplug procedure */
	__clear_bit(RAC_ENABLED, &b15_rac_flags);

	/* Disable the readahead cache and save its value to a global */
	rac_config0_reg = b15_rac_disable_and_flush();

	spin_unlock(&rac_lock);

	return 0;
}
/* Running on a non-dying CPU */
static int b15_rac_dead_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* And enable it */
	__b15_rac_enable(rac_config0_reg);
	__set_bit(RAC_ENABLED, &b15_rac_flags);

	spin_unlock(&rac_lock);

	return 0;
}
static int b15_rac_suspend(void)
{
	/* Suspend the read-ahead cache operations, forcing our cache
	 * implementation to fall back to the regular ARMv7 calls.
	 *
	 * We are guaranteed to be running on the boot CPU at this point and
	 * with every other CPU quiesced, so setting RAC_SUSPENDED is not racy
	 * here.
	 */
	rac_config0_reg = b15_rac_disable_and_flush();
	set_bit(RAC_SUSPENDED, &b15_rac_flags);

	return 0;
}
static void b15_rac_resume(void)
{
	/* Coming out of a S3 suspend/resume cycle, the read-ahead cache
	 * register RAC_CONFIG0_REG will have been restored to its default
	 * value, so make sure we re-enable it and clear the suspend flag. We
	 * are also guaranteed to run on the boot CPU, so this is not racy
	 * either.
	 */
	__b15_rac_enable(rac_config0_reg);
	clear_bit(RAC_SUSPENDED, &b15_rac_flags);
}

static struct syscore_ops b15_rac_syscore_ops = {
	.suspend	= b15_rac_suspend,
	.resume		= b15_rac_resume,
};
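/* Look up the Broadcom STB CPU BIU control node, map its registers, register
 * the reboot notifier, CPU hotplug states and syscore operations, then enable
 * the read-ahead cache for all possible CPUs.
 */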
static int __init b15_rac_init(void)
{
	struct device_node *dn;
	int ret = 0, cpu;
	u32 reg, en_mask = 0;

	dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
	if (!dn)
		return -ENODEV;

	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
		goto out;

	b15_rac_base = of_iomap(dn, 0);
	if (!b15_rac_base) {
		pr_err("failed to remap BIU control base\n");
		ret = -ENOMEM;
		goto out;
	}

	ret = register_reboot_notifier(&b15_rac_reboot_nb);
	if (ret) {
		pr_err("failed to register reboot notifier\n");
		iounmap(b15_rac_base);
		goto out;
	}

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
						"arm/cache-b15-rac:dead",
						NULL, b15_rac_dead_cpu);
		if (ret)
			goto out_unmap;

		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
						"arm/cache-b15-rac:dying",
						NULL, b15_rac_dying_cpu);
		if (ret)
			goto out_cpu_dead;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP))
		register_syscore_ops(&b15_rac_syscore_ops);

	spin_lock(&rac_lock);
	reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
	for_each_possible_cpu(cpu)
		en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
	WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");

	b15_rac_enable();
	set_bit(RAC_ENABLED, &b15_rac_flags);
	spin_unlock(&rac_lock);

	pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n",
		b15_rac_base + RAC_CONFIG0_REG);

	goto out;
out_cpu_dead:
	/* Only the "dead" state was installed at this point, so remove it */
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD);
out_unmap:
	unregister_reboot_notifier(&b15_rac_reboot_nb);
	iounmap(b15_rac_base);
out:
	of_node_put(dn);
	return ret;
}
arch_initcall(b15_rac_init);