/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
  22. static bool __maybe_unused
  23. is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
  24. {
  25. WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
  26. return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
  27. entry->midr_range_min,
  28. entry->midr_range_max);
  29. }
  30. static bool
  31. has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
  32. int scope)
  33. {
  34. WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
  35. return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
  36. (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
  37. }
  38. static int cpu_enable_trap_ctr_access(void *__unused)
  39. {
  40. /* Clear SCTLR_EL1.UCT */
  41. config_sctlr_el1(SCTLR_EL1_UCT, 0);
  42. return 0;
  43. }
  44. #define MIDR_RANGE(model, min, max) \
  45. .def_scope = SCOPE_LOCAL_CPU, \
  46. .matches = is_affected_midr_range, \
  47. .midr_model = model, \
  48. .midr_range_min = min, \
  49. .midr_range_max = max
  50. #define MIDR_ALL_VERSIONS(model) \
  51. .def_scope = SCOPE_LOCAL_CPU, \
  52. .matches = is_affected_midr_range, \
  53. .midr_model = model, \
  54. .midr_range_min = 0, \
  55. .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
  56. const struct arm64_cpu_capabilities arm64_errata[] = {
  57. #if defined(CONFIG_ARM64_ERRATUM_826319) || \
  58. defined(CONFIG_ARM64_ERRATUM_827319) || \
  59. defined(CONFIG_ARM64_ERRATUM_824069)
  60. {
  61. /* Cortex-A53 r0p[012] */
  62. .desc = "ARM errata 826319, 827319, 824069",
  63. .capability = ARM64_WORKAROUND_CLEAN_CACHE,
  64. MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
  65. .enable = cpu_enable_cache_maint_trap,
  66. },
  67. #endif
  68. #ifdef CONFIG_ARM64_ERRATUM_819472
  69. {
  70. /* Cortex-A53 r0p[01] */
  71. .desc = "ARM errata 819472",
  72. .capability = ARM64_WORKAROUND_CLEAN_CACHE,
  73. MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
  74. .enable = cpu_enable_cache_maint_trap,
  75. },
  76. #endif
  77. #ifdef CONFIG_ARM64_ERRATUM_832075
  78. {
  79. /* Cortex-A57 r0p0 - r1p2 */
  80. .desc = "ARM erratum 832075",
  81. .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
  82. MIDR_RANGE(MIDR_CORTEX_A57,
  83. MIDR_CPU_VAR_REV(0, 0),
  84. MIDR_CPU_VAR_REV(1, 2)),
  85. },
  86. #endif
  87. #ifdef CONFIG_ARM64_ERRATUM_834220
  88. {
  89. /* Cortex-A57 r0p0 - r1p2 */
  90. .desc = "ARM erratum 834220",
  91. .capability = ARM64_WORKAROUND_834220,
  92. MIDR_RANGE(MIDR_CORTEX_A57,
  93. MIDR_CPU_VAR_REV(0, 0),
  94. MIDR_CPU_VAR_REV(1, 2)),
  95. },
  96. #endif
  97. #ifdef CONFIG_ARM64_ERRATUM_845719
  98. {
  99. /* Cortex-A53 r0p[01234] */
  100. .desc = "ARM erratum 845719",
  101. .capability = ARM64_WORKAROUND_845719,
  102. MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
  103. },
  104. #endif
  105. #ifdef CONFIG_CAVIUM_ERRATUM_23154
  106. {
  107. /* Cavium ThunderX, pass 1.x */
  108. .desc = "Cavium erratum 23154",
  109. .capability = ARM64_WORKAROUND_CAVIUM_23154,
  110. MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
  111. },
  112. #endif
  113. #ifdef CONFIG_CAVIUM_ERRATUM_27456
  114. {
  115. /* Cavium ThunderX, T88 pass 1.x - 2.1 */
  116. .desc = "Cavium erratum 27456",
  117. .capability = ARM64_WORKAROUND_CAVIUM_27456,
  118. MIDR_RANGE(MIDR_THUNDERX,
  119. MIDR_CPU_VAR_REV(0, 0),
  120. MIDR_CPU_VAR_REV(1, 1)),
  121. },
  122. {
  123. /* Cavium ThunderX, T81 pass 1.0 */
  124. .desc = "Cavium erratum 27456",
  125. .capability = ARM64_WORKAROUND_CAVIUM_27456,
  126. MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
  127. },
  128. #endif
  129. #ifdef CONFIG_CAVIUM_ERRATUM_30115
  130. {
  131. /* Cavium ThunderX, T88 pass 1.x - 2.2 */
  132. .desc = "Cavium erratum 30115",
  133. .capability = ARM64_WORKAROUND_CAVIUM_30115,
  134. MIDR_RANGE(MIDR_THUNDERX, 0x00,
  135. (1 << MIDR_VARIANT_SHIFT) | 2),
  136. },
  137. {
  138. /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
  139. .desc = "Cavium erratum 30115",
  140. .capability = ARM64_WORKAROUND_CAVIUM_30115,
  141. MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
  142. },
  143. {
  144. /* Cavium ThunderX, T83 pass 1.0 */
  145. .desc = "Cavium erratum 30115",
  146. .capability = ARM64_WORKAROUND_CAVIUM_30115,
  147. MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
  148. },
  149. #endif
  150. {
  151. .desc = "Mismatched cache line size",
  152. .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
  153. .matches = has_mismatched_cache_line_size,
  154. .def_scope = SCOPE_LOCAL_CPU,
  155. .enable = cpu_enable_trap_ctr_access,
  156. },
  157. #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
  158. {
  159. .desc = "Qualcomm Technologies Falkor erratum 1003",
  160. .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
  161. MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
  162. MIDR_CPU_VAR_REV(0, 0),
  163. MIDR_CPU_VAR_REV(0, 0)),
  164. },
  165. #endif
  166. #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
  167. {
  168. .desc = "Qualcomm Technologies Falkor erratum 1009",
  169. .capability = ARM64_WORKAROUND_REPEAT_TLBI,
  170. MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
  171. MIDR_CPU_VAR_REV(0, 0),
  172. MIDR_CPU_VAR_REV(0, 0)),
  173. },
  174. #endif
  175. #ifdef CONFIG_ARM64_ERRATUM_858921
  176. {
  177. /* Cortex-A73 all versions */
  178. .desc = "ARM erratum 858921",
  179. .capability = ARM64_WORKAROUND_858921,
  180. MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
  181. },
  182. #endif
  183. {
  184. }
  185. };
/*
 * The CPU Errata work arounds are detected and applied at boot time
 * and the related information is freed soon after. If the new CPU requires
 * an errata not detected at boot, fail this CPU.
 */
  191. void verify_local_cpu_errata_workarounds(void)
  192. {
  193. const struct arm64_cpu_capabilities *caps = arm64_errata;
  194. for (; caps->matches; caps++)
  195. if (!cpus_have_cap(caps->capability) &&
  196. caps->matches(caps, SCOPE_LOCAL_CPU)) {
  197. pr_crit("CPU%d: Requires work around for %s, not detected"
  198. " at boot time\n",
  199. smp_processor_id(),
  200. caps->desc ? : "an erratum");
  201. cpu_die_early();
  202. }
  203. }
  204. void update_cpu_errata_workarounds(void)
  205. {
  206. update_cpu_capabilities(arm64_errata, "enabling workaround for");
  207. }
  208. void __init enable_errata_workarounds(void)
  209. {
  210. enable_cpu_capabilities(arm64_errata);
  211. }