cpufeature.c

/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT \
	(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
	 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
	 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
	 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
	 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
	 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier.
 */
static bool sys_caps_initialised;

static inline void set_sys_caps_initialised(void)
{
	sys_caps_initialised = true;
}

static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
{
	/* file-wide pr_fmt adds "CPU features: " prefix */
	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
	return 0;
}

static struct notifier_block cpu_hwcaps_notifier = {
	.notifier_call = dump_cpu_hwcaps
};

static int __init register_cpu_hwcaps_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &cpu_hwcaps_notifier);
	return 0;
}
__initcall(register_cpu_hwcaps_dumper);

DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);
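
/*
 * Each ID register is described by a table of arm64_ftr_bits entries, one
 * per feature field. As used in this file: .shift and .width locate the
 * field, .sign selects signed vs unsigned extraction, .type picks the
 * policy for merging differing values (FTR_EXACT / FTR_LOWER_SAFE /
 * FTR_HIGHER_SAFE), .strict decides whether a mismatch taints the kernel,
 * .visible decides whether the field is exposed to userspace via MRS
 * emulation, and .safe_val is the fallback used for hidden fields and for
 * FTR_EXACT mismatches.
 */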
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{ \
		.sign = SIGNED, \
		.visible = VISIBLE, \
		.strict = STRICT, \
		.type = TYPE, \
		.shift = SHIFT, \
		.width = WIDTH, \
		.safe_val = SAFE_VAL, \
	}

/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END \
	{ \
		.width = 0, \
	}

/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);

/*
 * NOTE: Any changes to the visibility of features should be kept in
 * sync with the documentation of the CPU feature register ABI.
 */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about the EL3 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are mapped
	 * within the minimum PARange of all CPUs
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RAO */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - VIPT.
	 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
	ARM64_FTR_END,
};

struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
	.name		= "SYS_CTR_EL0",
	.ftr_bits	= ftr_ctr
};

static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),	/* InnerShr */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* ShareLvl */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),	/* OuterShr */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* PMSA */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* VMSA */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	/*
	 * We can instantiate multiple PMU instances with different levels
	 * of support.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* FPMisc */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* SIMDMisc */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* ac2 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* State3 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),	/* State2 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* State1 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* State0 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_zcr[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
		ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),	/* LEN */
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

/* Table for a single 32bit feature value */
static const struct arm64_ftr_bits ftr_single32[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};
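
/*
 * An empty table: no fields are described, so init_cpu_ftr_reg() treats
 * every bit of the register as RES0 for the system-wide value and requires
 * it to match strictly across CPUs.
 */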
static const struct arm64_ftr_bits ftr_raz[] = {
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table) { \
	.sys_id = id, \
	.reg = &(struct arm64_ftr_reg){ \
		.name = #id, \
		.ftr_bits = &((table)[0]), \
	}}

static const struct __ftr_reg_entry {
	u32			sys_id;
	struct arm64_ftr_reg	*reg;
} arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 0, CRn = 1, CRm = 2 */
	ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
};

static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *           the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	const struct __ftr_reg_entry *ret;

	ret = bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
	if (ret)
		return ret->reg;
	return NULL;
}

static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
			       s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}
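
/*
 * Merge policy when two CPUs report different values for a field:
 * FTR_EXACT falls back to the table's safe_val, FTR_LOWER_SAFE keeps the
 * lower of the two values and FTR_HIGHER_SAFE keeps the higher one.
 */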
static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
				s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}

static void __init sort_ftr_regs(void)
{
	int i;

	/* Check that the array is sorted so that we can do the binary search */
	for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
}

/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 * Any bits that are not covered by an arm64_ftr_bits entry are considered
 * RES0 for the system-wide value, and must strictly match.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	u64 user_mask = 0;
	u64 valid_mask = 0;

	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		u64 ftr_mask = arm64_ftr_mask(ftrp);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);

		valid_mask |= ftr_mask;
		if (!ftrp->strict)
			strict_mask &= ~ftr_mask;
		if (ftrp->visible)
			user_mask |= ftr_mask;
		else
			reg->user_val = arm64_ftr_set_value(ftrp,
							    reg->user_val,
							    ftrp->safe_val);
	}

	val &= valid_mask;

	reg->sys_val = val;
	reg->strict_mask = strict_mask;
	reg->user_mask = user_mask;
}
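
/*
 * Called once on the boot CPU: seed the system-wide feature register state
 * from the boot CPU's ID registers. The 32-bit (AArch32) registers and
 * ZCR_EL1 are only initialised when the boot CPU reports 32-bit EL0 or SVE
 * support, respectively.
 */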
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the tables, make sure it is sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
	}

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
		sve_init_vq_map();
	}
}
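
/*
 * Fold a secondary CPU's register value into the current system-wide value,
 * field by field, using the safe-value policy above.
 */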
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	const struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;

		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
}
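
/*
 * Returns non-zero when any strict field differs from the boot CPU, so the
 * caller can accumulate a taint flag; non-strict differences are folded in
 * silently.
 */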
static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}

/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);

	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
				      info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);

	/*
	 * If we have AArch32, we care about 32-bit features for compat.
	 * If the system doesn't support AArch32, don't update them.
	 */
	if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
	    id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {

		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
					      info->reg_id_dfr0, boot->reg_id_dfr0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
					      info->reg_id_isar0, boot->reg_id_isar0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
					      info->reg_id_isar1, boot->reg_id_isar1);
		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
					      info->reg_id_isar2, boot->reg_id_isar2);
		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
					      info->reg_id_isar3, boot->reg_id_isar3);
		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
					      info->reg_id_isar4, boot->reg_id_isar4);
		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
					      info->reg_id_isar5, boot->reg_id_isar5);

		/*
		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
		 * ACTLR formats could differ across CPUs and therefore would have to
		 * be trapped for virtualization anyway.
		 */
		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
					      info->reg_id_mmfr0, boot->reg_id_mmfr0);
		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
					      info->reg_id_mmfr1, boot->reg_id_mmfr1);
		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
					      info->reg_id_mmfr2, boot->reg_id_mmfr2);
		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
					      info->reg_id_mmfr3, boot->reg_id_mmfr3);
		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
					      info->reg_id_pfr0, boot->reg_id_pfr0);
		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
					      info->reg_id_pfr1, boot->reg_id_pfr1);
		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
					      info->reg_mvfr0, boot->reg_mvfr0);
		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
					      info->reg_mvfr1, boot->reg_mvfr1);
		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
					      info->reg_mvfr2, boot->reg_mvfr2);
	}

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
					      info->reg_zcr, boot->reg_zcr);

		/* Probe vector lengths, unless we already gave up on SVE */
		if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
		    !sys_caps_initialised)
			sve_update_vq_map();
	}

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	if (taint) {
		pr_warn_once("Unsupported CPU feature variation detected.\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	}
}

u64 read_sanitised_ftr_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}

#define read_sysreg_case(r) \
	case r:		return read_sysreg_s(r)

/*
 * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
 * Read the system register on the current CPU
 */
static u64 __read_sysreg_by_encoding(u32 sys_id)
{
	switch (sys_id) {
	read_sysreg_case(SYS_ID_PFR0_EL1);
	read_sysreg_case(SYS_ID_PFR1_EL1);
	read_sysreg_case(SYS_ID_DFR0_EL1);
	read_sysreg_case(SYS_ID_MMFR0_EL1);
	read_sysreg_case(SYS_ID_MMFR1_EL1);
	read_sysreg_case(SYS_ID_MMFR2_EL1);
	read_sysreg_case(SYS_ID_MMFR3_EL1);
	read_sysreg_case(SYS_ID_ISAR0_EL1);
	read_sysreg_case(SYS_ID_ISAR1_EL1);
	read_sysreg_case(SYS_ID_ISAR2_EL1);
	read_sysreg_case(SYS_ID_ISAR3_EL1);
	read_sysreg_case(SYS_ID_ISAR4_EL1);
	read_sysreg_case(SYS_ID_ISAR5_EL1);
	read_sysreg_case(SYS_MVFR0_EL1);
	read_sysreg_case(SYS_MVFR1_EL1);
	read_sysreg_case(SYS_MVFR2_EL1);

	read_sysreg_case(SYS_ID_AA64PFR0_EL1);
	read_sysreg_case(SYS_ID_AA64PFR1_EL1);
	read_sysreg_case(SYS_ID_AA64DFR0_EL1);
	read_sysreg_case(SYS_ID_AA64DFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR1_EL1);

	read_sysreg_case(SYS_CNTFRQ_EL0);
	read_sysreg_case(SYS_CTR_EL0);
	read_sysreg_case(SYS_DCZID_EL0);

	default:
		BUG();
		return 0;
	}
}

#include <linux/irqchip/arm-gic-v3.h>
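
/*
 * A capability entry matches when the selected ID register field, extracted
 * with the entry's signedness, is at least min_field_value. With
 * SCOPE_SYSTEM the sanitised system-wide value is used; with SCOPE_LOCAL_CPU
 * the register is read directly on the current CPU (hence the preemption
 * check below).
 */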
static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);

	return val >= entry->min_field_value;
}

static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 val;

	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
	if (scope == SCOPE_SYSTEM)
		val = read_sanitised_ftr_reg(entry->sys_reg);
	else
		val = __read_sysreg_by_encoding(entry->sys_reg);

	return feature_matches(val, entry);
}

static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
	bool has_sre;

	if (!has_cpuid_feature(entry, scope))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u32 midr = read_cpuid_id();

	/* Cavium ThunderX pass 1.x and 2.x */
	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
		MIDR_CPU_VAR_REV(0, 0),
		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}

static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return is_kernel_in_hyp_mode();
}

static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
			   int __unused)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);

	/*
	 * Activate the lower HYP offset only if:
	 * - the idmap doesn't clash with it,
	 * - the kernel is not running at EL2.
	 */
	return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
}

static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_signed_field(pfr0,
					ID_AA64PFR0_FP_SHIFT) < 0;
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
				int __unused)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/* Forced on command line? */
	if (__kpti_forced) {
		pr_info_once("kernel page table isolation forced %s by command line option\n",
			     __kpti_forced > 0 ? "ON" : "OFF");
		return __kpti_forced > 0;
	}

	/* Useful for KASLR robustness */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return true;

	/* Defer to CPU feature registers */
	return !cpuid_feature_extract_unsigned_field(pfr0,
						     ID_AA64PFR0_CSV3_SHIFT);
}

static int __init parse_kpti(char *str)
{
	bool enabled;
	int ret = strtobool(str, &enabled);

	if (ret)
		return ret;

	__kpti_forced = enabled ? 1 : -1;
	return 0;
}
__setup("kpti=", parse_kpti);
#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
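
/*
 * Table of kernel-internal capabilities. Most entries use has_cpuid_feature
 * with a sys_reg/field_pos/min_field_value triple; the rest supply their own
 * matches callback (GICv3 SRE, ThunderX prefetch, VHE, KPTI, ...).
 */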
static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
		.enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 2,
	},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
	{
		.desc = "Software prefetching using PRFM",
		.capability = ARM64_HAS_NO_HW_PREFETCH,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_no_hw_prefetch,
	},
#ifdef CONFIG_ARM64_UAO
	{
		.desc = "User Access Override",
		.capability = ARM64_HAS_UAO,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
		.min_field_value = 1,
		/*
		 * We rely on stop_machine() calling uao_thread_switch() to set
		 * UAO immediately after patching.
		 */
	},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
	{
		.capability = ARM64_ALT_PAN_NOT_UAO,
		.def_scope = SCOPE_SYSTEM,
		.matches = cpufeature_pan_not_uao,
	},
#endif /* CONFIG_ARM64_PAN */
	{
		.desc = "Virtualization Host Extensions",
		.capability = ARM64_HAS_VIRT_HOST_EXTN,
		.def_scope = SCOPE_SYSTEM,
		.matches = runs_at_el2,
	},
	{
		.desc = "32-bit EL0 Support",
		.capability = ARM64_HAS_32BIT_EL0,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_EL0_SHIFT,
		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
	},
	{
		.desc = "Reduced HYP mapping offset",
		.capability = ARM64_HYP_OFFSET_LOW,
		.def_scope = SCOPE_SYSTEM,
		.matches = hyp_offset_low,
	},
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	{
		.desc = "Kernel page table isolation (KPTI)",
		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
		.def_scope = SCOPE_SYSTEM,
		.matches = unmap_kernel_at_el0,
	},
#endif
	{
		/* FP/SIMD is not implemented */
		.capability = ARM64_HAS_NO_FPSIMD,
		.def_scope = SCOPE_SYSTEM,
		.min_field_value = 0,
		.matches = has_no_fpsimd,
	},
#ifdef CONFIG_ARM64_PMEM
	{
		.desc = "Data cache clean to Point of Persistence",
		.capability = ARM64_HAS_DCPOP,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
		.min_field_value = 1,
	},
#endif
#ifdef CONFIG_ARM64_SVE
	{
		.desc = "Scalable Vector Extension",
		.capability = ARM64_SVE,
		.def_scope = SCOPE_SYSTEM,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_SVE_SHIFT,
		.min_field_value = ID_AA64PFR0_SVE,
		.matches = has_cpuid_feature,
		.enable = sve_kernel_enable,
	},
#endif /* CONFIG_ARM64_SVE */
	{},
};
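
/*
 * HWCAP_CAP() builds a capability entry that, when it matches system-wide,
 * sets the named ELF hwcap bit (native or compat) via cap_set_elf_hwcap().
 */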
#define HWCAP_CAP(reg, field, s, min_value, type, cap) \
	{ \
		.desc = #cap, \
		.def_scope = SCOPE_SYSTEM, \
		.matches = has_cpuid_feature, \
		.sys_reg = reg, \
		.field_pos = field, \
		.sign = s, \
		.min_field_value = min_value, \
		.hwcap_type = type, \
		.hwcap = cap, \
	}

static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
#ifdef CONFIG_ARM64_SVE
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
#endif
	{},
};

static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};

static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		elf_hwcap |= cap->hwcap;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		WARN_ON(1);
		break;
	}
}

/* Check if we have a particular HWCAP enabled */
static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(1);
		rc = false;
	}

	return rc;
}

static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
	/* We support emulation of accesses to CPU ID feature registers */
	elf_hwcap |= HWCAP_CPUID;
	for (; hwcaps->matches; hwcaps++)
		if (hwcaps->matches(hwcaps, hwcaps->def_scope))
			cap_set_elf_hwcap(hwcaps);
}
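
/*
 * Walk a capability table, record every capability whose matches() callback
 * succeeds via cpus_set_cap(), and log each newly detected capability.
 */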
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			     const char *info)
{
	for (; caps->matches; caps++) {
		if (!caps->matches(caps, caps->def_scope))
			continue;

		if (!cpus_have_cap(caps->capability) && caps->desc)
			pr_info("%s %s\n", info, caps->desc);
		cpus_set_cap(caps->capability);
	}
}

/*
 * Run through the enabled capabilities and enable() it on all active
 * CPUs
 */
void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		unsigned int num = caps->capability;

		if (!cpus_have_cap(num))
			continue;

		/* Ensure cpus_have_const_cap(num) works */
		static_branch_enable(&cpu_hwcap_keys[num]);

		if (caps->enable) {
			/*
			 * Use stop_machine() as it schedules the work allowing
			 * us to modify PSTATE, instead of on_each_cpu() which
			 * uses an IPI, giving us a PSTATE that disappears when
			 * we return.
			 */
			stop_machine(caps->enable, (void *)caps, cpu_online_mask);
		}
	}
}

/*
 * Check for CPU features that are used in early boot
 * based on the Boot CPU value.
 */
static void check_early_cpu_features(void)
{
	verify_cpu_run_el();
	verify_cpu_asid_bits();
}

static void
verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing HWCAP: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
}

static void
verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		if (!cpus_have_cap(caps->capability))
			continue;
		/*
		 * If the new CPU misses an advertised feature, we cannot proceed
		 * further, park the cpu.
		 */
		if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing feature: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
		if (caps->enable)
			caps->enable((void *)caps);
	}
}
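
/*
 * A late CPU must support at least the SVE vector lengths the system already
 * advertises: die early if its maximum length is shorter or if the per-VQ
 * map check fails.
 */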
static void verify_sve_features(void)
{
	u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	u64 zcr = read_zcr_features();

	unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
	unsigned int len = zcr & ZCR_ELx_LEN_MASK;

	if (len < safe_len || sve_verify_vq_map()) {
		pr_crit("CPU%d: SVE: required vector length(s) missing\n",
			smp_processor_id());
		cpu_die_early();
	}

	/* Add checks on other ZCR bits here if necessary */
}

/*
 * Run through the enabled system capabilities and enable() it on this CPU.
 * The capabilities were decided based on the available CPUs at the boot time.
 * Any new CPU should match the system wide status of the capability. If the
 * new CPU doesn't have a capability which the system now has enabled, we
 * cannot do anything to fix it up and could cause unexpected failures. So
 * we park the CPU.
 */
static void verify_local_cpu_capabilities(void)
{
	verify_local_cpu_errata_workarounds();
	verify_local_cpu_features(arm64_features);
	verify_local_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		verify_local_elf_hwcaps(compat_elf_hwcaps);

	if (system_supports_sve())
		verify_sve_features();
}

void check_local_cpu_capabilities(void)
{
	/*
	 * All secondary CPUs should conform to the early CPU features
	 * in use by the kernel based on boot CPU.
	 */
	check_early_cpu_features();

	/*
	 * If we haven't finalised the system capabilities, this CPU gets
	 * a chance to update the errata workarounds.
	 * Otherwise, this CPU should verify that it has all the system
	 * advertised capabilities.
	 */
	if (!sys_caps_initialised)
		update_cpu_errata_workarounds();
	else
		verify_local_cpu_capabilities();
}

static void __init setup_feature_capabilities(void)
{
	update_cpu_capabilities(arm64_features, "detected feature:");
	enable_cpu_capabilities(arm64_features);
}

DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);

static void __init mark_const_caps_ready(void)
{
	static_branch_enable(&arm64_const_caps_ready);
}

/*
 * Check if the current CPU has a given feature capability.
 * Should be called from non-preemptible context.
 */
static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
			       unsigned int cap)
{
	const struct arm64_cpu_capabilities *caps;

	if (WARN_ON(preemptible()))
		return false;

	for (caps = cap_array; caps->desc; caps++)
		if (caps->capability == cap && caps->matches)
			return caps->matches(caps, SCOPE_LOCAL_CPU);

	return false;
}

extern const struct arm64_cpu_capabilities arm64_errata[];

bool this_cpu_has_cap(unsigned int cap)
{
	return (__this_cpu_has_cap(arm64_features, cap) ||
		__this_cpu_has_cap(arm64_errata, cap));
}

void __init setup_cpu_features(void)
{
	u32 cwg;
	int cls;

	/* Set the CPU feature capabilities */
	setup_feature_capabilities();
	enable_errata_workarounds();
	mark_const_caps_ready();
	setup_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		setup_elf_hwcaps(compat_elf_hwcaps);

	sve_setup();

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);
}

static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}

/*
 * We emulate only the following system register space.
 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
 * See Table C5-6 System instruction encodings for System register accesses,
 * ARMv8 ARM(ARM DDI 0487A.f) for more details.
 */
static inline bool __attribute_const__ is_emulated(u32 id)
{
	return (sys_reg_Op0(id) == 0x3 &&
		sys_reg_CRn(id) == 0x0 &&
		sys_reg_Op1(id) == 0x0 &&
		(sys_reg_CRm(id) == 0 ||
		 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
}

/*
 * With CRm == 0, reg should be one of :
 * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
 */
static inline int emulate_id_reg(u32 id, u64 *valp)
{
	switch (id) {
	case SYS_MIDR_EL1:
		*valp = read_cpuid_id();
		break;
	case SYS_MPIDR_EL1:
		*valp = SYS_MPIDR_SAFE_VAL;
		break;
	case SYS_REVIDR_EL1:
		/* IMPLEMENTATION DEFINED values are emulated with 0 */
		*valp = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int emulate_sys_reg(u32 id, u64 *valp)
{
	struct arm64_ftr_reg *regp;

	if (!is_emulated(id))
		return -EINVAL;

	if (sys_reg_CRm(id) == 0)
		return emulate_id_reg(id, valp);

	regp = get_arm64_ftr_reg(id);
	if (regp)
		*valp = arm64_ftr_reg_user_value(regp);
	else
		/*
		 * The untracked registers are either IMPLEMENTATION DEFINED
		 * (e.g, ID_AFR0_EL1) or reserved RAZ.
		 */
		*valp = 0;

	return 0;
}
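
/*
 * MRS emulation for EL0: decode the 16-bit system register immediate from
 * the trapped instruction, shift it left by 5 to form the sys_reg()
 * encoding, emulate the read, write the result to the destination register
 * and skip the faulting instruction.
 */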
static int emulate_mrs(struct pt_regs *regs, u32 insn)
{
	int rc;
	u32 sys_reg, dst;
	u64 val;

	/*
	 * sys_reg values are defined as used in mrs/msr instruction.
	 * shift the imm value to get the encoding.
	 */
	sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
	rc = emulate_sys_reg(sys_reg, &val);
	if (!rc) {
		dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
		pt_regs_write_reg(regs, dst, val);
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	}

	return rc;
}

static struct undef_hook mrs_hook = {
	.instr_mask = 0xfff00000,
	.instr_val = 0xd5300000,
	.pstate_mask = COMPAT_PSR_MODE_MASK,
	.pstate_val = PSR_MODE_EL0t,
	.fn = emulate_mrs,
};

static int __init enable_mrs_emulation(void)
{
	register_undef_hook(&mrs_hook);
	return 0;
}

core_initcall(enable_mrs_emulation);