cpufeature.c

  1. /*
  2. * Contains CPU feature definitions
  3. *
  4. * Copyright (C) 2015 ARM Ltd.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #define pr_fmt(fmt) "CPU features: " fmt
  19. #include <linux/bsearch.h>
  20. #include <linux/cpumask.h>
  21. #include <linux/sort.h>
  22. #include <linux/stop_machine.h>
  23. #include <linux/types.h>
  24. #include <linux/mm.h>
  25. #include <asm/cpu.h>
  26. #include <asm/cpufeature.h>
  27. #include <asm/cpu_ops.h>
  28. #include <asm/fpsimd.h>
  29. #include <asm/mmu_context.h>
  30. #include <asm/processor.h>
  31. #include <asm/sysreg.h>
  32. #include <asm/traps.h>
  33. #include <asm/virt.h>
  34. unsigned long elf_hwcap __read_mostly;
  35. EXPORT_SYMBOL_GPL(elf_hwcap);
  36. #ifdef CONFIG_COMPAT
  37. #define COMPAT_ELF_HWCAP_DEFAULT \
  38. (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
  39. COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
  40. COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
  41. COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
  42. COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
  43. COMPAT_HWCAP_LPAE)
  44. unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
  45. unsigned int compat_elf_hwcap2 __read_mostly;
  46. #endif
  47. DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
  48. EXPORT_SYMBOL(cpu_hwcaps);
  49. /*
  50. * Flag to indicate if we have computed the system wide
  51. * capabilities based on the boot time active CPUs. This
  52. * will be used to determine if a new booting CPU should
  53. * go through the verification process to make sure that it
  54. * supports the system capabilities, without using a hotplug
  55. * notifier.
  56. */
  57. static bool sys_caps_initialised;
  58. static inline void set_sys_caps_initialised(void)
  59. {
  60. sys_caps_initialised = true;
  61. }
  62. static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
  63. {
  64. /* file-wide pr_fmt adds "CPU features: " prefix */
  65. pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
  66. return 0;
  67. }
  68. static struct notifier_block cpu_hwcaps_notifier = {
  69. .notifier_call = dump_cpu_hwcaps
  70. };
  71. static int __init register_cpu_hwcaps_dumper(void)
  72. {
  73. atomic_notifier_chain_register(&panic_notifier_list,
  74. &cpu_hwcaps_notifier);
  75. return 0;
  76. }
  77. __initcall(register_cpu_hwcaps_dumper);
  78. DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
  79. EXPORT_SYMBOL(cpu_hwcap_keys);
  80. #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  81. { \
  82. .sign = SIGNED, \
  83. .visible = VISIBLE, \
  84. .strict = STRICT, \
  85. .type = TYPE, \
  86. .shift = SHIFT, \
  87. .width = WIDTH, \
  88. .safe_val = SAFE_VAL, \
  89. }
  90. /* Define a feature with unsigned values */
  91. #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  92. __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
  93. /* Define a feature with a signed value */
  94. #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  95. __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
  96. #define ARM64_FTR_END \
  97. { \
  98. .width = 0, \
  99. }
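/*
 * Each arm64_ftr_bits entry describes one field of an ID register: its
 * signedness, whether it is visible to userspace, whether all CPUs must
 * agree on it (strict), how a safe system-wide value is chosen when they
 * differ (type), and the field's position, width and safe value. Tables
 * built from these entries are terminated with ARM64_FTR_END (width == 0).
 */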
  100. /* meta feature for alternatives */
  101. static bool __maybe_unused
  102. cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
  103. /*
  104. * NOTE: Any changes to the visibility of features should be kept in
  105. * sync with the documentation of the CPU feature register ABI.
  106. */
  107. static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
  108. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
  109. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
  110. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
  111. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
  112. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
  113. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
  114. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
  115. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
  116. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
  117. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
  118. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
  119. ARM64_FTR_END,
  120. };
  121. static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
  122. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
  123. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
  124. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
  125. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
  126. ARM64_FTR_END,
  127. };
  128. static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
  129. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
  130. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
  131. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
  132. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
  133. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
  134. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
  135. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
136. /* Linux doesn't care about EL3 */
  137. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
  138. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
  139. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
  140. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
  141. ARM64_FTR_END,
  142. };
  143. static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
  144. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
  145. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
  146. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
  147. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
  148. /* Linux shouldn't care about secure memory */
  149. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
  150. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
  151. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
  152. /*
  153. * Differing PARange is fine as long as all peripherals and memory are mapped
  154. * within the minimum PARange of all CPUs
  155. */
  156. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
  157. ARM64_FTR_END,
  158. };
  159. static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
  160. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
  161. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
  162. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
  163. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
  164. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
  165. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
  166. ARM64_FTR_END,
  167. };
  168. static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
  169. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
  170. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
  171. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
  172. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
  173. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
  174. ARM64_FTR_END,
  175. };
  176. static const struct arm64_ftr_bits ftr_ctr[] = {
  177. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
  178. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
  179. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
  180. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
  181. /*
  182. * Linux can handle differing I-cache policies. Userspace JITs will
  183. * make use of *minLine.
  184. * If we have differing I-cache policies, report it as the weakest - VIPT.
  185. */
  186. ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
  187. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
  188. ARM64_FTR_END,
  189. };
  190. struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
  191. .name = "SYS_CTR_EL0",
  192. .ftr_bits = ftr_ctr
  193. };
  194. static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
  195. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */
  196. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */
  197. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
  198. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */
  199. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */
  200. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */
  201. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */
  202. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */
  203. ARM64_FTR_END,
  204. };
  205. static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
  206. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
  207. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
  208. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
  209. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
  210. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
  211. /*
  212. * We can instantiate multiple PMU instances with different levels
  213. * of support.
  214. */
  215. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
  216. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
  217. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
  218. ARM64_FTR_END,
  219. };
  220. static const struct arm64_ftr_bits ftr_mvfr2[] = {
  221. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
  222. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
  223. ARM64_FTR_END,
  224. };
  225. static const struct arm64_ftr_bits ftr_dczid[] = {
  226. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */
  227. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */
  228. ARM64_FTR_END,
  229. };
  230. static const struct arm64_ftr_bits ftr_id_isar5[] = {
  231. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
  232. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
  233. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
  234. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
  235. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
  236. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
  237. ARM64_FTR_END,
  238. };
  239. static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
  240. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */
  241. ARM64_FTR_END,
  242. };
  243. static const struct arm64_ftr_bits ftr_id_pfr0[] = {
  244. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
  245. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
  246. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */
  247. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */
  248. ARM64_FTR_END,
  249. };
  250. static const struct arm64_ftr_bits ftr_id_dfr0[] = {
  251. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  252. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
  253. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  254. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  255. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  256. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  257. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  258. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  259. ARM64_FTR_END,
  260. };
  261. static const struct arm64_ftr_bits ftr_zcr[] = {
  262. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
  263. ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
  264. ARM64_FTR_END,
  265. };
  266. /*
  267. * Common ftr bits for a 32bit register with all hidden, strict
  268. * attributes, with 4bit feature fields and a default safe value of
  269. * 0. Covers the following 32bit registers:
  270. * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
  271. */
  272. static const struct arm64_ftr_bits ftr_generic_32bits[] = {
  273. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  274. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
  275. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  276. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  277. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  278. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  279. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  280. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  281. ARM64_FTR_END,
  282. };
  283. /* Table for a single 32bit feature value */
  284. static const struct arm64_ftr_bits ftr_single32[] = {
  285. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
  286. ARM64_FTR_END,
  287. };
  288. static const struct arm64_ftr_bits ftr_raz[] = {
  289. ARM64_FTR_END,
  290. };
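/*
 * ftr_raz has no fields at all, so every bit of the register is treated as
 * RES0 for the system-wide value and must match the boot CPU exactly (see
 * init_cpu_ftr_reg() below).
 */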
  291. #define ARM64_FTR_REG(id, table) { \
  292. .sys_id = id, \
  293. .reg = &(struct arm64_ftr_reg){ \
  294. .name = #id, \
  295. .ftr_bits = &((table)[0]), \
  296. }}
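/*
 * For example, ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0) expands to an
 * entry whose sys_id is the SYS_ID_PFR0_EL1 encoding and whose arm64_ftr_reg
 * is named "SYS_ID_PFR0_EL1" and carries the ftr_id_pfr0 field table.
 */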
  297. static const struct __ftr_reg_entry {
  298. u32 sys_id;
  299. struct arm64_ftr_reg *reg;
  300. } arm64_ftr_regs[] = {
  301. /* Op1 = 0, CRn = 0, CRm = 1 */
  302. ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
  303. ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
  304. ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
  305. ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
  306. ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
  307. ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
  308. ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
  309. /* Op1 = 0, CRn = 0, CRm = 2 */
  310. ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
  311. ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
  312. ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
  313. ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
  314. ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
  315. ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
  316. ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
  317. /* Op1 = 0, CRn = 0, CRm = 3 */
  318. ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
  319. ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
  320. ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
  321. /* Op1 = 0, CRn = 0, CRm = 4 */
  322. ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
  323. ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
  324. ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
  325. /* Op1 = 0, CRn = 0, CRm = 5 */
  326. ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
  327. ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
  328. /* Op1 = 0, CRn = 0, CRm = 6 */
  329. ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
  330. ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
  331. /* Op1 = 0, CRn = 0, CRm = 7 */
  332. ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
  333. ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
  334. ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
  335. /* Op1 = 0, CRn = 1, CRm = 2 */
  336. ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
  337. /* Op1 = 3, CRn = 0, CRm = 0 */
  338. { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
  339. ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
  340. /* Op1 = 3, CRn = 14, CRm = 0 */
  341. ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
  342. };
  343. static int search_cmp_ftr_reg(const void *id, const void *regp)
  344. {
  345. return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
  346. }
  347. /*
  348. * get_arm64_ftr_reg - Lookup a feature register entry using its
  349. * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
350. * ascending order of sys_id, we use binary search to find a matching
  351. * entry.
  352. *
  353. * returns - Upon success, matching ftr_reg entry for id.
354. * - NULL on failure. It is up to the caller to decide
  355. * the impact of a failure.
  356. */
  357. static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
  358. {
  359. const struct __ftr_reg_entry *ret;
  360. ret = bsearch((const void *)(unsigned long)sys_id,
  361. arm64_ftr_regs,
  362. ARRAY_SIZE(arm64_ftr_regs),
  363. sizeof(arm64_ftr_regs[0]),
  364. search_cmp_ftr_reg);
  365. if (ret)
  366. return ret->reg;
  367. return NULL;
  368. }
  369. static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
  370. s64 ftr_val)
  371. {
  372. u64 mask = arm64_ftr_mask(ftrp);
  373. reg &= ~mask;
  374. reg |= (ftr_val << ftrp->shift) & mask;
  375. return reg;
  376. }
  377. static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
  378. s64 cur)
  379. {
  380. s64 ret = 0;
  381. switch (ftrp->type) {
  382. case FTR_EXACT:
  383. ret = ftrp->safe_val;
  384. break;
  385. case FTR_LOWER_SAFE:
  386. ret = new < cur ? new : cur;
  387. break;
  388. case FTR_HIGHER_SAFE:
  389. ret = new > cur ? new : cur;
  390. break;
  391. default:
  392. BUG();
  393. }
  394. return ret;
  395. }
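/*
 * Example: for a FTR_LOWER_SAFE field, a system where one CPU reports 2 and
 * another reports 1 ends up with a system-wide value of 1; FTR_HIGHER_SAFE
 * picks the larger of the two, and FTR_EXACT falls back to the table's
 * safe_val whenever the CPUs disagree.
 */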
  396. static void __init sort_ftr_regs(void)
  397. {
  398. int i;
  399. /* Check that the array is sorted so that we can do the binary search */
  400. for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
  401. BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
  402. }
  403. /*
  404. * Initialise the CPU feature register from Boot CPU values.
405. * Also initialises the strict_mask for the register.
  406. * Any bits that are not covered by an arm64_ftr_bits entry are considered
  407. * RES0 for the system-wide value, and must strictly match.
  408. */
  409. static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
  410. {
  411. u64 val = 0;
  412. u64 strict_mask = ~0x0ULL;
  413. u64 user_mask = 0;
  414. u64 valid_mask = 0;
  415. const struct arm64_ftr_bits *ftrp;
  416. struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
  417. BUG_ON(!reg);
  418. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  419. u64 ftr_mask = arm64_ftr_mask(ftrp);
  420. s64 ftr_new = arm64_ftr_value(ftrp, new);
  421. val = arm64_ftr_set_value(ftrp, val, ftr_new);
  422. valid_mask |= ftr_mask;
  423. if (!ftrp->strict)
  424. strict_mask &= ~ftr_mask;
  425. if (ftrp->visible)
  426. user_mask |= ftr_mask;
  427. else
  428. reg->user_val = arm64_ftr_set_value(ftrp,
  429. reg->user_val,
  430. ftrp->safe_val);
  431. }
  432. val &= valid_mask;
  433. reg->sys_val = val;
  434. reg->strict_mask = strict_mask;
  435. reg->user_mask = user_mask;
  436. }
  437. void __init init_cpu_features(struct cpuinfo_arm64 *info)
  438. {
439. /* Before we start using the table, make sure it is sorted */
  440. sort_ftr_regs();
  441. init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
  442. init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
  443. init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
  444. init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
  445. init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
  446. init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
  447. init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
  448. init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
  449. init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
  450. init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
  451. init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
  452. init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
  453. init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
  454. if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  455. init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
  456. init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
  457. init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
  458. init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
  459. init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
  460. init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
  461. init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
  462. init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
  463. init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
  464. init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
  465. init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
  466. init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
  467. init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
  468. init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
  469. init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
  470. init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
  471. }
  472. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  473. init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
  474. sve_init_vq_map();
  475. }
  476. }
  477. static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
  478. {
  479. const struct arm64_ftr_bits *ftrp;
  480. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  481. s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
  482. s64 ftr_new = arm64_ftr_value(ftrp, new);
  483. if (ftr_cur == ftr_new)
  484. continue;
  485. /* Find a safe value */
  486. ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
  487. reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
  488. }
  489. }
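/*
 * Fold this CPU's value of sys_id into the system-wide view and return
 * non-zero if any strict field differs from the boot CPU, so the caller can
 * taint the kernel.
 */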
  490. static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
  491. {
  492. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
  493. BUG_ON(!regp);
  494. update_cpu_ftr_reg(regp, val);
  495. if ((boot & regp->strict_mask) == (val & regp->strict_mask))
  496. return 0;
  497. pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
  498. regp->name, boot, cpu, val);
  499. return 1;
  500. }
  501. /*
  502. * Update system wide CPU feature registers with the values from a
  503. * non-boot CPU. Also performs SANITY checks to make sure that there
  504. * aren't any insane variations from that of the boot CPU.
  505. */
  506. void update_cpu_features(int cpu,
  507. struct cpuinfo_arm64 *info,
  508. struct cpuinfo_arm64 *boot)
  509. {
  510. int taint = 0;
  511. /*
  512. * The kernel can handle differing I-cache policies, but otherwise
  513. * caches should look identical. Userspace JITs will make use of
  514. * *minLine.
  515. */
  516. taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
  517. info->reg_ctr, boot->reg_ctr);
  518. /*
  519. * Userspace may perform DC ZVA instructions. Mismatched block sizes
  520. * could result in too much or too little memory being zeroed if a
  521. * process is preempted and migrated between CPUs.
  522. */
  523. taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
  524. info->reg_dczid, boot->reg_dczid);
  525. /* If different, timekeeping will be broken (especially with KVM) */
  526. taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
  527. info->reg_cntfrq, boot->reg_cntfrq);
  528. /*
  529. * The kernel uses self-hosted debug features and expects CPUs to
  530. * support identical debug features. We presently need CTX_CMPs, WRPs,
  531. * and BRPs to be identical.
  532. * ID_AA64DFR1 is currently RES0.
  533. */
  534. taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
  535. info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
  536. taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
  537. info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
  538. /*
539. * Even in big.LITTLE, processors should be identical as far as the
540. * instruction set is concerned.
  541. */
  542. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
  543. info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
  544. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
  545. info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
  546. /*
  547. * Differing PARange support is fine as long as all peripherals and
  548. * memory are mapped within the minimum PARange of all CPUs.
  549. * Linux should not care about secure memory.
  550. */
  551. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
  552. info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
  553. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
  554. info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
  555. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
  556. info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
  557. /*
  558. * EL3 is not our concern.
  559. * ID_AA64PFR1 is currently RES0.
  560. */
  561. taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
  562. info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
  563. taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
  564. info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
  565. taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
  566. info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
  567. /*
  568. * If we have AArch32, we care about 32-bit features for compat.
  569. * If the system doesn't support AArch32, don't update them.
  570. */
  571. if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  572. id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  573. taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
  574. info->reg_id_dfr0, boot->reg_id_dfr0);
  575. taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
  576. info->reg_id_isar0, boot->reg_id_isar0);
  577. taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
  578. info->reg_id_isar1, boot->reg_id_isar1);
  579. taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
  580. info->reg_id_isar2, boot->reg_id_isar2);
  581. taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
  582. info->reg_id_isar3, boot->reg_id_isar3);
  583. taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
  584. info->reg_id_isar4, boot->reg_id_isar4);
  585. taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
  586. info->reg_id_isar5, boot->reg_id_isar5);
  587. /*
  588. * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
  589. * ACTLR formats could differ across CPUs and therefore would have to
  590. * be trapped for virtualization anyway.
  591. */
  592. taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
  593. info->reg_id_mmfr0, boot->reg_id_mmfr0);
  594. taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
  595. info->reg_id_mmfr1, boot->reg_id_mmfr1);
  596. taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
  597. info->reg_id_mmfr2, boot->reg_id_mmfr2);
  598. taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
  599. info->reg_id_mmfr3, boot->reg_id_mmfr3);
  600. taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
  601. info->reg_id_pfr0, boot->reg_id_pfr0);
  602. taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
  603. info->reg_id_pfr1, boot->reg_id_pfr1);
  604. taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
  605. info->reg_mvfr0, boot->reg_mvfr0);
  606. taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
  607. info->reg_mvfr1, boot->reg_mvfr1);
  608. taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
  609. info->reg_mvfr2, boot->reg_mvfr2);
  610. }
  611. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  612. taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
  613. info->reg_zcr, boot->reg_zcr);
  614. /* Probe vector lengths, unless we already gave up on SVE */
  615. if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  616. !sys_caps_initialised)
  617. sve_update_vq_map();
  618. }
  619. /*
  620. * Mismatched CPU features are a recipe for disaster. Don't even
  621. * pretend to support them.
  622. */
  623. if (taint) {
  624. pr_warn_once("Unsupported CPU feature variation detected.\n");
  625. add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
  626. }
  627. }
  628. u64 read_sanitised_ftr_reg(u32 id)
  629. {
  630. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
  631. /* We shouldn't get a request for an unsupported register */
  632. BUG_ON(!regp);
  633. return regp->sys_val;
  634. }
  635. #define read_sysreg_case(r) \
  636. case r: return read_sysreg_s(r)
  637. /*
  638. * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
  639. * Read the system register on the current CPU
  640. */
  641. static u64 __read_sysreg_by_encoding(u32 sys_id)
  642. {
  643. switch (sys_id) {
  644. read_sysreg_case(SYS_ID_PFR0_EL1);
  645. read_sysreg_case(SYS_ID_PFR1_EL1);
  646. read_sysreg_case(SYS_ID_DFR0_EL1);
  647. read_sysreg_case(SYS_ID_MMFR0_EL1);
  648. read_sysreg_case(SYS_ID_MMFR1_EL1);
  649. read_sysreg_case(SYS_ID_MMFR2_EL1);
  650. read_sysreg_case(SYS_ID_MMFR3_EL1);
  651. read_sysreg_case(SYS_ID_ISAR0_EL1);
  652. read_sysreg_case(SYS_ID_ISAR1_EL1);
  653. read_sysreg_case(SYS_ID_ISAR2_EL1);
  654. read_sysreg_case(SYS_ID_ISAR3_EL1);
  655. read_sysreg_case(SYS_ID_ISAR4_EL1);
  656. read_sysreg_case(SYS_ID_ISAR5_EL1);
  657. read_sysreg_case(SYS_MVFR0_EL1);
  658. read_sysreg_case(SYS_MVFR1_EL1);
  659. read_sysreg_case(SYS_MVFR2_EL1);
  660. read_sysreg_case(SYS_ID_AA64PFR0_EL1);
  661. read_sysreg_case(SYS_ID_AA64PFR1_EL1);
  662. read_sysreg_case(SYS_ID_AA64DFR0_EL1);
  663. read_sysreg_case(SYS_ID_AA64DFR1_EL1);
  664. read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
  665. read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
  666. read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
  667. read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
  668. read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
  669. read_sysreg_case(SYS_CNTFRQ_EL0);
  670. read_sysreg_case(SYS_CTR_EL0);
  671. read_sysreg_case(SYS_DCZID_EL0);
  672. default:
  673. BUG();
  674. return 0;
  675. }
  676. }
  677. #include <linux/irqchip/arm-gic-v3.h>
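/*
 * feature_matches()/has_cpuid_feature(): read either the sanitised
 * system-wide register value (SCOPE_SYSTEM) or the current CPU's register
 * (SCOPE_LOCAL_CPU) and check that the capability's field is at least
 * min_field_value.
 */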
  678. static bool
  679. feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
  680. {
  681. int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
  682. return val >= entry->min_field_value;
  683. }
  684. static bool
  685. has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
  686. {
  687. u64 val;
  688. WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
  689. if (scope == SCOPE_SYSTEM)
  690. val = read_sanitised_ftr_reg(entry->sys_reg);
  691. else
  692. val = __read_sysreg_by_encoding(entry->sys_reg);
  693. return feature_matches(val, entry);
  694. }
  695. static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
  696. {
  697. bool has_sre;
  698. if (!has_cpuid_feature(entry, scope))
  699. return false;
  700. has_sre = gic_enable_sre();
  701. if (!has_sre)
  702. pr_warn_once("%s present but disabled by higher exception level\n",
  703. entry->desc);
  704. return has_sre;
  705. }
  706. static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
  707. {
  708. u32 midr = read_cpuid_id();
  709. /* Cavium ThunderX pass 1.x and 2.x */
  710. return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
  711. MIDR_CPU_VAR_REV(0, 0),
  712. MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
  713. }
  714. static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
  715. {
  716. return is_kernel_in_hyp_mode();
  717. }
  718. static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
  719. int __unused)
  720. {
  721. phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
  722. /*
  723. * Activate the lower HYP offset only if:
  724. * - the idmap doesn't clash with it,
  725. * - the kernel is not running at EL2.
  726. */
  727. return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
  728. }
  729. static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
  730. {
  731. u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
  732. return cpuid_feature_extract_signed_field(pfr0,
  733. ID_AA64PFR0_FP_SHIFT) < 0;
  734. }
  735. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  736. static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
  737. static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
  738. int __unused)
  739. {
  740. u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
  741. /* Forced on command line? */
  742. if (__kpti_forced) {
  743. pr_info_once("kernel page table isolation forced %s by command line option\n",
  744. __kpti_forced > 0 ? "ON" : "OFF");
  745. return __kpti_forced > 0;
  746. }
  747. /* Useful for KASLR robustness */
  748. if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
  749. return true;
  750. /* Don't force KPTI for CPUs that are not vulnerable */
  751. switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
  752. case MIDR_CAVIUM_THUNDERX2:
  753. case MIDR_BRCM_VULCAN:
  754. return false;
  755. }
  756. /* Defer to CPU feature registers */
  757. return !cpuid_feature_extract_unsigned_field(pfr0,
  758. ID_AA64PFR0_CSV3_SHIFT);
  759. }
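/*
 * Remap swapper_pg_dir with non-global mappings, using the idmap'd helper;
 * invoked on every online CPU via stop_machine() from the capability's
 * enable hook. Once the boot CPU (cpu 0) has applied the rewrite, later
 * invocations return early.
 */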
  760. static int kpti_install_ng_mappings(void *__unused)
  761. {
  762. typedef void (kpti_remap_fn)(int, int, phys_addr_t);
  763. extern kpti_remap_fn idmap_kpti_install_ng_mappings;
  764. kpti_remap_fn *remap_fn;
  765. static bool kpti_applied = false;
  766. int cpu = smp_processor_id();
  767. if (kpti_applied)
  768. return 0;
  769. remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
  770. cpu_install_idmap();
  771. remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
  772. cpu_uninstall_idmap();
  773. if (!cpu)
  774. kpti_applied = true;
  775. return 0;
  776. }
  777. static int __init parse_kpti(char *str)
  778. {
  779. bool enabled;
  780. int ret = strtobool(str, &enabled);
  781. if (ret)
  782. return ret;
  783. __kpti_forced = enabled ? 1 : -1;
  784. return 0;
  785. }
  786. __setup("kpti=", parse_kpti);
  787. #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
  788. static int cpu_copy_el2regs(void *__unused)
  789. {
  790. /*
  791. * Copy register values that aren't redirected by hardware.
  792. *
793. * Before code patching, we only set tpidr_el1; all CPUs need to copy
  794. * this value to tpidr_el2 before we patch the code. Once we've done
  795. * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
  796. * do anything here.
  797. */
  798. if (!alternatives_applied)
  799. write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
  800. return 0;
  801. }
  802. static const struct arm64_cpu_capabilities arm64_features[] = {
  803. {
  804. .desc = "GIC system register CPU interface",
  805. .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
  806. .def_scope = SCOPE_SYSTEM,
  807. .matches = has_useable_gicv3_cpuif,
  808. .sys_reg = SYS_ID_AA64PFR0_EL1,
  809. .field_pos = ID_AA64PFR0_GIC_SHIFT,
  810. .sign = FTR_UNSIGNED,
  811. .min_field_value = 1,
  812. },
  813. #ifdef CONFIG_ARM64_PAN
  814. {
  815. .desc = "Privileged Access Never",
  816. .capability = ARM64_HAS_PAN,
  817. .def_scope = SCOPE_SYSTEM,
  818. .matches = has_cpuid_feature,
  819. .sys_reg = SYS_ID_AA64MMFR1_EL1,
  820. .field_pos = ID_AA64MMFR1_PAN_SHIFT,
  821. .sign = FTR_UNSIGNED,
  822. .min_field_value = 1,
  823. .enable = cpu_enable_pan,
  824. },
  825. #endif /* CONFIG_ARM64_PAN */
  826. #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
  827. {
  828. .desc = "LSE atomic instructions",
  829. .capability = ARM64_HAS_LSE_ATOMICS,
  830. .def_scope = SCOPE_SYSTEM,
  831. .matches = has_cpuid_feature,
  832. .sys_reg = SYS_ID_AA64ISAR0_EL1,
  833. .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
  834. .sign = FTR_UNSIGNED,
  835. .min_field_value = 2,
  836. },
  837. #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
  838. {
  839. .desc = "Software prefetching using PRFM",
  840. .capability = ARM64_HAS_NO_HW_PREFETCH,
  841. .def_scope = SCOPE_SYSTEM,
  842. .matches = has_no_hw_prefetch,
  843. },
  844. #ifdef CONFIG_ARM64_UAO
  845. {
  846. .desc = "User Access Override",
  847. .capability = ARM64_HAS_UAO,
  848. .def_scope = SCOPE_SYSTEM,
  849. .matches = has_cpuid_feature,
  850. .sys_reg = SYS_ID_AA64MMFR2_EL1,
  851. .field_pos = ID_AA64MMFR2_UAO_SHIFT,
  852. .min_field_value = 1,
  853. /*
  854. * We rely on stop_machine() calling uao_thread_switch() to set
  855. * UAO immediately after patching.
  856. */
  857. },
  858. #endif /* CONFIG_ARM64_UAO */
  859. #ifdef CONFIG_ARM64_PAN
  860. {
  861. .capability = ARM64_ALT_PAN_NOT_UAO,
  862. .def_scope = SCOPE_SYSTEM,
  863. .matches = cpufeature_pan_not_uao,
  864. },
  865. #endif /* CONFIG_ARM64_PAN */
  866. {
  867. .desc = "Virtualization Host Extensions",
  868. .capability = ARM64_HAS_VIRT_HOST_EXTN,
  869. .def_scope = SCOPE_SYSTEM,
  870. .matches = runs_at_el2,
  871. .enable = cpu_copy_el2regs,
  872. },
  873. {
  874. .desc = "32-bit EL0 Support",
  875. .capability = ARM64_HAS_32BIT_EL0,
  876. .def_scope = SCOPE_SYSTEM,
  877. .matches = has_cpuid_feature,
  878. .sys_reg = SYS_ID_AA64PFR0_EL1,
  879. .sign = FTR_UNSIGNED,
  880. .field_pos = ID_AA64PFR0_EL0_SHIFT,
  881. .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
  882. },
  883. {
  884. .desc = "Reduced HYP mapping offset",
  885. .capability = ARM64_HYP_OFFSET_LOW,
  886. .def_scope = SCOPE_SYSTEM,
  887. .matches = hyp_offset_low,
  888. },
  889. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  890. {
  891. .desc = "Kernel page table isolation (KPTI)",
  892. .capability = ARM64_UNMAP_KERNEL_AT_EL0,
  893. .def_scope = SCOPE_SYSTEM,
  894. .matches = unmap_kernel_at_el0,
  895. .enable = kpti_install_ng_mappings,
  896. },
  897. #endif
  898. {
  899. /* FP/SIMD is not implemented */
  900. .capability = ARM64_HAS_NO_FPSIMD,
  901. .def_scope = SCOPE_SYSTEM,
  902. .min_field_value = 0,
  903. .matches = has_no_fpsimd,
  904. },
  905. #ifdef CONFIG_ARM64_PMEM
  906. {
  907. .desc = "Data cache clean to Point of Persistence",
  908. .capability = ARM64_HAS_DCPOP,
  909. .def_scope = SCOPE_SYSTEM,
  910. .matches = has_cpuid_feature,
  911. .sys_reg = SYS_ID_AA64ISAR1_EL1,
  912. .field_pos = ID_AA64ISAR1_DPB_SHIFT,
  913. .min_field_value = 1,
  914. },
  915. #endif
  916. #ifdef CONFIG_ARM64_SVE
  917. {
  918. .desc = "Scalable Vector Extension",
  919. .capability = ARM64_SVE,
  920. .def_scope = SCOPE_SYSTEM,
  921. .sys_reg = SYS_ID_AA64PFR0_EL1,
  922. .sign = FTR_UNSIGNED,
  923. .field_pos = ID_AA64PFR0_SVE_SHIFT,
  924. .min_field_value = ID_AA64PFR0_SVE,
  925. .matches = has_cpuid_feature,
  926. .enable = sve_kernel_enable,
  927. },
  928. #endif /* CONFIG_ARM64_SVE */
  929. #ifdef CONFIG_ARM64_RAS_EXTN
  930. {
  931. .desc = "RAS Extension Support",
  932. .capability = ARM64_HAS_RAS_EXTN,
  933. .def_scope = SCOPE_SYSTEM,
  934. .matches = has_cpuid_feature,
  935. .sys_reg = SYS_ID_AA64PFR0_EL1,
  936. .sign = FTR_UNSIGNED,
  937. .field_pos = ID_AA64PFR0_RAS_SHIFT,
  938. .min_field_value = ID_AA64PFR0_RAS_V1,
  939. .enable = cpu_clear_disr,
  940. },
  941. #endif /* CONFIG_ARM64_RAS_EXTN */
  942. {},
  943. };
  944. #define HWCAP_CAP(reg, field, s, min_value, type, cap) \
  945. { \
  946. .desc = #cap, \
  947. .def_scope = SCOPE_SYSTEM, \
  948. .matches = has_cpuid_feature, \
  949. .sys_reg = reg, \
  950. .field_pos = field, \
  951. .sign = s, \
  952. .min_field_value = min_value, \
  953. .hwcap_type = type, \
  954. .hwcap = cap, \
  955. }
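/*
 * Each entry below reads as: advertise the given HWCAP to userspace once the
 * sanitised field reaches the minimum value, e.g. HWCAP_PMULL is set when
 * ID_AA64ISAR0_EL1.AES is at least 2 system-wide.
 */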
  956. static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
  957. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
  958. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
  959. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
  960. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
  961. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
  962. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
  963. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
  964. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
  965. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
  966. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
  967. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
  968. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
  969. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
  970. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
  971. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
  972. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
  973. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
  974. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
  975. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
  976. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
  977. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
  978. #ifdef CONFIG_ARM64_SVE
  979. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
  980. #endif
  981. {},
  982. };
  983. static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
  984. #ifdef CONFIG_COMPAT
  985. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
  986. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
  987. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
  988. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
  989. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
  990. #endif
  991. {},
  992. };
  993. static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
  994. {
  995. switch (cap->hwcap_type) {
  996. case CAP_HWCAP:
  997. elf_hwcap |= cap->hwcap;
  998. break;
  999. #ifdef CONFIG_COMPAT
  1000. case CAP_COMPAT_HWCAP:
  1001. compat_elf_hwcap |= (u32)cap->hwcap;
  1002. break;
  1003. case CAP_COMPAT_HWCAP2:
  1004. compat_elf_hwcap2 |= (u32)cap->hwcap;
  1005. break;
  1006. #endif
  1007. default:
  1008. WARN_ON(1);
  1009. break;
  1010. }
  1011. }
  1012. /* Check if we have a particular HWCAP enabled */
  1013. static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
  1014. {
  1015. bool rc;
  1016. switch (cap->hwcap_type) {
  1017. case CAP_HWCAP:
  1018. rc = (elf_hwcap & cap->hwcap) != 0;
  1019. break;
  1020. #ifdef CONFIG_COMPAT
  1021. case CAP_COMPAT_HWCAP:
  1022. rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
  1023. break;
  1024. case CAP_COMPAT_HWCAP2:
  1025. rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
  1026. break;
  1027. #endif
  1028. default:
  1029. WARN_ON(1);
  1030. rc = false;
  1031. }
  1032. return rc;
  1033. }
  1034. static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
  1035. {
  1036. /* We support emulation of accesses to CPU ID feature registers */
  1037. elf_hwcap |= HWCAP_CPUID;
  1038. for (; hwcaps->matches; hwcaps++)
  1039. if (hwcaps->matches(hwcaps, hwcaps->def_scope))
  1040. cap_set_elf_hwcap(hwcaps);
  1041. }
  1042. /*
  1043. * Check if the current CPU has a given feature capability.
  1044. * Should be called from non-preemptible context.
  1045. */
  1046. static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
  1047. unsigned int cap)
  1048. {
  1049. const struct arm64_cpu_capabilities *caps;
  1050. if (WARN_ON(preemptible()))
  1051. return false;
  1052. for (caps = cap_array; caps->matches; caps++)
  1053. if (caps->capability == cap &&
  1054. caps->matches(caps, SCOPE_LOCAL_CPU))
  1055. return true;
  1056. return false;
  1057. }
  1058. void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  1059. const char *info)
  1060. {
  1061. for (; caps->matches; caps++) {
  1062. if (!caps->matches(caps, caps->def_scope))
  1063. continue;
  1064. if (!cpus_have_cap(caps->capability) && caps->desc)
  1065. pr_info("%s %s\n", info, caps->desc);
  1066. cpus_set_cap(caps->capability);
  1067. }
  1068. }
  1069. /*
1070. * Run through the enabled capabilities and enable() them on all active
1071. * CPUs.
  1072. */
  1073. void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
  1074. {
  1075. for (; caps->matches; caps++) {
  1076. unsigned int num = caps->capability;
  1077. if (!cpus_have_cap(num))
  1078. continue;
  1079. /* Ensure cpus_have_const_cap(num) works */
  1080. static_branch_enable(&cpu_hwcap_keys[num]);
  1081. if (caps->enable) {
  1082. /*
  1083. * Use stop_machine() as it schedules the work allowing
  1084. * us to modify PSTATE, instead of on_each_cpu() which
  1085. * uses an IPI, giving us a PSTATE that disappears when
  1086. * we return.
  1087. */
  1088. stop_machine(caps->enable, (void *)caps, cpu_online_mask);
  1089. }
  1090. }
  1091. }
  1092. /*
  1093. * Check for CPU features that are used in early boot
  1094. * based on the Boot CPU value.
  1095. */
  1096. static void check_early_cpu_features(void)
  1097. {
  1098. verify_cpu_run_el();
  1099. verify_cpu_asid_bits();
  1100. }
  1101. static void
  1102. verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
  1103. {
  1104. for (; caps->matches; caps++)
  1105. if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
  1106. pr_crit("CPU%d: missing HWCAP: %s\n",
  1107. smp_processor_id(), caps->desc);
  1108. cpu_die_early();
  1109. }
  1110. }
  1111. static void
  1112. verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
  1113. {
  1114. const struct arm64_cpu_capabilities *caps = caps_list;
  1115. for (; caps->matches; caps++) {
  1116. if (!cpus_have_cap(caps->capability))
  1117. continue;
  1118. /*
1119. * If the new CPU misses an advertised feature, we cannot proceed
1120. * further; park the CPU.
  1121. */
  1122. if (!__this_cpu_has_cap(caps_list, caps->capability)) {
  1123. pr_crit("CPU%d: missing feature: %s\n",
  1124. smp_processor_id(), caps->desc);
  1125. cpu_die_early();
  1126. }
  1127. if (caps->enable)
  1128. caps->enable((void *)caps);
  1129. }
  1130. }
  1131. static void verify_sve_features(void)
  1132. {
  1133. u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
  1134. u64 zcr = read_zcr_features();
  1135. unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
  1136. unsigned int len = zcr & ZCR_ELx_LEN_MASK;
  1137. if (len < safe_len || sve_verify_vq_map()) {
  1138. pr_crit("CPU%d: SVE: required vector length(s) missing\n",
  1139. smp_processor_id());
  1140. cpu_die_early();
  1141. }
  1142. /* Add checks on other ZCR bits here if necessary */
  1143. }
  1144. /*
1145. * Run through the enabled system capabilities and enable() them on this CPU.
1146. * The capabilities were decided based on the available CPUs at boot time.
  1147. * Any new CPU should match the system wide status of the capability. If the
  1148. * new CPU doesn't have a capability which the system now has enabled, we
  1149. * cannot do anything to fix it up and could cause unexpected failures. So
  1150. * we park the CPU.
  1151. */
  1152. static void verify_local_cpu_capabilities(void)
  1153. {
  1154. verify_local_cpu_errata_workarounds();
  1155. verify_local_cpu_features(arm64_features);
  1156. verify_local_elf_hwcaps(arm64_elf_hwcaps);
  1157. if (system_supports_32bit_el0())
  1158. verify_local_elf_hwcaps(compat_elf_hwcaps);
  1159. if (system_supports_sve())
  1160. verify_sve_features();
  1161. if (system_uses_ttbr0_pan())
  1162. pr_info("Emulating Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
  1163. }
  1164. void check_local_cpu_capabilities(void)
  1165. {
  1166. /*
  1167. * All secondary CPUs should conform to the early CPU features
1168. * in use by the kernel, based on the boot CPU.
  1169. */
  1170. check_early_cpu_features();
  1171. /*
  1172. * If we haven't finalised the system capabilities, this CPU gets
1173. * a chance to update the errata workarounds.
  1174. * Otherwise, this CPU should verify that it has all the system
  1175. * advertised capabilities.
  1176. */
  1177. if (!sys_caps_initialised)
  1178. update_cpu_errata_workarounds();
  1179. else
  1180. verify_local_cpu_capabilities();
  1181. }
  1182. static void __init setup_feature_capabilities(void)
  1183. {
  1184. update_cpu_capabilities(arm64_features, "detected feature:");
  1185. enable_cpu_capabilities(arm64_features);
  1186. }
  1187. DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
  1188. EXPORT_SYMBOL(arm64_const_caps_ready);
  1189. static void __init mark_const_caps_ready(void)
  1190. {
  1191. static_branch_enable(&arm64_const_caps_ready);
  1192. }
  1193. extern const struct arm64_cpu_capabilities arm64_errata[];
  1194. bool this_cpu_has_cap(unsigned int cap)
  1195. {
  1196. return (__this_cpu_has_cap(arm64_features, cap) ||
  1197. __this_cpu_has_cap(arm64_errata, cap));
  1198. }
  1199. void __init setup_cpu_features(void)
  1200. {
  1201. u32 cwg;
  1202. int cls;
1203. /* Set the CPU feature capabilities */
  1204. setup_feature_capabilities();
  1205. enable_errata_workarounds();
  1206. mark_const_caps_ready();
  1207. setup_elf_hwcaps(arm64_elf_hwcaps);
  1208. if (system_supports_32bit_el0())
  1209. setup_elf_hwcaps(compat_elf_hwcaps);
  1210. sve_setup();
  1211. /* Advertise that we have computed the system capabilities */
  1212. set_sys_caps_initialised();
  1213. /*
  1214. * Check for sane CTR_EL0.CWG value.
  1215. */
  1216. cwg = cache_type_cwg();
  1217. cls = cache_line_size();
  1218. if (!cwg)
  1219. pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
  1220. cls);
  1221. if (L1_CACHE_BYTES < cls)
  1222. pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
  1223. L1_CACHE_BYTES, cls);
  1224. }
  1225. static bool __maybe_unused
  1226. cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
  1227. {
  1228. return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
  1229. }
  1230. /*
  1231. * We emulate only the following system register space.
  1232. * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
  1233. * See Table C5-6 System instruction encodings for System register accesses,
1234. * ARMv8 ARM (ARM DDI 0487A.f) for more details.
  1235. */
  1236. static inline bool __attribute_const__ is_emulated(u32 id)
  1237. {
  1238. return (sys_reg_Op0(id) == 0x3 &&
  1239. sys_reg_CRn(id) == 0x0 &&
  1240. sys_reg_Op1(id) == 0x0 &&
  1241. (sys_reg_CRm(id) == 0 ||
  1242. ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
  1243. }
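/*
 * E.g. MIDR_EL1 (Op0=3, Op1=0, CRn=0, CRm=0) and ID_AA64PFR0_EL1 (CRm=4)
 * fall inside the emulated space; CTR_EL0 (CRn=0, CRm=0, but Op1=3) does not.
 */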
  1244. /*
1245. * With CRm == 0, reg should be one of:
  1246. * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
  1247. */
  1248. static inline int emulate_id_reg(u32 id, u64 *valp)
  1249. {
  1250. switch (id) {
  1251. case SYS_MIDR_EL1:
  1252. *valp = read_cpuid_id();
  1253. break;
  1254. case SYS_MPIDR_EL1:
  1255. *valp = SYS_MPIDR_SAFE_VAL;
  1256. break;
  1257. case SYS_REVIDR_EL1:
  1258. /* IMPLEMENTATION DEFINED values are emulated with 0 */
  1259. *valp = 0;
  1260. break;
  1261. default:
  1262. return -EINVAL;
  1263. }
  1264. return 0;
  1265. }
  1266. static int emulate_sys_reg(u32 id, u64 *valp)
  1267. {
  1268. struct arm64_ftr_reg *regp;
  1269. if (!is_emulated(id))
  1270. return -EINVAL;
  1271. if (sys_reg_CRm(id) == 0)
  1272. return emulate_id_reg(id, valp);
  1273. regp = get_arm64_ftr_reg(id);
  1274. if (regp)
  1275. *valp = arm64_ftr_reg_user_value(regp);
  1276. else
  1277. /*
  1278. * The untracked registers are either IMPLEMENTATION DEFINED
  1279. * (e.g, ID_AFR0_EL1) or reserved RAZ.
  1280. */
  1281. *valp = 0;
  1282. return 0;
  1283. }
  1284. static int emulate_mrs(struct pt_regs *regs, u32 insn)
  1285. {
  1286. int rc;
  1287. u32 sys_reg, dst;
  1288. u64 val;
  1289. /*
1290. * sys_reg values are defined as used in mrs/msr instructions.
1291. * Shift the imm value to get the encoding.
  1292. */
  1293. sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
  1294. rc = emulate_sys_reg(sys_reg, &val);
  1295. if (!rc) {
  1296. dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
  1297. pt_regs_write_reg(regs, dst, val);
  1298. arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
  1299. }
  1300. return rc;
  1301. }
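/*
 * Hook AArch64 MRS instructions (top 12 bits 0xd53) trapped from EL0 so the
 * ID register reads above can be emulated.
 */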
  1302. static struct undef_hook mrs_hook = {
  1303. .instr_mask = 0xfff00000,
  1304. .instr_val = 0xd5300000,
  1305. .pstate_mask = COMPAT_PSR_MODE_MASK,
  1306. .pstate_val = PSR_MODE_EL0t,
  1307. .fn = emulate_mrs,
  1308. };
  1309. static int __init enable_mrs_emulation(void)
  1310. {
  1311. register_undef_hook(&mrs_hook);
  1312. return 0;
  1313. }
  1314. core_initcall(enable_mrs_emulation);
  1315. int cpu_clear_disr(void *__unused)
  1316. {
  1317. /* Firmware may have left a deferred SError in this register. */
  1318. write_sysreg_s(0, SYS_DISR_EL1);
  1319. return 0;
  1320. }