cpufeature.c

  1. /*
  2. * Contains CPU feature definitions
  3. *
  4. * Copyright (C) 2015 ARM Ltd.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #define pr_fmt(fmt) "CPU features: " fmt
  19. #include <linux/bsearch.h>
  20. #include <linux/cpumask.h>
  21. #include <linux/sort.h>
  22. #include <linux/stop_machine.h>
  23. #include <linux/types.h>
  24. #include <linux/mm.h>
  25. #include <asm/cpu.h>
  26. #include <asm/cpufeature.h>
  27. #include <asm/cpu_ops.h>
  28. #include <asm/fpsimd.h>
  29. #include <asm/mmu_context.h>
  30. #include <asm/processor.h>
  31. #include <asm/sysreg.h>
  32. #include <asm/traps.h>
  33. #include <asm/virt.h>
  34. unsigned long elf_hwcap __read_mostly;
  35. EXPORT_SYMBOL_GPL(elf_hwcap);
  36. #ifdef CONFIG_COMPAT
  37. #define COMPAT_ELF_HWCAP_DEFAULT \
  38. (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
  39. COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
  40. COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
  41. COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
  42. COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
  43. COMPAT_HWCAP_LPAE)
  44. unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
  45. unsigned int compat_elf_hwcap2 __read_mostly;
  46. #endif
  47. DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
  48. EXPORT_SYMBOL(cpu_hwcaps);
  49. /*
  50. * Flag to indicate if we have computed the system wide
  51. * capabilities based on the boot time active CPUs. This
  52. * will be used to determine if a new booting CPU should
  53. * go through the verification process to make sure that it
  54. * supports the system capabilities, without using a hotplug
  55. * notifier.
  56. */
  57. static bool sys_caps_initialised;
  58. static inline void set_sys_caps_initialised(void)
  59. {
  60. sys_caps_initialised = true;
  61. }
  62. static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
  63. {
  64. /* file-wide pr_fmt adds "CPU features: " prefix */
  65. pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
  66. return 0;
  67. }
  68. static struct notifier_block cpu_hwcaps_notifier = {
  69. .notifier_call = dump_cpu_hwcaps
  70. };
  71. static int __init register_cpu_hwcaps_dumper(void)
  72. {
  73. atomic_notifier_chain_register(&panic_notifier_list,
  74. &cpu_hwcaps_notifier);
  75. return 0;
  76. }
  77. __initcall(register_cpu_hwcaps_dumper);
  78. DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
  79. EXPORT_SYMBOL(cpu_hwcap_keys);
  80. #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  81. { \
  82. .sign = SIGNED, \
  83. .visible = VISIBLE, \
  84. .strict = STRICT, \
  85. .type = TYPE, \
  86. .shift = SHIFT, \
  87. .width = WIDTH, \
  88. .safe_val = SAFE_VAL, \
  89. }
  90. /* Define a feature with unsigned values */
  91. #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  92. __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
  93. /* Define a feature with a signed value */
  94. #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  95. __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
  96. #define ARM64_FTR_END \
  97. { \
  98. .width = 0, \
  99. }
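/*
 * Note (added for clarity, not in the original file): each arm64_ftr_bits
 * entry below describes one field of an ID register: its position (shift,
 * width), whether it is visible to userspace, whether mismatches between
 * CPUs are tolerated (strict vs. non-strict), and how a safe system-wide
 * value is chosen (see arm64_ftr_safe_value() further down). For example,
 * the ID_AA64ISAR0_EL1.AES entry uses FTR_LOWER_SAFE with a safe value of
 * 0, so the sanitised value is the lowest one reported by any CPU.
 */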
  100. /* meta feature for alternatives */
  101. static bool __maybe_unused
  102. cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
  103. /*
  104. * NOTE: Any changes to the visibility of features should be kept in
  105. * sync with the documentation of the CPU feature register ABI.
  106. */
  107. static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
  108. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
  109. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
  110. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
  111. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
  112. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
  113. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
  114. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
  115. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
  116. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
  117. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
  118. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
  119. ARM64_FTR_END,
  120. };
  121. static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
  122. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
  123. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
  124. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
  125. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
  126. ARM64_FTR_END,
  127. };
  128. static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
  129. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
  130. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
  131. ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
  132. FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
  133. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
  134. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
  135. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
  136. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
  137. /* Linux doesn't care about the EL3 */
  138. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
  139. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
  140. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
  141. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
  142. ARM64_FTR_END,
  143. };
  144. static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
  145. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
  146. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
  147. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
  148. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
  149. /* Linux shouldn't care about secure memory */
  150. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
  151. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
  152. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
  153. /*
  154. * Differing PARange is fine as long as all peripherals and memory are mapped
  155. * within the minimum PARange of all CPUs
  156. */
  157. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
  158. ARM64_FTR_END,
  159. };
  160. static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
  161. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
  162. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
  163. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
  164. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
  165. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
  166. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
  167. ARM64_FTR_END,
  168. };
  169. static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
  170. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
  171. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
  172. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
  173. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
  174. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
  175. ARM64_FTR_END,
  176. };
  177. static const struct arm64_ftr_bits ftr_ctr[] = {
  178. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
  179. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
  180. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
  181. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
  182. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
  183. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
  184. /*
  185. * Linux can handle differing I-cache policies. Userspace JITs will
  186. * make use of *minLine.
  187. * If we have differing I-cache policies, report it as the weakest - VIPT.
  188. */
  189. ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
  190. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
  191. ARM64_FTR_END,
  192. };
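/*
 * Note (added for clarity): CWG and ERG above use FTR_HIGHER_SAFE, so when
 * CPUs disagree the larger granule is what the sanitised CTR_EL0 reports
 * system-wide.
 */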
  193. struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
  194. .name = "SYS_CTR_EL0",
  195. .ftr_bits = ftr_ctr
  196. };
  197. static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
  198. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */
  199. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */
  200. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
  201. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */
  202. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */
  203. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */
  204. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */
  205. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */
  206. ARM64_FTR_END,
  207. };
  208. static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
  209. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
  210. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
  211. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
  212. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
  213. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
  214. /*
  215. * We can instantiate multiple PMU instances with different levels
  216. * of support.
  217. */
  218. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
  219. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
  220. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
  221. ARM64_FTR_END,
  222. };
  223. static const struct arm64_ftr_bits ftr_mvfr2[] = {
  224. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
  225. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
  226. ARM64_FTR_END,
  227. };
  228. static const struct arm64_ftr_bits ftr_dczid[] = {
  229. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */
  230. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */
  231. ARM64_FTR_END,
  232. };
  233. static const struct arm64_ftr_bits ftr_id_isar5[] = {
  234. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
  235. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
  236. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
  237. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
  238. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
  239. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
  240. ARM64_FTR_END,
  241. };
  242. static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
  243. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */
  244. ARM64_FTR_END,
  245. };
  246. static const struct arm64_ftr_bits ftr_id_pfr0[] = {
  247. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
  248. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
  249. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */
  250. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */
  251. ARM64_FTR_END,
  252. };
  253. static const struct arm64_ftr_bits ftr_id_dfr0[] = {
  254. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  255. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
  256. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  257. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  258. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  259. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  260. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  261. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  262. ARM64_FTR_END,
  263. };
  264. static const struct arm64_ftr_bits ftr_zcr[] = {
  265. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
  266. ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
  267. ARM64_FTR_END,
  268. };
  269. /*
  270. * Common ftr bits for a 32bit register with all hidden, strict
  271. * attributes, with 4bit feature fields and a default safe value of
  272. * 0. Covers the following 32bit registers:
  273. * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
  274. */
  275. static const struct arm64_ftr_bits ftr_generic_32bits[] = {
  276. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  277. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
  278. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  279. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  280. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  281. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  282. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  283. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  284. ARM64_FTR_END,
  285. };
  286. /* Table for a single 32bit feature value */
  287. static const struct arm64_ftr_bits ftr_single32[] = {
  288. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
  289. ARM64_FTR_END,
  290. };
  291. static const struct arm64_ftr_bits ftr_raz[] = {
  292. ARM64_FTR_END,
  293. };
  294. #define ARM64_FTR_REG(id, table) { \
  295. .sys_id = id, \
  296. .reg = &(struct arm64_ftr_reg){ \
  297. .name = #id, \
  298. .ftr_bits = &((table)[0]), \
  299. }}
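/*
 * Note (added for clarity): ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
 * for instance, expands to an entry with .sys_id = SYS_ID_PFR0_EL1 and a
 * backing arm64_ftr_reg whose .name is "SYS_ID_PFR0_EL1" and whose
 * .ftr_bits points at the ftr_id_pfr0 table above.
 */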
  300. static const struct __ftr_reg_entry {
  301. u32 sys_id;
  302. struct arm64_ftr_reg *reg;
  303. } arm64_ftr_regs[] = {
  304. /* Op1 = 0, CRn = 0, CRm = 1 */
  305. ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
  306. ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
  307. ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
  308. ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
  309. ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
  310. ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
  311. ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
  312. /* Op1 = 0, CRn = 0, CRm = 2 */
  313. ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
  314. ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
  315. ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
  316. ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
  317. ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
  318. ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
  319. ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
  320. /* Op1 = 0, CRn = 0, CRm = 3 */
  321. ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
  322. ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
  323. ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
  324. /* Op1 = 0, CRn = 0, CRm = 4 */
  325. ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
  326. ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
  327. ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
  328. /* Op1 = 0, CRn = 0, CRm = 5 */
  329. ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
  330. ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
  331. /* Op1 = 0, CRn = 0, CRm = 6 */
  332. ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
  333. ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
  334. /* Op1 = 0, CRn = 0, CRm = 7 */
  335. ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
  336. ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
  337. ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
  338. /* Op1 = 0, CRn = 1, CRm = 2 */
  339. ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
  340. /* Op1 = 3, CRn = 0, CRm = 0 */
  341. { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
  342. ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
  343. /* Op1 = 3, CRn = 14, CRm = 0 */
  344. ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
  345. };
  346. static int search_cmp_ftr_reg(const void *id, const void *regp)
  347. {
  348. return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
  349. }
  350. /*
  351. * get_arm64_ftr_reg - Lookup a feature register entry using its
  352. * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
  353. * ascending order of sys_id, we use binary search to find a matching
  354. * entry.
  355. *
  356. * returns - Upon success, matching ftr_reg entry for id.
  357. * - NULL on failure. It is up to the caller to decide
  358. * the impact of a failure.
  359. */
  360. static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
  361. {
  362. const struct __ftr_reg_entry *ret;
  363. ret = bsearch((const void *)(unsigned long)sys_id,
  364. arm64_ftr_regs,
  365. ARRAY_SIZE(arm64_ftr_regs),
  366. sizeof(arm64_ftr_regs[0]),
  367. search_cmp_ftr_reg);
  368. if (ret)
  369. return ret->reg;
  370. return NULL;
  371. }
  372. static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
  373. s64 ftr_val)
  374. {
  375. u64 mask = arm64_ftr_mask(ftrp);
  376. reg &= ~mask;
  377. reg |= (ftr_val << ftrp->shift) & mask;
  378. return reg;
  379. }
  380. static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
  381. s64 cur)
  382. {
  383. s64 ret = 0;
  384. switch (ftrp->type) {
  385. case FTR_EXACT:
  386. ret = ftrp->safe_val;
  387. break;
  388. case FTR_LOWER_SAFE:
  389. ret = new < cur ? new : cur;
  390. break;
  391. case FTR_HIGHER_SAFE:
  392. ret = new > cur ? new : cur;
  393. break;
  394. default:
  395. BUG();
  396. }
  397. return ret;
  398. }
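/*
 * Worked example (added for clarity): for a FTR_LOWER_SAFE field such as
 * ID_AA64ISAR0_EL1.AES, a boot CPU reporting 2 (AES + PMULL) combined with
 * a secondary CPU reporting 1 (AES only) yields a sanitised value of 1.
 * For FTR_EXACT fields, any mismatch collapses to the entry's safe_val.
 */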
  399. static void __init sort_ftr_regs(void)
  400. {
  401. int i;
  402. /* Check that the array is sorted so that we can do the binary search */
  403. for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
  404. BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
  405. }
  406. /*
  407. * Initialise the CPU feature register from Boot CPU values.
  408. * Also initialises the strict_mask for the register.
  409. * Any bits that are not covered by an arm64_ftr_bits entry are considered
  410. * RES0 for the system-wide value, and must strictly match.
  411. */
  412. static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
  413. {
  414. u64 val = 0;
  415. u64 strict_mask = ~0x0ULL;
  416. u64 user_mask = 0;
  417. u64 valid_mask = 0;
  418. const struct arm64_ftr_bits *ftrp;
  419. struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
  420. BUG_ON(!reg);
  421. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  422. u64 ftr_mask = arm64_ftr_mask(ftrp);
  423. s64 ftr_new = arm64_ftr_value(ftrp, new);
  424. val = arm64_ftr_set_value(ftrp, val, ftr_new);
  425. valid_mask |= ftr_mask;
  426. if (!ftrp->strict)
  427. strict_mask &= ~ftr_mask;
  428. if (ftrp->visible)
  429. user_mask |= ftr_mask;
  430. else
  431. reg->user_val = arm64_ftr_set_value(ftrp,
  432. reg->user_val,
  433. ftrp->safe_val);
  434. }
  435. val &= valid_mask;
  436. reg->sys_val = val;
  437. reg->strict_mask = strict_mask;
  438. reg->user_mask = user_mask;
  439. }
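/*
 * Note (added for clarity): after this runs, reg->sys_val holds the boot
 * CPU's view of the register with undescribed bits cleared, strict_mask
 * covers the fields that must match exactly on every CPU, user_mask
 * selects the fields exposed to userspace via the MRS emulation below,
 * and hidden fields keep their safe value in reg->user_val.
 */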
  440. void __init init_cpu_features(struct cpuinfo_arm64 *info)
  441. {
  442. /* Before we start using the tables, make sure they are sorted */
  443. sort_ftr_regs();
  444. init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
  445. init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
  446. init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
  447. init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
  448. init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
  449. init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
  450. init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
  451. init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
  452. init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
  453. init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
  454. init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
  455. init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
  456. init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
  457. if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  458. init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
  459. init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
  460. init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
  461. init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
  462. init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
  463. init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
  464. init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
  465. init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
  466. init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
  467. init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
  468. init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
  469. init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
  470. init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
  471. init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
  472. init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
  473. init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
  474. }
  475. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  476. init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
  477. sve_init_vq_map();
  478. }
  479. }
  480. static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
  481. {
  482. const struct arm64_ftr_bits *ftrp;
  483. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  484. s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
  485. s64 ftr_new = arm64_ftr_value(ftrp, new);
  486. if (ftr_cur == ftr_new)
  487. continue;
  488. /* Find a safe value */
  489. ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
  490. reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
  491. }
  492. }
  493. static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
  494. {
  495. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
  496. BUG_ON(!regp);
  497. update_cpu_ftr_reg(regp, val);
  498. if ((boot & regp->strict_mask) == (val & regp->strict_mask))
  499. return 0;
  500. pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
  501. regp->name, boot, cpu, val);
  502. return 1;
  503. }
  504. /*
  505. * Update system wide CPU feature registers with the values from a
  506. * non-boot CPU. Also performs SANITY checks to make sure that there
  507. * aren't any insane variations from that of the boot CPU.
  508. */
  509. void update_cpu_features(int cpu,
  510. struct cpuinfo_arm64 *info,
  511. struct cpuinfo_arm64 *boot)
  512. {
  513. int taint = 0;
  514. /*
  515. * The kernel can handle differing I-cache policies, but otherwise
  516. * caches should look identical. Userspace JITs will make use of
  517. * *minLine.
  518. */
  519. taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
  520. info->reg_ctr, boot->reg_ctr);
  521. /*
  522. * Userspace may perform DC ZVA instructions. Mismatched block sizes
  523. * could result in too much or too little memory being zeroed if a
  524. * process is preempted and migrated between CPUs.
  525. */
  526. taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
  527. info->reg_dczid, boot->reg_dczid);
  528. /* If different, timekeeping will be broken (especially with KVM) */
  529. taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
  530. info->reg_cntfrq, boot->reg_cntfrq);
  531. /*
  532. * The kernel uses self-hosted debug features and expects CPUs to
  533. * support identical debug features. We presently need CTX_CMPs, WRPs,
  534. * and BRPs to be identical.
  535. * ID_AA64DFR1 is currently RES0.
  536. */
  537. taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
  538. info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
  539. taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
  540. info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
  541. /*
  542. * Even in big.LITTLE, processors should be identical instruction-set
  543. * wise.
  544. */
  545. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
  546. info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
  547. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
  548. info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
  549. /*
  550. * Differing PARange support is fine as long as all peripherals and
  551. * memory are mapped within the minimum PARange of all CPUs.
  552. * Linux should not care about secure memory.
  553. */
  554. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
  555. info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
  556. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
  557. info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
  558. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
  559. info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
  560. /*
  561. * EL3 is not our concern.
  562. * ID_AA64PFR1 is currently RES0.
  563. */
  564. taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
  565. info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
  566. taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
  567. info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
  568. taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
  569. info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
  570. /*
  571. * If we have AArch32, we care about 32-bit features for compat.
  572. * If the system doesn't support AArch32, don't update them.
  573. */
  574. if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  575. id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  576. taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
  577. info->reg_id_dfr0, boot->reg_id_dfr0);
  578. taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
  579. info->reg_id_isar0, boot->reg_id_isar0);
  580. taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
  581. info->reg_id_isar1, boot->reg_id_isar1);
  582. taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
  583. info->reg_id_isar2, boot->reg_id_isar2);
  584. taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
  585. info->reg_id_isar3, boot->reg_id_isar3);
  586. taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
  587. info->reg_id_isar4, boot->reg_id_isar4);
  588. taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
  589. info->reg_id_isar5, boot->reg_id_isar5);
  590. /*
  591. * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
  592. * ACTLR formats could differ across CPUs and therefore would have to
  593. * be trapped for virtualization anyway.
  594. */
  595. taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
  596. info->reg_id_mmfr0, boot->reg_id_mmfr0);
  597. taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
  598. info->reg_id_mmfr1, boot->reg_id_mmfr1);
  599. taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
  600. info->reg_id_mmfr2, boot->reg_id_mmfr2);
  601. taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
  602. info->reg_id_mmfr3, boot->reg_id_mmfr3);
  603. taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
  604. info->reg_id_pfr0, boot->reg_id_pfr0);
  605. taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
  606. info->reg_id_pfr1, boot->reg_id_pfr1);
  607. taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
  608. info->reg_mvfr0, boot->reg_mvfr0);
  609. taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
  610. info->reg_mvfr1, boot->reg_mvfr1);
  611. taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
  612. info->reg_mvfr2, boot->reg_mvfr2);
  613. }
  614. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  615. taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
  616. info->reg_zcr, boot->reg_zcr);
  617. /* Probe vector lengths, unless we already gave up on SVE */
  618. if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  619. !sys_caps_initialised)
  620. sve_update_vq_map();
  621. }
  622. /*
  623. * Mismatched CPU features are a recipe for disaster. Don't even
  624. * pretend to support them.
  625. */
  626. if (taint) {
  627. pr_warn_once("Unsupported CPU feature variation detected.\n");
  628. add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
  629. }
  630. }
  631. u64 read_sanitised_ftr_reg(u32 id)
  632. {
  633. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
  634. /* We shouldn't get a request for an unsupported register */
  635. BUG_ON(!regp);
  636. return regp->sys_val;
  637. }
  638. #define read_sysreg_case(r) \
  639. case r: return read_sysreg_s(r)
  640. /*
  641. * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
  642. * Read the system register on the current CPU
  643. */
  644. static u64 __read_sysreg_by_encoding(u32 sys_id)
  645. {
  646. switch (sys_id) {
  647. read_sysreg_case(SYS_ID_PFR0_EL1);
  648. read_sysreg_case(SYS_ID_PFR1_EL1);
  649. read_sysreg_case(SYS_ID_DFR0_EL1);
  650. read_sysreg_case(SYS_ID_MMFR0_EL1);
  651. read_sysreg_case(SYS_ID_MMFR1_EL1);
  652. read_sysreg_case(SYS_ID_MMFR2_EL1);
  653. read_sysreg_case(SYS_ID_MMFR3_EL1);
  654. read_sysreg_case(SYS_ID_ISAR0_EL1);
  655. read_sysreg_case(SYS_ID_ISAR1_EL1);
  656. read_sysreg_case(SYS_ID_ISAR2_EL1);
  657. read_sysreg_case(SYS_ID_ISAR3_EL1);
  658. read_sysreg_case(SYS_ID_ISAR4_EL1);
  659. read_sysreg_case(SYS_ID_ISAR5_EL1);
  660. read_sysreg_case(SYS_MVFR0_EL1);
  661. read_sysreg_case(SYS_MVFR1_EL1);
  662. read_sysreg_case(SYS_MVFR2_EL1);
  663. read_sysreg_case(SYS_ID_AA64PFR0_EL1);
  664. read_sysreg_case(SYS_ID_AA64PFR1_EL1);
  665. read_sysreg_case(SYS_ID_AA64DFR0_EL1);
  666. read_sysreg_case(SYS_ID_AA64DFR1_EL1);
  667. read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
  668. read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
  669. read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
  670. read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
  671. read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
  672. read_sysreg_case(SYS_CNTFRQ_EL0);
  673. read_sysreg_case(SYS_CTR_EL0);
  674. read_sysreg_case(SYS_DCZID_EL0);
  675. default:
  676. BUG();
  677. return 0;
  678. }
  679. }
  680. #include <linux/irqchip/arm-gic-v3.h>
  681. static bool
  682. feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
  683. {
  684. int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
  685. return val >= entry->min_field_value;
  686. }
  687. static bool
  688. has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
  689. {
  690. u64 val;
  691. WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
  692. if (scope == SCOPE_SYSTEM)
  693. val = read_sanitised_ftr_reg(entry->sys_reg);
  694. else
  695. val = __read_sysreg_by_encoding(entry->sys_reg);
  696. return feature_matches(val, entry);
  697. }
  698. static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
  699. {
  700. bool has_sre;
  701. if (!has_cpuid_feature(entry, scope))
  702. return false;
  703. has_sre = gic_enable_sre();
  704. if (!has_sre)
  705. pr_warn_once("%s present but disabled by higher exception level\n",
  706. entry->desc);
  707. return has_sre;
  708. }
  709. static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
  710. {
  711. u32 midr = read_cpuid_id();
  712. /* Cavium ThunderX pass 1.x and 2.x */
  713. return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
  714. MIDR_CPU_VAR_REV(0, 0),
  715. MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
  716. }
  717. static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
  718. {
  719. return is_kernel_in_hyp_mode();
  720. }
  721. static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
  722. int __unused)
  723. {
  724. phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
  725. /*
  726. * Activate the lower HYP offset only if:
  727. * - the idmap doesn't clash with it,
  728. * - the kernel is not running at EL2.
  729. */
  730. return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
  731. }
  732. static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
  733. {
  734. u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
  735. return cpuid_feature_extract_signed_field(pfr0,
  736. ID_AA64PFR0_FP_SHIFT) < 0;
  737. }
  738. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  739. static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
  740. static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
  741. int __unused)
  742. {
  743. char const *str = "command line option";
  744. u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
  745. /*
  746. * For reasons that aren't entirely clear, enabling KPTI on Cavium
  747. * ThunderX leads to apparent I-cache corruption of kernel text, which
  748. * ends as well as you might imagine. Don't even try.
  749. */
  750. if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
  751. str = "ARM64_WORKAROUND_CAVIUM_27456";
  752. __kpti_forced = -1;
  753. }
  754. /* Forced? */
  755. if (__kpti_forced) {
  756. pr_info_once("kernel page table isolation forced %s by %s\n",
  757. __kpti_forced > 0 ? "ON" : "OFF", str);
  758. return __kpti_forced > 0;
  759. }
  760. /* Useful for KASLR robustness */
  761. if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
  762. return true;
  763. /* Don't force KPTI for CPUs that are not vulnerable */
  764. switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
  765. case MIDR_CAVIUM_THUNDERX2:
  766. case MIDR_BRCM_VULCAN:
  767. return false;
  768. }
  769. /* Defer to CPU feature registers */
  770. return !cpuid_feature_extract_unsigned_field(pfr0,
  771. ID_AA64PFR0_CSV3_SHIFT);
  772. }
  773. static int kpti_install_ng_mappings(void *__unused)
  774. {
  775. typedef void (kpti_remap_fn)(int, int, phys_addr_t);
  776. extern kpti_remap_fn idmap_kpti_install_ng_mappings;
  777. kpti_remap_fn *remap_fn;
  778. static bool kpti_applied = false;
  779. int cpu = smp_processor_id();
  780. if (kpti_applied)
  781. return 0;
  782. remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
  783. cpu_install_idmap();
  784. remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
  785. cpu_uninstall_idmap();
  786. if (!cpu)
  787. kpti_applied = true;
  788. return 0;
  789. }
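/*
 * Note (added for clarity): this runs on every online CPU via
 * stop_machine() (see the .enable hook in arm64_features below). Each CPU
 * branches into the idmap'd idmap_kpti_install_ng_mappings helper so the
 * swapper page tables can be remapped with non-global (nG) entries while
 * nothing else runs; only CPU 0 marks the work as done, so the remapping
 * is applied a single time.
 */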
  790. static int __init parse_kpti(char *str)
  791. {
  792. bool enabled;
  793. int ret = strtobool(str, &enabled);
  794. if (ret)
  795. return ret;
  796. __kpti_forced = enabled ? 1 : -1;
  797. return 0;
  798. }
  799. __setup("kpti=", parse_kpti);
  800. #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
  801. static int cpu_copy_el2regs(void *__unused)
  802. {
  803. /*
  804. * Copy register values that aren't redirected by hardware.
  805. *
  806. * Before code patching, we only set tpidr_el1, so all CPUs need to copy
  807. * this value to tpidr_el2 before we patch the code. Once we've done
  808. * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
  809. * do anything here.
  810. */
  811. if (!alternatives_applied)
  812. write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
  813. return 0;
  814. }
  815. static const struct arm64_cpu_capabilities arm64_features[] = {
  816. {
  817. .desc = "GIC system register CPU interface",
  818. .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
  819. .def_scope = SCOPE_SYSTEM,
  820. .matches = has_useable_gicv3_cpuif,
  821. .sys_reg = SYS_ID_AA64PFR0_EL1,
  822. .field_pos = ID_AA64PFR0_GIC_SHIFT,
  823. .sign = FTR_UNSIGNED,
  824. .min_field_value = 1,
  825. },
  826. #ifdef CONFIG_ARM64_PAN
  827. {
  828. .desc = "Privileged Access Never",
  829. .capability = ARM64_HAS_PAN,
  830. .def_scope = SCOPE_SYSTEM,
  831. .matches = has_cpuid_feature,
  832. .sys_reg = SYS_ID_AA64MMFR1_EL1,
  833. .field_pos = ID_AA64MMFR1_PAN_SHIFT,
  834. .sign = FTR_UNSIGNED,
  835. .min_field_value = 1,
  836. .enable = cpu_enable_pan,
  837. },
  838. #endif /* CONFIG_ARM64_PAN */
  839. #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
  840. {
  841. .desc = "LSE atomic instructions",
  842. .capability = ARM64_HAS_LSE_ATOMICS,
  843. .def_scope = SCOPE_SYSTEM,
  844. .matches = has_cpuid_feature,
  845. .sys_reg = SYS_ID_AA64ISAR0_EL1,
  846. .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
  847. .sign = FTR_UNSIGNED,
  848. .min_field_value = 2,
  849. },
  850. #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
  851. {
  852. .desc = "Software prefetching using PRFM",
  853. .capability = ARM64_HAS_NO_HW_PREFETCH,
  854. .def_scope = SCOPE_SYSTEM,
  855. .matches = has_no_hw_prefetch,
  856. },
  857. #ifdef CONFIG_ARM64_UAO
  858. {
  859. .desc = "User Access Override",
  860. .capability = ARM64_HAS_UAO,
  861. .def_scope = SCOPE_SYSTEM,
  862. .matches = has_cpuid_feature,
  863. .sys_reg = SYS_ID_AA64MMFR2_EL1,
  864. .field_pos = ID_AA64MMFR2_UAO_SHIFT,
  865. .min_field_value = 1,
  866. /*
  867. * We rely on stop_machine() calling uao_thread_switch() to set
  868. * UAO immediately after patching.
  869. */
  870. },
  871. #endif /* CONFIG_ARM64_UAO */
  872. #ifdef CONFIG_ARM64_PAN
  873. {
  874. .capability = ARM64_ALT_PAN_NOT_UAO,
  875. .def_scope = SCOPE_SYSTEM,
  876. .matches = cpufeature_pan_not_uao,
  877. },
  878. #endif /* CONFIG_ARM64_PAN */
  879. {
  880. .desc = "Virtualization Host Extensions",
  881. .capability = ARM64_HAS_VIRT_HOST_EXTN,
  882. .def_scope = SCOPE_SYSTEM,
  883. .matches = runs_at_el2,
  884. .enable = cpu_copy_el2regs,
  885. },
  886. {
  887. .desc = "32-bit EL0 Support",
  888. .capability = ARM64_HAS_32BIT_EL0,
  889. .def_scope = SCOPE_SYSTEM,
  890. .matches = has_cpuid_feature,
  891. .sys_reg = SYS_ID_AA64PFR0_EL1,
  892. .sign = FTR_UNSIGNED,
  893. .field_pos = ID_AA64PFR0_EL0_SHIFT,
  894. .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
  895. },
  896. {
  897. .desc = "Reduced HYP mapping offset",
  898. .capability = ARM64_HYP_OFFSET_LOW,
  899. .def_scope = SCOPE_SYSTEM,
  900. .matches = hyp_offset_low,
  901. },
  902. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  903. {
  904. .desc = "Kernel page table isolation (KPTI)",
  905. .capability = ARM64_UNMAP_KERNEL_AT_EL0,
  906. .def_scope = SCOPE_SYSTEM,
  907. .matches = unmap_kernel_at_el0,
  908. .enable = kpti_install_ng_mappings,
  909. },
  910. #endif
  911. {
  912. /* FP/SIMD is not implemented */
  913. .capability = ARM64_HAS_NO_FPSIMD,
  914. .def_scope = SCOPE_SYSTEM,
  915. .min_field_value = 0,
  916. .matches = has_no_fpsimd,
  917. },
  918. #ifdef CONFIG_ARM64_PMEM
  919. {
  920. .desc = "Data cache clean to Point of Persistence",
  921. .capability = ARM64_HAS_DCPOP,
  922. .def_scope = SCOPE_SYSTEM,
  923. .matches = has_cpuid_feature,
  924. .sys_reg = SYS_ID_AA64ISAR1_EL1,
  925. .field_pos = ID_AA64ISAR1_DPB_SHIFT,
  926. .min_field_value = 1,
  927. },
  928. #endif
  929. #ifdef CONFIG_ARM64_SVE
  930. {
  931. .desc = "Scalable Vector Extension",
  932. .capability = ARM64_SVE,
  933. .def_scope = SCOPE_SYSTEM,
  934. .sys_reg = SYS_ID_AA64PFR0_EL1,
  935. .sign = FTR_UNSIGNED,
  936. .field_pos = ID_AA64PFR0_SVE_SHIFT,
  937. .min_field_value = ID_AA64PFR0_SVE,
  938. .matches = has_cpuid_feature,
  939. .enable = sve_kernel_enable,
  940. },
  941. #endif /* CONFIG_ARM64_SVE */
  942. #ifdef CONFIG_ARM64_RAS_EXTN
  943. {
  944. .desc = "RAS Extension Support",
  945. .capability = ARM64_HAS_RAS_EXTN,
  946. .def_scope = SCOPE_SYSTEM,
  947. .matches = has_cpuid_feature,
  948. .sys_reg = SYS_ID_AA64PFR0_EL1,
  949. .sign = FTR_UNSIGNED,
  950. .field_pos = ID_AA64PFR0_RAS_SHIFT,
  951. .min_field_value = ID_AA64PFR0_RAS_V1,
  952. .enable = cpu_clear_disr,
  953. },
  954. #endif /* CONFIG_ARM64_RAS_EXTN */
  955. {},
  956. };
  957. #define HWCAP_CAP(reg, field, s, min_value, type, cap) \
  958. { \
  959. .desc = #cap, \
  960. .def_scope = SCOPE_SYSTEM, \
  961. .matches = has_cpuid_feature, \
  962. .sys_reg = reg, \
  963. .field_pos = field, \
  964. .sign = s, \
  965. .min_field_value = min_value, \
  966. .hwcap_type = type, \
  967. .hwcap = cap, \
  968. }
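/*
 * Note (added for clarity): for instance, the HWCAP_PMULL entry below only
 * fires when the sanitised ID_AA64ISAR0_EL1.AES field is at least 2,
 * whereas plain HWCAP_AES needs a value of at least 1.
 */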
  969. static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
  970. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
  971. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
  972. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
  973. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
  974. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
  975. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
  976. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
  977. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
  978. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
  979. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
  980. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
  981. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
  982. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
  983. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
  984. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
  985. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
  986. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
  987. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
  988. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
  989. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
  990. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
  991. #ifdef CONFIG_ARM64_SVE
  992. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
  993. #endif
  994. {},
  995. };
  996. static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
  997. #ifdef CONFIG_COMPAT
  998. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
  999. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
  1000. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
  1001. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
  1002. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
  1003. #endif
  1004. {},
  1005. };
  1006. static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
  1007. {
  1008. switch (cap->hwcap_type) {
  1009. case CAP_HWCAP:
  1010. elf_hwcap |= cap->hwcap;
  1011. break;
  1012. #ifdef CONFIG_COMPAT
  1013. case CAP_COMPAT_HWCAP:
  1014. compat_elf_hwcap |= (u32)cap->hwcap;
  1015. break;
  1016. case CAP_COMPAT_HWCAP2:
  1017. compat_elf_hwcap2 |= (u32)cap->hwcap;
  1018. break;
  1019. #endif
  1020. default:
  1021. WARN_ON(1);
  1022. break;
  1023. }
  1024. }
  1025. /* Check if we have a particular HWCAP enabled */
  1026. static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
  1027. {
  1028. bool rc;
  1029. switch (cap->hwcap_type) {
  1030. case CAP_HWCAP:
  1031. rc = (elf_hwcap & cap->hwcap) != 0;
  1032. break;
  1033. #ifdef CONFIG_COMPAT
  1034. case CAP_COMPAT_HWCAP:
  1035. rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
  1036. break;
  1037. case CAP_COMPAT_HWCAP2:
  1038. rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
  1039. break;
  1040. #endif
  1041. default:
  1042. WARN_ON(1);
  1043. rc = false;
  1044. }
  1045. return rc;
  1046. }
  1047. static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
  1048. {
  1049. /* We support emulation of accesses to CPU ID feature registers */
  1050. elf_hwcap |= HWCAP_CPUID;
  1051. for (; hwcaps->matches; hwcaps++)
  1052. if (hwcaps->matches(hwcaps, hwcaps->def_scope))
  1053. cap_set_elf_hwcap(hwcaps);
  1054. }
  1055. /*
  1056. * Check if the current CPU has a given feature capability.
  1057. * Should be called from non-preemptible context.
  1058. */
  1059. static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
  1060. unsigned int cap)
  1061. {
  1062. const struct arm64_cpu_capabilities *caps;
  1063. if (WARN_ON(preemptible()))
  1064. return false;
  1065. for (caps = cap_array; caps->matches; caps++)
  1066. if (caps->capability == cap &&
  1067. caps->matches(caps, SCOPE_LOCAL_CPU))
  1068. return true;
  1069. return false;
  1070. }
  1071. void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  1072. const char *info)
  1073. {
  1074. for (; caps->matches; caps++) {
  1075. if (!caps->matches(caps, caps->def_scope))
  1076. continue;
  1077. if (!cpus_have_cap(caps->capability) && caps->desc)
  1078. pr_info("%s %s\n", info, caps->desc);
  1079. cpus_set_cap(caps->capability);
  1080. }
  1081. }
  1082. /*
  1083. * Run through the enabled capabilities and enable() them on all active
  1084. * CPUs
  1085. */
  1086. void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
  1087. {
  1088. for (; caps->matches; caps++) {
  1089. unsigned int num = caps->capability;
  1090. if (!cpus_have_cap(num))
  1091. continue;
  1092. /* Ensure cpus_have_const_cap(num) works */
  1093. static_branch_enable(&cpu_hwcap_keys[num]);
  1094. if (caps->enable) {
  1095. /*
  1096. * Use stop_machine() as it schedules the work allowing
  1097. * us to modify PSTATE, instead of on_each_cpu() which
  1098. * uses an IPI, giving us a PSTATE that disappears when
  1099. * we return.
  1100. */
  1101. stop_machine(caps->enable, (void *)caps, cpu_online_mask);
  1102. }
  1103. }
  1104. }
  1105. /*
  1106. * Check for CPU features that are used in early boot
  1107. * based on the Boot CPU value.
  1108. */
  1109. static void check_early_cpu_features(void)
  1110. {
  1111. verify_cpu_run_el();
  1112. verify_cpu_asid_bits();
  1113. }
  1114. static void
  1115. verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
  1116. {
  1117. for (; caps->matches; caps++)
  1118. if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
  1119. pr_crit("CPU%d: missing HWCAP: %s\n",
  1120. smp_processor_id(), caps->desc);
  1121. cpu_die_early();
  1122. }
  1123. }
  1124. static void
  1125. verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
  1126. {
  1127. const struct arm64_cpu_capabilities *caps = caps_list;
  1128. for (; caps->matches; caps++) {
  1129. if (!cpus_have_cap(caps->capability))
  1130. continue;
  1131. /*
  1132. * If the new CPU misses an advertised feature, we cannot proceed
  1133. * further; park the CPU.
  1134. */
  1135. if (!__this_cpu_has_cap(caps_list, caps->capability)) {
  1136. pr_crit("CPU%d: missing feature: %s\n",
  1137. smp_processor_id(), caps->desc);
  1138. cpu_die_early();
  1139. }
  1140. if (caps->enable)
  1141. caps->enable((void *)caps);
  1142. }
  1143. }
  1144. static void verify_sve_features(void)
  1145. {
  1146. u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
  1147. u64 zcr = read_zcr_features();
  1148. unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
  1149. unsigned int len = zcr & ZCR_ELx_LEN_MASK;
  1150. if (len < safe_len || sve_verify_vq_map()) {
  1151. pr_crit("CPU%d: SVE: required vector length(s) missing\n",
  1152. smp_processor_id());
  1153. cpu_die_early();
  1154. }
  1155. /* Add checks on other ZCR bits here if necessary */
  1156. }
  1157. /*
  1158. * Run through the enabled system capabilities and enable() them on this CPU.
  1159. * The capabilities were decided based on the CPUs available at boot time.
  1160. * Any new CPU should match the system wide status of the capability. If the
  1161. * new CPU doesn't have a capability which the system now has enabled, we
  1162. * cannot do anything to fix it up and could cause unexpected failures. So
  1163. * we park the CPU.
  1164. */
  1165. static void verify_local_cpu_capabilities(void)
  1166. {
  1167. verify_local_cpu_errata_workarounds();
  1168. verify_local_cpu_features(arm64_features);
  1169. verify_local_elf_hwcaps(arm64_elf_hwcaps);
  1170. if (system_supports_32bit_el0())
  1171. verify_local_elf_hwcaps(compat_elf_hwcaps);
  1172. if (system_supports_sve())
  1173. verify_sve_features();
  1174. }
  1175. void check_local_cpu_capabilities(void)
  1176. {
  1177. /*
  1178. * All secondary CPUs should conform to the early CPU features
  1179. * in use by the kernel based on the boot CPU.
  1180. */
  1181. check_early_cpu_features();
  1182. /*
  1183. * If we haven't finalised the system capabilities, this CPU gets
  1184. * a chance to update the errata workarounds.
  1185. * Otherwise, this CPU should verify that it has all the system
  1186. * advertised capabilities.
  1187. */
  1188. if (!sys_caps_initialised)
  1189. update_cpu_errata_workarounds();
  1190. else
  1191. verify_local_cpu_capabilities();
  1192. }
  1193. static void __init setup_feature_capabilities(void)
  1194. {
  1195. update_cpu_capabilities(arm64_features, "detected:");
  1196. enable_cpu_capabilities(arm64_features);
  1197. }
  1198. DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
  1199. EXPORT_SYMBOL(arm64_const_caps_ready);
  1200. static void __init mark_const_caps_ready(void)
  1201. {
  1202. static_branch_enable(&arm64_const_caps_ready);
  1203. }
  1204. extern const struct arm64_cpu_capabilities arm64_errata[];
  1205. bool this_cpu_has_cap(unsigned int cap)
  1206. {
  1207. return (__this_cpu_has_cap(arm64_features, cap) ||
  1208. __this_cpu_has_cap(arm64_errata, cap));
  1209. }
  1210. void __init setup_cpu_features(void)
  1211. {
  1212. u32 cwg;
  1213. int cls;
  1214. /* Set the CPU feature capabilities */
  1215. setup_feature_capabilities();
  1216. enable_errata_workarounds();
  1217. mark_const_caps_ready();
  1218. setup_elf_hwcaps(arm64_elf_hwcaps);
  1219. if (system_supports_32bit_el0())
  1220. setup_elf_hwcaps(compat_elf_hwcaps);
  1221. if (system_uses_ttbr0_pan())
  1222. pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
  1223. sve_setup();
  1224. /* Advertise that we have computed the system capabilities */
  1225. set_sys_caps_initialised();
  1226. /*
  1227. * Check for sane CTR_EL0.CWG value.
  1228. */
  1229. cwg = cache_type_cwg();
  1230. cls = cache_line_size();
  1231. if (!cwg)
  1232. pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
  1233. cls);
  1234. if (L1_CACHE_BYTES < cls)
  1235. pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
  1236. L1_CACHE_BYTES, cls);
  1237. }
  1238. static bool __maybe_unused
  1239. cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
  1240. {
  1241. return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
  1242. }
  1243. /*
  1244. * We emulate only the following system register space.
  1245. * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
  1246. * See Table C5-6 System instruction encodings for System register accesses,
  1247. * ARMv8 ARM (ARM DDI 0487A.f) for more details.
  1248. */
  1249. static inline bool __attribute_const__ is_emulated(u32 id)
  1250. {
  1251. return (sys_reg_Op0(id) == 0x3 &&
  1252. sys_reg_CRn(id) == 0x0 &&
  1253. sys_reg_Op1(id) == 0x0 &&
  1254. (sys_reg_CRm(id) == 0 ||
  1255. ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
  1256. }
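/*
 * Note (added for clarity): ID_AA64ISAR0_EL1 (Op0 = 3, Op1 = 0, CRn = 0,
 * CRm = 6) falls inside this space and is emulated; the AArch32 ID
 * registers at CRm = 1-3 (e.g. ID_ISAR0_EL1) do not.
 */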
  1257. /*
  1258. * With CRm == 0, reg should be one of :
  1259. * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
  1260. */
  1261. static inline int emulate_id_reg(u32 id, u64 *valp)
  1262. {
  1263. switch (id) {
  1264. case SYS_MIDR_EL1:
  1265. *valp = read_cpuid_id();
  1266. break;
  1267. case SYS_MPIDR_EL1:
  1268. *valp = SYS_MPIDR_SAFE_VAL;
  1269. break;
  1270. case SYS_REVIDR_EL1:
  1271. /* IMPLEMENTATION DEFINED values are emulated with 0 */
  1272. *valp = 0;
  1273. break;
  1274. default:
  1275. return -EINVAL;
  1276. }
  1277. return 0;
  1278. }
  1279. static int emulate_sys_reg(u32 id, u64 *valp)
  1280. {
  1281. struct arm64_ftr_reg *regp;
  1282. if (!is_emulated(id))
  1283. return -EINVAL;
  1284. if (sys_reg_CRm(id) == 0)
  1285. return emulate_id_reg(id, valp);
  1286. regp = get_arm64_ftr_reg(id);
  1287. if (regp)
  1288. *valp = arm64_ftr_reg_user_value(regp);
  1289. else
  1290. /*
  1291. * The untracked registers are either IMPLEMENTATION DEFINED
  1292. * (e.g., ID_AFR0_EL1) or reserved RAZ.
  1293. */
  1294. *valp = 0;
  1295. return 0;
  1296. }
  1297. static int emulate_mrs(struct pt_regs *regs, u32 insn)
  1298. {
  1299. int rc;
  1300. u32 sys_reg, dst;
  1301. u64 val;
  1302. /*
  1303. * sys_reg values are defined as used in mrs/msr instruction.
  1304. * shift the imm value to get the encoding.
  1305. * Shift the imm value to get the encoding.
  1306. sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
  1307. rc = emulate_sys_reg(sys_reg, &val);
  1308. if (!rc) {
  1309. dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
  1310. pt_regs_write_reg(regs, dst, val);
  1311. arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
  1312. }
  1313. return rc;
  1314. }
  1315. static struct undef_hook mrs_hook = {
  1316. .instr_mask = 0xfff00000,
  1317. .instr_val = 0xd5300000,
  1318. .pstate_mask = COMPAT_PSR_MODE_MASK,
  1319. .pstate_val = PSR_MODE_EL0t,
  1320. .fn = emulate_mrs,
  1321. };
  1322. static int __init enable_mrs_emulation(void)
  1323. {
  1324. register_undef_hook(&mrs_hook);
  1325. return 0;
  1326. }
  1327. core_initcall(enable_mrs_emulation);
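/*
 * Illustrative userspace sketch (not part of this file): with HWCAP_CPUID
 * advertised in elf_hwcap, an EL0 read of an emulated ID register traps as
 * an undefined instruction and lands in emulate_mrs() above, which returns
 * the sanitised, user-visible value:
 *
 *     uint64_t isar0;
 *     asm volatile("mrs %0, ID_AA64ISAR0_EL1" : "=r" (isar0));
 *
 * (Older assemblers may need the raw S3_0_C0_C6_0 encoding.)
 */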
  1328. int cpu_clear_disr(void *__unused)
  1329. {
  1330. /* Firmware may have left a deferred SError in this register. */
  1331. write_sysreg_s(0, SYS_DISR_EL1);
  1332. return 0;
  1333. }