cpufeature.c

  1. /*
  2. * Contains CPU feature definitions
  3. *
  4. * Copyright (C) 2015 ARM Ltd.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #define pr_fmt(fmt) "CPU features: " fmt
  19. #include <linux/bsearch.h>
  20. #include <linux/cpumask.h>
  21. #include <linux/crash_dump.h>
  22. #include <linux/sort.h>
  23. #include <linux/stop_machine.h>
  24. #include <linux/types.h>
  25. #include <linux/mm.h>
  26. #include <asm/cpu.h>
  27. #include <asm/cpufeature.h>
  28. #include <asm/cpu_ops.h>
  29. #include <asm/fpsimd.h>
  30. #include <asm/mmu_context.h>
  31. #include <asm/processor.h>
  32. #include <asm/sysreg.h>
  33. #include <asm/traps.h>
  34. #include <asm/virt.h>
  35. unsigned long elf_hwcap __read_mostly;
  36. EXPORT_SYMBOL_GPL(elf_hwcap);
  37. #ifdef CONFIG_COMPAT
  38. #define COMPAT_ELF_HWCAP_DEFAULT \
  39. (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
  40. COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
  41. COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
  42. COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
  43. COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
  44. COMPAT_HWCAP_LPAE)
  45. unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
  46. unsigned int compat_elf_hwcap2 __read_mostly;
  47. #endif
  48. DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
  49. EXPORT_SYMBOL(cpu_hwcaps);
  50. /*
  51. * Flag to indicate if we have computed the system wide
  52. * capabilities based on the boot time active CPUs. This
  53. * will be used to determine if a new booting CPU should
  54. * go through the verification process to make sure that it
  55. * supports the system capabilities, without using a hotplug
  56. * notifier.
  57. */
  58. static bool sys_caps_initialised;
  59. static inline void set_sys_caps_initialised(void)
  60. {
  61. sys_caps_initialised = true;
  62. }
  63. static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
  64. {
  65. /* file-wide pr_fmt adds "CPU features: " prefix */
  66. pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
  67. return 0;
  68. }
  69. static struct notifier_block cpu_hwcaps_notifier = {
  70. .notifier_call = dump_cpu_hwcaps
  71. };
  72. static int __init register_cpu_hwcaps_dumper(void)
  73. {
  74. atomic_notifier_chain_register(&panic_notifier_list,
  75. &cpu_hwcaps_notifier);
  76. return 0;
  77. }
  78. __initcall(register_cpu_hwcaps_dumper);
  79. DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
  80. EXPORT_SYMBOL(cpu_hwcap_keys);
  81. #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  82. { \
  83. .sign = SIGNED, \
  84. .visible = VISIBLE, \
  85. .strict = STRICT, \
  86. .type = TYPE, \
  87. .shift = SHIFT, \
  88. .width = WIDTH, \
  89. .safe_val = SAFE_VAL, \
  90. }
  91. /* Define a feature with unsigned values */
  92. #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  93. __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
  94. /* Define a feature with a signed value */
  95. #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  96. __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
  97. #define ARM64_FTR_END \
  98. { \
  99. .width = 0, \
  100. }
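As an aside, each ARM64_FTR_BITS()/S_ARM64_FTR_BITS() entry above is just a designated initializer for a struct arm64_ftr_bits (declared in asm/cpufeature.h, not shown here). A minimal standalone sketch of the same pattern follows; the simplified field types are assumptions for illustration, not the kernel's exact definitions.

/* Editor's sketch: models the ARM64_FTR_BITS() expansion with assumed types. */
#include <stdbool.h>
#include <stdio.h>

struct ftr_bits {		/* stand-in for struct arm64_ftr_bits */
	bool	sign;		/* FTR_SIGNED / FTR_UNSIGNED          */
	bool	visible;	/* exposed to userspace via MRS?      */
	bool	strict;		/* mismatch taints the kernel?        */
	int	type;		/* FTR_EXACT / FTR_LOWER_SAFE / ...   */
	int	shift;		/* bit position of the field          */
	int	width;		/* field width in bits (0 terminates) */
	long	safe_val;	/* value used when CPUs disagree      */
};

#define FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)	\
	{ .sign = SIGNED, .visible = VISIBLE, .strict = STRICT,		\
	  .type = TYPE, .shift = SHIFT, .width = WIDTH, .safe_val = SAFE_VAL }

static const struct ftr_bits example[] = {
	FTR_BITS(false, true, true, 1 /* lower-safe */, 20, 4, 0),
	{ .width = 0 },		/* plays the role of ARM64_FTR_END */
};

int main(void)
{
	printf("field at bit %d, width %d, safe value %ld\n",
	       example[0].shift, example[0].width, example[0].safe_val);
	return 0;
}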
  101. /* meta feature for alternatives */
  102. static bool __maybe_unused
  103. cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
  104. static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
  105. /*
  106. * NOTE: Any changes to the visibility of features should be kept in
  107. * sync with the documentation of the CPU feature register ABI.
  108. */
  109. static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
  110. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
  111. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
  112. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
  113. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
  114. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
  115. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
  116. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
  117. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
  118. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
  119. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
  120. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
  121. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
  122. ARM64_FTR_END,
  123. };
  124. static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
  125. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
  126. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
  127. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
  128. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
  129. ARM64_FTR_END,
  130. };
  131. static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
  132. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
  133. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
  134. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
  135. ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
  136. FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
  137. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
  138. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
  139. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
  140. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
  141. /* Linux doesn't care about the EL3 */
  142. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
  143. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
  144. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
  145. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
  146. ARM64_FTR_END,
  147. };
  148. static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
  149. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
  150. ARM64_FTR_END,
  151. };
  152. static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
  153. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
  154. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
  155. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
  156. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
  157. /* Linux shouldn't care about secure memory */
  158. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
  159. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
  160. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
  161. /*
  162. * Differing PARange is fine as long as all peripherals and memory are mapped
  163. * within the minimum PARange of all CPUs
  164. */
  165. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
  166. ARM64_FTR_END,
  167. };
  168. static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
  169. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
  170. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
  171. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
  172. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
  173. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
  174. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
  175. ARM64_FTR_END,
  176. };
  177. static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
  178. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
  179. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
  180. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
  181. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
  182. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
  183. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
  184. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
  185. ARM64_FTR_END,
  186. };
  187. static const struct arm64_ftr_bits ftr_ctr[] = {
  188. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
  189. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
  190. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
  191. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
  192. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
  193. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
  194. /*
  195. * Linux can handle differing I-cache policies. Userspace JITs will
  196. * make use of *minLine.
  197. * If we have differing I-cache policies, report it as the weakest - VIPT.
  198. */
  199. ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
  200. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
  201. ARM64_FTR_END,
  202. };
  203. struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
  204. .name = "SYS_CTR_EL0",
  205. .ftr_bits = ftr_ctr
  206. };
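A note on the policies chosen for ftr_ctr above: the *minLine fields encode log2 of the cache line size in 4-byte words, so only the smallest value across CPUs is safe to advertise to userspace JITs, while the CWG/ERG granules must be taken as the largest. A small standalone sketch of that arithmetic (illustrative only, not kernel code):

/* Editor's sketch: combining CTR_EL0.IminLine across two CPUs. */
#include <stdio.h>

static unsigned int line_size_bytes(unsigned int minline_field)
{
	/* *minLine is log2 of the line size in 4-byte words */
	return 4U << minline_field;
}

int main(void)
{
	unsigned int cpu0_iminline = 4;	/* 64-byte I-cache lines */
	unsigned int cpu1_iminline = 3;	/* 32-byte I-cache lines */

	/* FTR_LOWER_SAFE: advertise the smaller of the two encodings */
	unsigned int safe = cpu0_iminline < cpu1_iminline ? cpu0_iminline
							  : cpu1_iminline;

	printf("CPU0 %u bytes, CPU1 %u bytes -> advertise %u bytes\n",
	       line_size_bytes(cpu0_iminline), line_size_bytes(cpu1_iminline),
	       line_size_bytes(safe));
	return 0;
}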
  207. static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
  208. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */
  209. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */
  210. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
  211. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */
  212. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */
  213. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */
  214. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */
  215. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */
  216. ARM64_FTR_END,
  217. };
  218. static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
  219. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
  220. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
  221. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
  222. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
  223. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
  224. /*
  225. * We can instantiate multiple PMU instances with different levels
  226. * of support.
  227. */
  228. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
  229. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
  230. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
  231. ARM64_FTR_END,
  232. };
  233. static const struct arm64_ftr_bits ftr_mvfr2[] = {
  234. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
  235. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
  236. ARM64_FTR_END,
  237. };
  238. static const struct arm64_ftr_bits ftr_dczid[] = {
  239. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */
  240. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */
  241. ARM64_FTR_END,
  242. };
  243. static const struct arm64_ftr_bits ftr_id_isar5[] = {
  244. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
  245. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
  246. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
  247. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
  248. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
  249. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
  250. ARM64_FTR_END,
  251. };
  252. static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
  253. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */
  254. ARM64_FTR_END,
  255. };
  256. static const struct arm64_ftr_bits ftr_id_pfr0[] = {
  257. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
  258. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
  259. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */
  260. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */
  261. ARM64_FTR_END,
  262. };
  263. static const struct arm64_ftr_bits ftr_id_dfr0[] = {
  264. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  265. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
  266. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  267. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  268. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  269. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  270. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  271. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  272. ARM64_FTR_END,
  273. };
  274. static const struct arm64_ftr_bits ftr_zcr[] = {
  275. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
  276. ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
  277. ARM64_FTR_END,
  278. };
  279. /*
  280. * Common ftr bits for a 32bit register with all hidden, strict
  281. * attributes, with 4bit feature fields and a default safe value of
  282. * 0. Covers the following 32bit registers:
  283. * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
  284. */
  285. static const struct arm64_ftr_bits ftr_generic_32bits[] = {
  286. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  287. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
  288. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  289. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  290. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  291. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  292. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  293. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  294. ARM64_FTR_END,
  295. };
  296. /* Table for a single 32bit feature value */
  297. static const struct arm64_ftr_bits ftr_single32[] = {
  298. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
  299. ARM64_FTR_END,
  300. };
  301. static const struct arm64_ftr_bits ftr_raz[] = {
  302. ARM64_FTR_END,
  303. };
  304. #define ARM64_FTR_REG(id, table) { \
  305. .sys_id = id, \
  306. .reg = &(struct arm64_ftr_reg){ \
  307. .name = #id, \
  308. .ftr_bits = &((table)[0]), \
  309. }}
  310. static const struct __ftr_reg_entry {
  311. u32 sys_id;
  312. struct arm64_ftr_reg *reg;
  313. } arm64_ftr_regs[] = {
  314. /* Op1 = 0, CRn = 0, CRm = 1 */
  315. ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
  316. ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
  317. ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
  318. ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
  319. ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
  320. ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
  321. ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
  322. /* Op1 = 0, CRn = 0, CRm = 2 */
  323. ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
  324. ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
  325. ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
  326. ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
  327. ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
  328. ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
  329. ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
  330. /* Op1 = 0, CRn = 0, CRm = 3 */
  331. ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
  332. ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
  333. ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
  334. /* Op1 = 0, CRn = 0, CRm = 4 */
  335. ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
  336. ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
  337. ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
  338. /* Op1 = 0, CRn = 0, CRm = 5 */
  339. ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
  340. ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
  341. /* Op1 = 0, CRn = 0, CRm = 6 */
  342. ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
  343. ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
  344. /* Op1 = 0, CRn = 0, CRm = 7 */
  345. ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
  346. ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
  347. ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
  348. /* Op1 = 0, CRn = 1, CRm = 2 */
  349. ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
  350. /* Op1 = 3, CRn = 0, CRm = 0 */
  351. { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
  352. ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
  353. /* Op1 = 3, CRn = 14, CRm = 0 */
  354. ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
  355. };
  356. static int search_cmp_ftr_reg(const void *id, const void *regp)
  357. {
  358. return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
  359. }
  360. /*
  361. * get_arm64_ftr_reg - Lookup a feature register entry using its
  362. * sys_reg() encoding. With the array arm64_ftr_regs sorted in
  363. * ascending order of sys_id, we use binary search to find a matching
  364. * entry.
  365. *
  366. * returns - Upon success, matching ftr_reg entry for id.
  367. * - NULL on failure. It is up to the caller to decide
  368. * the impact of a failure.
  369. */
  370. static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
  371. {
  372. const struct __ftr_reg_entry *ret;
  373. ret = bsearch((const void *)(unsigned long)sys_id,
  374. arm64_ftr_regs,
  375. ARRAY_SIZE(arm64_ftr_regs),
  376. sizeof(arm64_ftr_regs[0]),
  377. search_cmp_ftr_reg);
  378. if (ret)
  379. return ret->reg;
  380. return NULL;
  381. }
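get_arm64_ftr_reg() depends on arm64_ftr_regs[] being sorted by sys_id (sort_ftr_regs() below checks this), so the standard bsearch() comparator pattern works. A userspace model of the same lookup, using made-up register IDs:

/* Editor's sketch: bsearch() over a sorted id->entry table, mirroring get_arm64_ftr_reg(). */
#include <stdio.h>
#include <stdlib.h>

struct reg_entry {
	unsigned int	sys_id;
	const char	*name;
};

/* Must be sorted by sys_id, ascending (IDs here are arbitrary placeholders). */
static const struct reg_entry regs[] = {
	{ 0x100, "ID_AA64PFR0_EL1"  },
	{ 0x200, "ID_AA64ISAR0_EL1" },
	{ 0x300, "CTR_EL0"          },
};

static int cmp_reg(const void *id, const void *entry)
{
	return (int)(long)id - (int)((const struct reg_entry *)entry)->sys_id;
}

static const struct reg_entry *lookup(unsigned int sys_id)
{
	return bsearch((const void *)(long)sys_id, regs,
		       sizeof(regs) / sizeof(regs[0]), sizeof(regs[0]), cmp_reg);
}

int main(void)
{
	const struct reg_entry *e = lookup(0x200);

	printf("0x200 -> %s\n", e ? e->name : "not found");
	return 0;
}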
  382. static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
  383. s64 ftr_val)
  384. {
  385. u64 mask = arm64_ftr_mask(ftrp);
  386. reg &= ~mask;
  387. reg |= (ftr_val << ftrp->shift) & mask;
  388. return reg;
  389. }
  390. static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
  391. s64 cur)
  392. {
  393. s64 ret = 0;
  394. switch (ftrp->type) {
  395. case FTR_EXACT:
  396. ret = ftrp->safe_val;
  397. break;
  398. case FTR_LOWER_SAFE:
  399. ret = new < cur ? new : cur;
  400. break;
  401. case FTR_HIGHER_SAFE:
  402. ret = new > cur ? new : cur;
  403. break;
  404. default:
  405. BUG();
  406. }
  407. return ret;
  408. }
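arm64_ftr_safe_value() is the core of sanitisation: for each field it picks a value that every CPU can honour. A compact standalone model of the three policies, operating on plain integers:

/* Editor's sketch: FTR_EXACT / FTR_LOWER_SAFE / FTR_HIGHER_SAFE arbitration. */
#include <stdio.h>

enum ftr_type { FTR_EXACT, FTR_LOWER_SAFE, FTR_HIGHER_SAFE };

static long safe_value(enum ftr_type type, long safe_val, long new, long cur)
{
	switch (type) {
	case FTR_EXACT:		return safe_val;	/* CPUs disagree -> fall back */
	case FTR_LOWER_SAFE:	return new < cur ? new : cur;
	case FTR_HIGHER_SAFE:	return new > cur ? new : cur;
	}
	return safe_val;
}

int main(void)
{
	/* e.g. one CPU reports SHA2 level 2, another level 1: advertise 1 */
	printf("LOWER_SAFE(2, 1)  -> %ld\n", safe_value(FTR_LOWER_SAFE, 0, 2, 1));
	/* e.g. cache writeback granule: take the larger of the two */
	printf("HIGHER_SAFE(3, 4) -> %ld\n", safe_value(FTR_HIGHER_SAFE, 0, 3, 4));
	/* e.g. L1Ip policy mismatch: report the configured safe value */
	printf("EXACT(safe=0)     -> %ld\n", safe_value(FTR_EXACT, 0, 2, 1));
	return 0;
}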
  409. static void __init sort_ftr_regs(void)
  410. {
  411. int i;
  412. /* Check that the array is sorted so that we can do the binary search */
  413. for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
  414. BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
  415. }
  416. /*
  417. * Initialise the CPU feature register from Boot CPU values.
  418. * Also initialises the strict_mask for the register.
  419. * Any bits that are not covered by an arm64_ftr_bits entry are considered
  420. * RES0 for the system-wide value, and must strictly match.
  421. */
  422. static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
  423. {
  424. u64 val = 0;
  425. u64 strict_mask = ~0x0ULL;
  426. u64 user_mask = 0;
  427. u64 valid_mask = 0;
  428. const struct arm64_ftr_bits *ftrp;
  429. struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
  430. BUG_ON(!reg);
  431. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  432. u64 ftr_mask = arm64_ftr_mask(ftrp);
  433. s64 ftr_new = arm64_ftr_value(ftrp, new);
  434. val = arm64_ftr_set_value(ftrp, val, ftr_new);
  435. valid_mask |= ftr_mask;
  436. if (!ftrp->strict)
  437. strict_mask &= ~ftr_mask;
  438. if (ftrp->visible)
  439. user_mask |= ftr_mask;
  440. else
  441. reg->user_val = arm64_ftr_set_value(ftrp,
  442. reg->user_val,
  443. ftrp->safe_val);
  444. }
  445. val &= valid_mask;
  446. reg->sys_val = val;
  447. reg->strict_mask = strict_mask;
  448. reg->user_mask = user_mask;
  449. }
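To restate what init_cpu_ftr_reg() derives from the table: non-strict fields are cleared from strict_mask, visible fields are added to user_mask, and bits not described at all are masked out of the system value (treated as RES0). A simplified model of that mask construction, using a two-field table made up for illustration:

/* Editor's sketch: deriving strict/user/valid masks from a field table. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct field { int shift, width; bool strict, visible; };

int main(void)
{
	const struct field fields[] = {
		{ .shift = 0, .width = 4, .strict = true,  .visible = true  },
		{ .shift = 4, .width = 4, .strict = false, .visible = false },
	};
	uint64_t strict_mask = ~0ULL, user_mask = 0, valid_mask = 0;

	for (unsigned int i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint64_t mask = ((1ULL << fields[i].width) - 1) << fields[i].shift;

		valid_mask |= mask;
		if (!fields[i].strict)
			strict_mask &= ~mask;	/* mismatches here are tolerated */
		if (fields[i].visible)
			user_mask |= mask;	/* exposed to userspace via MRS */
	}
	printf("valid  %#llx\nstrict %#llx\nuser   %#llx\n",
	       (unsigned long long)valid_mask,
	       (unsigned long long)strict_mask,
	       (unsigned long long)user_mask);
	return 0;
}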
  450. extern const struct arm64_cpu_capabilities arm64_errata[];
  451. static void __init setup_boot_cpu_capabilities(void);
  452. void __init init_cpu_features(struct cpuinfo_arm64 *info)
  453. {
  454. /* Before we start using the tables, make sure it is sorted */
  455. sort_ftr_regs();
  456. init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
  457. init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
  458. init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
  459. init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
  460. init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
  461. init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
  462. init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
  463. init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
  464. init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
  465. init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
  466. init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
  467. init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
  468. init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
  469. if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  470. init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
  471. init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
  472. init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
  473. init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
  474. init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
  475. init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
  476. init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
  477. init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
  478. init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
  479. init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
  480. init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
  481. init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
  482. init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
  483. init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
  484. init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
  485. init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
  486. }
  487. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  488. init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
  489. sve_init_vq_map();
  490. }
  491. /*
  492. * Detect and enable early CPU capabilities based on the boot CPU,
  493. * after we have initialised the CPU feature infrastructure.
  494. */
  495. setup_boot_cpu_capabilities();
  496. }
  497. static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
  498. {
  499. const struct arm64_ftr_bits *ftrp;
  500. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  501. s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
  502. s64 ftr_new = arm64_ftr_value(ftrp, new);
  503. if (ftr_cur == ftr_new)
  504. continue;
  505. /* Find a safe value */
  506. ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
  507. reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
  508. }
  509. }
  510. static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
  511. {
  512. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
  513. BUG_ON(!regp);
  514. update_cpu_ftr_reg(regp, val);
  515. if ((boot & regp->strict_mask) == (val & regp->strict_mask))
  516. return 0;
  517. pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
  518. regp->name, boot, cpu, val);
  519. return 1;
  520. }
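check_update_ftr_reg() only warns (and later taints) when the boot CPU and the new CPU differ within the register's strict_mask; non-strict fields may vary freely. A one-function model of that comparison, with arbitrary example values:

/* Editor's sketch: mismatch detection restricted to strictly-checked bits. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool strict_mismatch(uint64_t boot, uint64_t val, uint64_t strict_mask)
{
	return (boot & strict_mask) != (val & strict_mask);
}

int main(void)
{
	/* CPUs differ only in bits 4-7, which are not strictly checked here */
	uint64_t strict_mask = ~0xf0ULL;

	printf("benign   -> %d\n", strict_mismatch(0x12, 0x72, strict_mask));
	printf("tainting -> %d\n", strict_mismatch(0x12, 0x13, strict_mask));
	return 0;
}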
  521. /*
  522. * Update system wide CPU feature registers with the values from a
  523. * non-boot CPU. Also performs SANITY checks to make sure that there
  524. * aren't any insane variations from that of the boot CPU.
  525. */
  526. void update_cpu_features(int cpu,
  527. struct cpuinfo_arm64 *info,
  528. struct cpuinfo_arm64 *boot)
  529. {
  530. int taint = 0;
  531. /*
  532. * The kernel can handle differing I-cache policies, but otherwise
  533. * caches should look identical. Userspace JITs will make use of
  534. * *minLine.
  535. */
  536. taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
  537. info->reg_ctr, boot->reg_ctr);
  538. /*
  539. * Userspace may perform DC ZVA instructions. Mismatched block sizes
  540. * could result in too much or too little memory being zeroed if a
  541. * process is preempted and migrated between CPUs.
  542. */
  543. taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
  544. info->reg_dczid, boot->reg_dczid);
  545. /* If different, timekeeping will be broken (especially with KVM) */
  546. taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
  547. info->reg_cntfrq, boot->reg_cntfrq);
  548. /*
  549. * The kernel uses self-hosted debug features and expects CPUs to
  550. * support identical debug features. We presently need CTX_CMPs, WRPs,
  551. * and BRPs to be identical.
  552. * ID_AA64DFR1 is currently RES0.
  553. */
  554. taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
  555. info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
  556. taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
  557. info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
  558. /*
  559. * Even in big.LITTLE, processors should be identical instruction-set
  560. * wise.
  561. */
  562. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
  563. info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
  564. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
  565. info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
  566. /*
  567. * Differing PARange support is fine as long as all peripherals and
  568. * memory are mapped within the minimum PARange of all CPUs.
  569. * Linux should not care about secure memory.
  570. */
  571. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
  572. info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
  573. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
  574. info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
  575. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
  576. info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
  577. /*
  578. * EL3 is not our concern.
  579. */
  580. taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
  581. info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
  582. taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
  583. info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
  584. taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
  585. info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
  586. /*
  587. * If we have AArch32, we care about 32-bit features for compat.
  588. * If the system doesn't support AArch32, don't update them.
  589. */
  590. if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  591. id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  592. taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
  593. info->reg_id_dfr0, boot->reg_id_dfr0);
  594. taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
  595. info->reg_id_isar0, boot->reg_id_isar0);
  596. taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
  597. info->reg_id_isar1, boot->reg_id_isar1);
  598. taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
  599. info->reg_id_isar2, boot->reg_id_isar2);
  600. taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
  601. info->reg_id_isar3, boot->reg_id_isar3);
  602. taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
  603. info->reg_id_isar4, boot->reg_id_isar4);
  604. taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
  605. info->reg_id_isar5, boot->reg_id_isar5);
  606. /*
  607. * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
  608. * ACTLR formats could differ across CPUs and therefore would have to
  609. * be trapped for virtualization anyway.
  610. */
  611. taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
  612. info->reg_id_mmfr0, boot->reg_id_mmfr0);
  613. taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
  614. info->reg_id_mmfr1, boot->reg_id_mmfr1);
  615. taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
  616. info->reg_id_mmfr2, boot->reg_id_mmfr2);
  617. taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
  618. info->reg_id_mmfr3, boot->reg_id_mmfr3);
  619. taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
  620. info->reg_id_pfr0, boot->reg_id_pfr0);
  621. taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
  622. info->reg_id_pfr1, boot->reg_id_pfr1);
  623. taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
  624. info->reg_mvfr0, boot->reg_mvfr0);
  625. taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
  626. info->reg_mvfr1, boot->reg_mvfr1);
  627. taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
  628. info->reg_mvfr2, boot->reg_mvfr2);
  629. }
  630. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  631. taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
  632. info->reg_zcr, boot->reg_zcr);
  633. /* Probe vector lengths, unless we already gave up on SVE */
  634. if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  635. !sys_caps_initialised)
  636. sve_update_vq_map();
  637. }
  638. /*
  639. * Mismatched CPU features are a recipe for disaster. Don't even
  640. * pretend to support them.
  641. */
  642. if (taint) {
  643. pr_warn_once("Unsupported CPU feature variation detected.\n");
  644. add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
  645. }
  646. }
  647. u64 read_sanitised_ftr_reg(u32 id)
  648. {
  649. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
  650. /* We shouldn't get a request for an unsupported register */
  651. BUG_ON(!regp);
  652. return regp->sys_val;
  653. }
  654. #define read_sysreg_case(r) \
  655. case r: return read_sysreg_s(r)
  656. /*
  657. * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
  658. * Read the system register on the current CPU
  659. */
  660. static u64 __read_sysreg_by_encoding(u32 sys_id)
  661. {
  662. switch (sys_id) {
  663. read_sysreg_case(SYS_ID_PFR0_EL1);
  664. read_sysreg_case(SYS_ID_PFR1_EL1);
  665. read_sysreg_case(SYS_ID_DFR0_EL1);
  666. read_sysreg_case(SYS_ID_MMFR0_EL1);
  667. read_sysreg_case(SYS_ID_MMFR1_EL1);
  668. read_sysreg_case(SYS_ID_MMFR2_EL1);
  669. read_sysreg_case(SYS_ID_MMFR3_EL1);
  670. read_sysreg_case(SYS_ID_ISAR0_EL1);
  671. read_sysreg_case(SYS_ID_ISAR1_EL1);
  672. read_sysreg_case(SYS_ID_ISAR2_EL1);
  673. read_sysreg_case(SYS_ID_ISAR3_EL1);
  674. read_sysreg_case(SYS_ID_ISAR4_EL1);
  675. read_sysreg_case(SYS_ID_ISAR5_EL1);
  676. read_sysreg_case(SYS_MVFR0_EL1);
  677. read_sysreg_case(SYS_MVFR1_EL1);
  678. read_sysreg_case(SYS_MVFR2_EL1);
  679. read_sysreg_case(SYS_ID_AA64PFR0_EL1);
  680. read_sysreg_case(SYS_ID_AA64PFR1_EL1);
  681. read_sysreg_case(SYS_ID_AA64DFR0_EL1);
  682. read_sysreg_case(SYS_ID_AA64DFR1_EL1);
  683. read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
  684. read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
  685. read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
  686. read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
  687. read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
  688. read_sysreg_case(SYS_CNTFRQ_EL0);
  689. read_sysreg_case(SYS_CTR_EL0);
  690. read_sysreg_case(SYS_DCZID_EL0);
  691. default:
  692. BUG();
  693. return 0;
  694. }
  695. }
  696. #include <linux/irqchip/arm-gic-v3.h>
  697. static bool
  698. feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
  699. {
  700. int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
  701. return val >= entry->min_field_value;
  702. }
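feature_matches() compares a single ID register field, extracted either as an unsigned value or sign-extended so that encodings such as 0xf ("not implemented") compare below zero, which is what has_no_fpsimd() below relies on. A standalone model of the extraction, assuming the usual 4-bit field width:

/* Editor's sketch: extracting a 4-bit ID register field, signed or unsigned. */
#include <stdint.h>
#include <stdio.h>

static int64_t extract_field(uint64_t reg, int shift, int sign)
{
	if (sign)	/* arithmetic shift sign-extends the 4-bit field */
		return (int64_t)(reg << (64 - shift - 4)) >> 60;
	return (reg >> shift) & 0xf;
}

int main(void)
{
	/* pretend the FP field (bits 19:16) reads 0xf, i.e. not implemented */
	uint64_t pfr0 = 0xfULL << 16;

	printf("unsigned: %lld\n", (long long)extract_field(pfr0, 16, 0));
	printf("signed:   %lld\n", (long long)extract_field(pfr0, 16, 1));
	return 0;
}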
  703. static bool
  704. has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
  705. {
  706. u64 val;
  707. WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
  708. if (scope == SCOPE_SYSTEM)
  709. val = read_sanitised_ftr_reg(entry->sys_reg);
  710. else
  711. val = __read_sysreg_by_encoding(entry->sys_reg);
  712. return feature_matches(val, entry);
  713. }
  714. static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
  715. {
  716. bool has_sre;
  717. if (!has_cpuid_feature(entry, scope))
  718. return false;
  719. has_sre = gic_enable_sre();
  720. if (!has_sre)
  721. pr_warn_once("%s present but disabled by higher exception level\n",
  722. entry->desc);
  723. return has_sre;
  724. }
  725. static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
  726. {
  727. u32 midr = read_cpuid_id();
  728. /* Cavium ThunderX pass 1.x and 2.x */
  729. return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
  730. MIDR_CPU_VAR_REV(0, 0),
  731. MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
  732. }
  733. static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
  734. {
  735. u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
  736. return cpuid_feature_extract_signed_field(pfr0,
  737. ID_AA64PFR0_FP_SHIFT) < 0;
  738. }
  739. static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
  740. int scope)
  741. {
  742. u64 ctr;
  743. if (scope == SCOPE_SYSTEM)
  744. ctr = arm64_ftr_reg_ctrel0.sys_val;
  745. else
  746. ctr = read_cpuid_effective_cachetype();
  747. return ctr & BIT(CTR_IDC_SHIFT);
  748. }
  749. static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
  750. {
  751. /*
  752. * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
  753. * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
  754. * to the CTR_EL0 on this CPU and emulate it with the real/safe
  755. * value.
  756. */
  757. if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
  758. sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
  759. }
  760. static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
  761. int scope)
  762. {
  763. u64 ctr;
  764. if (scope == SCOPE_SYSTEM)
  765. ctr = arm64_ftr_reg_ctrel0.sys_val;
  766. else
  767. ctr = read_cpuid_cachetype();
  768. return ctr & BIT(CTR_DIC_SHIFT);
  769. }
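has_cache_idc()/has_cache_dic() test single CTR_EL0 bits; when set, the D-cache clean to PoU (IDC) or the I-cache invalidation (DIC) normally needed for instruction/data coherence can be skipped. A minimal model of interpreting those two bits; the bit positions used here are placeholders, whereas the kernel takes them from CTR_IDC_SHIFT/CTR_DIC_SHIFT:

/* Editor's sketch: interpreting the CTR_EL0 IDC/DIC coherence bits. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IDC_BIT		(1ULL << 28)	/* placeholder position */
#define DIC_BIT		(1ULL << 29)	/* placeholder position */

static void describe(uint64_t ctr)
{
	bool idc = ctr & IDC_BIT, dic = ctr & DIC_BIT;

	printf("D-cache clean to PoU needed: %s\n", idc ? "no" : "yes");
	printf("I-cache invalidation needed: %s\n", dic ? "no" : "yes");
}

int main(void)
{
	describe(IDC_BIT | DIC_BIT);	/* fully coherent I/D caches */
	describe(0);			/* both maintenance steps required */
	return 0;
}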
  770. static bool __maybe_unused
  771. has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
  772. {
  773. /*
  774. * Kdump isn't guaranteed to power-off all secondary CPUs, CNP
  775. * may share TLB entries with a CPU stuck in the crashed
  776. * kernel.
  777. */
  778. if (is_kdump_kernel())
  779. return false;
  780. return has_cpuid_feature(entry, scope);
  781. }
  782. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  783. static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
  784. static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
  785. int scope)
  786. {
  787. /* List of CPUs that are not vulnerable and don't need KPTI */
  788. static const struct midr_range kpti_safe_list[] = {
  789. MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
  790. MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
  791. { /* sentinel */ }
  792. };
  793. char const *str = "command line option";
  794. /*
  795. * For reasons that aren't entirely clear, enabling KPTI on Cavium
  796. * ThunderX leads to apparent I-cache corruption of kernel text, which
  797. * ends as well as you might imagine. Don't even try.
  798. */
  799. if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
  800. str = "ARM64_WORKAROUND_CAVIUM_27456";
  801. __kpti_forced = -1;
  802. }
  803. /* Forced? */
  804. if (__kpti_forced) {
  805. pr_info_once("kernel page table isolation forced %s by %s\n",
  806. __kpti_forced > 0 ? "ON" : "OFF", str);
  807. return __kpti_forced > 0;
  808. }
  809. /* Useful for KASLR robustness */
  810. if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
  811. return true;
  812. /* Don't force KPTI for CPUs that are not vulnerable */
  813. if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
  814. return false;
  815. /* Defer to CPU feature registers */
  816. return !has_cpuid_feature(entry, scope);
  817. }
  818. static void
  819. kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
  820. {
  821. typedef void (kpti_remap_fn)(int, int, phys_addr_t);
  822. extern kpti_remap_fn idmap_kpti_install_ng_mappings;
  823. kpti_remap_fn *remap_fn;
  824. static bool kpti_applied = false;
  825. int cpu = smp_processor_id();
  826. if (kpti_applied)
  827. return;
  828. remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
  829. cpu_install_idmap();
  830. remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
  831. cpu_uninstall_idmap();
  832. if (!cpu)
  833. kpti_applied = true;
  834. return;
  835. }
  836. static int __init parse_kpti(char *str)
  837. {
  838. bool enabled;
  839. int ret = strtobool(str, &enabled);
  840. if (ret)
  841. return ret;
  842. __kpti_forced = enabled ? 1 : -1;
  843. return 0;
  844. }
  845. early_param("kpti", parse_kpti);
  846. #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
  847. #ifdef CONFIG_ARM64_HW_AFDBM
  848. static inline void __cpu_enable_hw_dbm(void)
  849. {
  850. u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
  851. write_sysreg(tcr, tcr_el1);
  852. isb();
  853. }
  854. static bool cpu_has_broken_dbm(void)
  855. {
  856. /* List of CPUs which have broken DBM support. */
  857. static const struct midr_range cpus[] = {
  858. #ifdef CONFIG_ARM64_ERRATUM_1024718
  859. MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 - r1p0
  860. #endif
  861. {},
  862. };
  863. return is_midr_in_range_list(read_cpuid_id(), cpus);
  864. }
  865. static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
  866. {
  867. return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
  868. !cpu_has_broken_dbm();
  869. }
  870. static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
  871. {
  872. if (cpu_can_use_dbm(cap))
  873. __cpu_enable_hw_dbm();
  874. }
  875. static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
  876. int __unused)
  877. {
  878. static bool detected = false;
  879. /*
  880. * DBM is a non-conflicting feature, i.e., the kernel can safely
  881. * run a mix of CPUs with and without the feature. So, we
  882. * unconditionally enable the capability to allow any late CPU
  883. * to use the feature. We only enable the control bits on the
  884. * CPU if it actually supports the feature.
  885. *
  886. * We have to make sure we print the "feature" detection only
  887. * when at least one CPU actually uses it. So check if this CPU
  888. * can actually use it and print the message exactly once.
  889. *
  890. * This is safe as all CPUs (including secondary CPUs - due to the
  891. * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
  892. * go through the "matches" check exactly once. Also, if a CPU
  893. * matches the criteria, it is guaranteed that the CPU will turn
  894. * the DBM on, as the capability is unconditionally enabled.
  895. */
  896. if (!detected && cpu_can_use_dbm(cap)) {
  897. detected = true;
  898. pr_info("detected: Hardware dirty bit management\n");
  899. }
  900. return true;
  901. }
  902. #endif
  903. #ifdef CONFIG_ARM64_VHE
  904. static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
  905. {
  906. return is_kernel_in_hyp_mode();
  907. }
  908. static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
  909. {
  910. /*
  911. * Copy register values that aren't redirected by hardware.
  912. *
  913. * Before code patching, we only set tpidr_el1, so all CPUs need to copy
  914. * this value to tpidr_el2 before we patch the code. Once we've done
  915. * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
  916. * do anything here.
  917. */
  918. if (!alternatives_applied)
  919. write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
  920. }
  921. #endif
  922. static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
  923. {
  924. u64 val = read_sysreg_s(SYS_CLIDR_EL1);
  925. /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
  926. WARN_ON(val & (7 << 27 | 7 << 21));
  927. }
  928. #ifdef CONFIG_ARM64_SSBD
  929. static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
  930. {
  931. if (user_mode(regs))
  932. return 1;
  933. if (instr & BIT(PSTATE_Imm_shift))
  934. regs->pstate |= PSR_SSBS_BIT;
  935. else
  936. regs->pstate &= ~PSR_SSBS_BIT;
  937. arm64_skip_faulting_instruction(regs, 4);
  938. return 0;
  939. }
  940. static struct undef_hook ssbs_emulation_hook = {
  941. .instr_mask = ~(1U << PSTATE_Imm_shift),
  942. .instr_val = 0xd500401f | PSTATE_SSBS,
  943. .fn = ssbs_emulation_handler,
  944. };
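The hook above matches both immediate forms of the MSR SSBS instruction by masking out the immediate bit that carries the requested value; the handler then re-reads that bit from the trapped instruction. A generic sketch of this mask/value matching, where the shift and base encoding are treated as placeholders rather than authoritative values:

/* Editor's sketch: undef-hook style instruction matching via mask/value pairs. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IMM_SHIFT	8	/* assumed position of the immediate bit */

struct undef_hook_model {
	uint32_t	instr_mask;
	uint32_t	instr_val;
};

static bool hook_matches(const struct undef_hook_model *h, uint32_t instr)
{
	return (instr & h->instr_mask) == h->instr_val;
}

int main(void)
{
	const uint32_t base = 0xd500401f;	/* placeholder encoding, treated as opaque */
	const struct undef_hook_model hook = {
		.instr_mask = ~(1U << IMM_SHIFT),
		.instr_val  = base,
	};

	/* Both immediate variants hit the same handler... */
	printf("imm=0: %d  imm=1: %d\n",
	       hook_matches(&hook, base),
	       hook_matches(&hook, base | (1U << IMM_SHIFT)));
	/* ...and the handler recovers the immediate from the trapped instruction. */
	printf("requested SSBS value: %u\n",
	       ((base | (1U << IMM_SHIFT)) >> IMM_SHIFT) & 1U);
	return 0;
}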
  945. static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
  946. {
  947. static bool undef_hook_registered = false;
  948. static DEFINE_SPINLOCK(hook_lock);
  949. spin_lock(&hook_lock);
  950. if (!undef_hook_registered) {
  951. register_undef_hook(&ssbs_emulation_hook);
  952. undef_hook_registered = true;
  953. }
  954. spin_unlock(&hook_lock);
  955. if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
  956. sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
  957. arm64_set_ssbd_mitigation(false);
  958. } else {
  959. arm64_set_ssbd_mitigation(true);
  960. }
  961. }
  962. #endif /* CONFIG_ARM64_SSBD */
  963. #ifdef CONFIG_ARM64_PAN
  964. static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
  965. {
  966. /*
  967. * We modify PSTATE. This won't work from irq context as the PSTATE
  968. * is discarded once we return from the exception.
  969. */
  970. WARN_ON_ONCE(in_interrupt());
  971. sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
  972. asm(SET_PSTATE_PAN(1));
  973. }
  974. #endif /* CONFIG_ARM64_PAN */
  975. #ifdef CONFIG_ARM64_RAS_EXTN
  976. static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
  977. {
  978. /* Firmware may have left a deferred SError in this register. */
  979. write_sysreg_s(0, SYS_DISR_EL1);
  980. }
  981. #endif /* CONFIG_ARM64_RAS_EXTN */
  982. static const struct arm64_cpu_capabilities arm64_features[] = {
  983. {
  984. .desc = "GIC system register CPU interface",
  985. .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
  986. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  987. .matches = has_useable_gicv3_cpuif,
  988. .sys_reg = SYS_ID_AA64PFR0_EL1,
  989. .field_pos = ID_AA64PFR0_GIC_SHIFT,
  990. .sign = FTR_UNSIGNED,
  991. .min_field_value = 1,
  992. },
  993. #ifdef CONFIG_ARM64_PAN
  994. {
  995. .desc = "Privileged Access Never",
  996. .capability = ARM64_HAS_PAN,
  997. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  998. .matches = has_cpuid_feature,
  999. .sys_reg = SYS_ID_AA64MMFR1_EL1,
  1000. .field_pos = ID_AA64MMFR1_PAN_SHIFT,
  1001. .sign = FTR_UNSIGNED,
  1002. .min_field_value = 1,
  1003. .cpu_enable = cpu_enable_pan,
  1004. },
  1005. #endif /* CONFIG_ARM64_PAN */
  1006. #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
  1007. {
  1008. .desc = "LSE atomic instructions",
  1009. .capability = ARM64_HAS_LSE_ATOMICS,
  1010. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1011. .matches = has_cpuid_feature,
  1012. .sys_reg = SYS_ID_AA64ISAR0_EL1,
  1013. .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
  1014. .sign = FTR_UNSIGNED,
  1015. .min_field_value = 2,
  1016. },
  1017. #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
  1018. {
  1019. .desc = "Software prefetching using PRFM",
  1020. .capability = ARM64_HAS_NO_HW_PREFETCH,
  1021. .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
  1022. .matches = has_no_hw_prefetch,
  1023. },
  1024. #ifdef CONFIG_ARM64_UAO
  1025. {
  1026. .desc = "User Access Override",
  1027. .capability = ARM64_HAS_UAO,
  1028. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1029. .matches = has_cpuid_feature,
  1030. .sys_reg = SYS_ID_AA64MMFR2_EL1,
  1031. .field_pos = ID_AA64MMFR2_UAO_SHIFT,
  1032. .min_field_value = 1,
  1033. /*
  1034. * We rely on stop_machine() calling uao_thread_switch() to set
  1035. * UAO immediately after patching.
  1036. */
  1037. },
  1038. #endif /* CONFIG_ARM64_UAO */
  1039. #ifdef CONFIG_ARM64_PAN
  1040. {
  1041. .capability = ARM64_ALT_PAN_NOT_UAO,
  1042. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1043. .matches = cpufeature_pan_not_uao,
  1044. },
  1045. #endif /* CONFIG_ARM64_PAN */
  1046. #ifdef CONFIG_ARM64_VHE
  1047. {
  1048. .desc = "Virtualization Host Extensions",
  1049. .capability = ARM64_HAS_VIRT_HOST_EXTN,
  1050. .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
  1051. .matches = runs_at_el2,
		.cpu_enable = cpu_copy_el2regs,
	},
#endif	/* CONFIG_ARM64_VHE */
	{
		.desc = "32-bit EL0 Support",
		.capability = ARM64_HAS_32BIT_EL0,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_EL0_SHIFT,
		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
	},
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	{
		.desc = "Kernel page table isolation (KPTI)",
		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
		/*
		 * The ID feature fields below are used to indicate that
		 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
		 * more details.
		 */
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_CSV3_SHIFT,
		.min_field_value = 1,
		.matches = unmap_kernel_at_el0,
		.cpu_enable = kpti_install_ng_mappings,
	},
#endif
	{
		/* FP/SIMD is not implemented */
		.capability = ARM64_HAS_NO_FPSIMD,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.min_field_value = 0,
		.matches = has_no_fpsimd,
	},
#ifdef CONFIG_ARM64_PMEM
	{
		.desc = "Data cache clean to Point of Persistence",
		.capability = ARM64_HAS_DCPOP,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
		.min_field_value = 1,
	},
#endif
#ifdef CONFIG_ARM64_SVE
	{
		.desc = "Scalable Vector Extension",
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.capability = ARM64_SVE,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_SVE_SHIFT,
		.min_field_value = ID_AA64PFR0_SVE,
		.matches = has_cpuid_feature,
		.cpu_enable = sve_kernel_enable,
	},
#endif /* CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_RAS_EXTN
	{
		.desc = "RAS Extension Support",
		.capability = ARM64_HAS_RAS_EXTN,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_RAS_SHIFT,
		.min_field_value = ID_AA64PFR0_RAS_V1,
		.cpu_enable = cpu_clear_disr,
	},
#endif /* CONFIG_ARM64_RAS_EXTN */
	{
		.desc = "Data cache clean to the PoU not required for I/D coherence",
		.capability = ARM64_HAS_CACHE_IDC,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cache_idc,
		.cpu_enable = cpu_emulate_effective_ctr,
	},
	{
		.desc = "Instruction cache invalidation not required for I/D coherence",
		.capability = ARM64_HAS_CACHE_DIC,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cache_dic,
	},
	{
		.desc = "Stage-2 Force Write-Back",
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.capability = ARM64_HAS_STAGE2_FWB,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR2_FWB_SHIFT,
		.min_field_value = 1,
		.matches = has_cpuid_feature,
		.cpu_enable = cpu_has_fwb,
	},
#ifdef CONFIG_ARM64_HW_AFDBM
	{
		/*
		 * Since we turn this on always, we don't want the user to
		 * think that the feature is available when it may not be.
		 * So hide the description.
		 *
		 * .desc = "Hardware pagetable Dirty Bit Management",
		 *
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		.capability = ARM64_HW_DBM,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
		.min_field_value = 2,
		.matches = has_hw_dbm,
		.cpu_enable = cpu_enable_hw_dbm,
	},
#endif
	{
		.desc = "CRC32 instructions",
		.capability = ARM64_HAS_CRC32,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypassing Safe (SSBS)",
		.capability = ARM64_SSBS,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR1_EL1,
		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
		.cpu_enable = cpu_enable_ssbs,
	},
#endif
#ifdef CONFIG_ARM64_CNP
	{
		.desc = "Common not Private translations",
		.capability = ARM64_HAS_CNP,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_useable_cnp,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR2_CNP_SHIFT,
		.min_field_value = 1,
		.cpu_enable = cpu_enable_cnp,
	},
#endif
	{},
};

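/*
 * Helper for describing an ELF hwcap: the hwcap bit 'cap' is reported to
 * userspace when the 'field' of ID register 'reg' is at least 'min_value'
 * in the system-wide sanitised view.
 */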
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
	{ \
		.desc = #cap, \
		.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
		.matches = has_cpuid_feature, \
		.sys_reg = reg, \
		.field_pos = field, \
		.sign = s, \
		.min_field_value = min_value, \
		.hwcap_type = cap_type, \
		.hwcap = cap, \
	}

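/*
 * HWCAPs advertised to userspace via AT_HWCAP, derived from the sanitised
 * ID registers. For example, the first AES entry below reports HWCAP_PMULL
 * when ID_AA64ISAR0_EL1.AES >= 2, while the second reports HWCAP_AES when
 * the same field is >= 1.
 */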
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
#endif
	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
	{},
};

static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};

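/* Record a detected hwcap in the native or compat ELF hwcap mask. */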
static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		elf_hwcap |= cap->hwcap;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		WARN_ON(1);
		break;
	}
}

/* Check if we have a particular HWCAP enabled */
static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(1);
		rc = false;
	}

	return rc;
}

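/* Set every hwcap in the table whose system-wide ID register check passes. */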
static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
	/* We support emulation of accesses to CPU ID feature registers */
	elf_hwcap |= HWCAP_CPUID;
	for (; hwcaps->matches; hwcaps++)
		if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
			cap_set_elf_hwcap(hwcaps);
}

/*
 * Check if the current CPU has a given feature capability.
 * Should be called from non-preemptible context.
 */
static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
			       unsigned int cap)
{
	const struct arm64_cpu_capabilities *caps;

	if (WARN_ON(preemptible()))
		return false;

	for (caps = cap_array; caps->matches; caps++)
		if (caps->capability == cap)
			return caps->matches(caps, SCOPE_LOCAL_CPU);

	return false;
}

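/*
 * Detect the capabilities in 'caps' that match 'scope_mask' on this CPU and
 * record them in the cpu_hwcaps bitmap, printing 'info' and the description
 * the first time a capability is seen.
 */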
static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
				      u16 scope_mask, const char *info)
{
	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
	for (; caps->matches; caps++) {
		if (!(caps->type & scope_mask) ||
		    !caps->matches(caps, cpucap_default_scope(caps)))
			continue;

		if (!cpus_have_cap(caps->capability) && caps->desc)
			pr_info("%s %s\n", info, caps->desc);
		cpus_set_cap(caps->capability);
	}
}

static void update_cpu_capabilities(u16 scope_mask)
{
	__update_cpu_capabilities(arm64_errata, scope_mask,
				  "enabling workaround for");
	__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
}

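/* stop_machine() callback: run the capability's cpu_enable() on this CPU. */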
static int __enable_cpu_capability(void *arg)
{
	const struct arm64_cpu_capabilities *cap = arg;

	cap->cpu_enable(cap);
	return 0;
}

/*
 * Run through the enabled capabilities and enable each of them
 * on all active CPUs.
 */
static void __init
__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			  u16 scope_mask)
{
	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
	for (; caps->matches; caps++) {
		unsigned int num = caps->capability;

		if (!(caps->type & scope_mask) || !cpus_have_cap(num))
			continue;

		/* Ensure cpus_have_const_cap(num) works */
		static_branch_enable(&cpu_hwcap_keys[num]);

		if (caps->cpu_enable) {
			/*
			 * Capabilities with SCOPE_BOOT_CPU scope are finalised
			 * before any secondary CPU boots. Thus, each secondary
			 * will enable the capability as appropriate via
			 * check_local_cpu_capabilities(). The only exception is
			 * the boot CPU, for which the capability must be
			 * enabled here. This approach avoids costly
			 * stop_machine() calls for this case.
			 *
			 * Otherwise, use stop_machine() as it schedules the
			 * work allowing us to modify PSTATE, instead of
			 * on_each_cpu() which uses an IPI, giving us a PSTATE
			 * that disappears when we return.
			 */
			if (scope_mask & SCOPE_BOOT_CPU)
				caps->cpu_enable(caps);
			else
				stop_machine(__enable_cpu_capability,
					     (void *)caps, cpu_online_mask);
		}
	}
}

static void __init enable_cpu_capabilities(u16 scope_mask)
{
	__enable_cpu_capabilities(arm64_errata, scope_mask);
	__enable_cpu_capabilities(arm64_features, scope_mask);
}

/*
 * Run through the list of capabilities to check for conflicts.
 * If the system has already detected a capability, take necessary
 * action on this CPU.
 *
 * Returns "false" on conflicts.
 */
static bool
__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
			u16 scope_mask)
{
	bool cpu_has_cap, system_has_cap;

	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;

	for (; caps->matches; caps++) {
		if (!(caps->type & scope_mask))
			continue;

		cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
		system_has_cap = cpus_have_cap(caps->capability);

		if (system_has_cap) {
			/*
			 * Check if the new CPU misses an advertised feature,
			 * which is not safe to miss.
			 */
			if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
				break;
			/*
			 * We have to issue cpu_enable() irrespective of
			 * whether the CPU has it or not, as it is enabled
			 * system wide. It is up to the callback to take
			 * appropriate action on this CPU.
			 */
			if (caps->cpu_enable)
				caps->cpu_enable(caps);
		} else {
			/*
			 * Check if the CPU has this capability if it isn't
			 * safe to have when the system doesn't.
			 */
			if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
				break;
		}
	}

	if (caps->matches) {
		pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
			smp_processor_id(), caps->capability,
			caps->desc, system_has_cap, cpu_has_cap);
		return false;
	}

	return true;
}

static bool verify_local_cpu_caps(u16 scope_mask)
{
	return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
	       __verify_local_cpu_caps(arm64_features, scope_mask);
}

/*
 * Check for CPU features that are used in early boot
 * based on the Boot CPU value.
 */
static void check_early_cpu_features(void)
{
	verify_cpu_asid_bits();
	/*
	 * Early features are used by the kernel already. If there
	 * is a conflict, we cannot proceed further.
	 */
	if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
		cpu_panic_kernel();
}

static void
verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing HWCAP: %s\n",
				smp_processor_id(), caps->desc);
			cpu_die_early();
		}
}

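/*
 * A late CPU must support at least the SVE vector lengths already visible
 * through the sanitised ZCR view; otherwise it is parked via cpu_die_early().
 */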
static void verify_sve_features(void)
{
	u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	u64 zcr = read_zcr_features();

	unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
	unsigned int len = zcr & ZCR_ELx_LEN_MASK;

	if (len < safe_len || sve_verify_vq_map()) {
		pr_crit("CPU%d: SVE: required vector length(s) missing\n",
			smp_processor_id());
		cpu_die_early();
	}

	/* Add checks on other ZCR bits here if necessary */
}

/*
 * Run through the enabled system capabilities and enable each of them on
 * this CPU. The capabilities were decided based on the available CPUs at
 * boot time. Any new CPU should match the system-wide status of the
 * capability. If the new CPU doesn't have a capability which the system now
 * has enabled, we cannot do anything to fix it up and could cause unexpected
 * failures. So we park the CPU.
 */
static void verify_local_cpu_capabilities(void)
{
	/*
	 * The capabilities with SCOPE_BOOT_CPU are checked from
	 * check_early_cpu_features(), as they need to be verified
	 * on all secondary CPUs.
	 */
	if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
		cpu_die_early();

	verify_local_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		verify_local_elf_hwcaps(compat_elf_hwcaps);

	if (system_supports_sve())
		verify_sve_features();
}

void check_local_cpu_capabilities(void)
{
	/*
	 * All secondary CPUs should conform to the early CPU features
	 * in use by the kernel, based on the boot CPU.
	 */
	check_early_cpu_features();

	/*
	 * If we haven't finalised the system capabilities, this CPU gets
	 * a chance to update the errata workarounds and local features.
	 * Otherwise, this CPU should verify that it has all the system
	 * advertised capabilities.
	 */
	if (!sys_caps_initialised)
		update_cpu_capabilities(SCOPE_LOCAL_CPU);
	else
		verify_local_cpu_capabilities();
}

static void __init setup_boot_cpu_capabilities(void)
{
	/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
	/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
	enable_cpu_capabilities(SCOPE_BOOT_CPU);
}

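/*
 * Set once the system-wide capabilities have been finalised;
 * cpus_have_const_cap() uses this key to decide whether the per-capability
 * static keys can be trusted yet.
 */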
DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);

static void __init mark_const_caps_ready(void)
{
	static_branch_enable(&arm64_const_caps_ready);
}

extern const struct arm64_cpu_capabilities arm64_errata[];

bool this_cpu_has_cap(unsigned int cap)
{
	return (__this_cpu_has_cap(arm64_features, cap) ||
		__this_cpu_has_cap(arm64_errata, cap));
}

static void __init setup_system_capabilities(void)
{
	/*
	 * We have finalised the system-wide safe feature registers,
	 * so finalise the capabilities that depend on them. Also enable
	 * any available capabilities that are not enabled already.
	 */
	update_cpu_capabilities(SCOPE_SYSTEM);
	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
}

void __init setup_cpu_features(void)
{
	u32 cwg;

	setup_system_capabilities();
	mark_const_caps_ready();
	setup_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		setup_elf_hwcaps(compat_elf_hwcaps);

	if (system_uses_ttbr0_pan())
		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");

	sve_setup();
	minsigstksz_setup();

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming %d\n",
			ARCH_DMA_MINALIGN);
}

static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}

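/*
 * Switch TTBR1_EL1 back to swapper_pg_dir once the CNP capability is
 * enabled; the replacement path sets the Common-not-Private bit in the
 * TTBR value when the system supports it.
 */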
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}

/*
 * We emulate only the following system register space.
 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
 * See Table C5-6 System instruction encodings for System register accesses,
 * ARMv8 ARM (ARM DDI 0487A.f) for more details.
 */
static inline bool __attribute_const__ is_emulated(u32 id)
{
	return (sys_reg_Op0(id) == 0x3 &&
		sys_reg_CRn(id) == 0x0 &&
		sys_reg_Op1(id) == 0x0 &&
		(sys_reg_CRm(id) == 0 ||
		 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
}

/*
 * With CRm == 0, reg should be one of:
 * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
 */
static inline int emulate_id_reg(u32 id, u64 *valp)
{
	switch (id) {
	case SYS_MIDR_EL1:
		*valp = read_cpuid_id();
		break;
	case SYS_MPIDR_EL1:
		*valp = SYS_MPIDR_SAFE_VAL;
		break;
	case SYS_REVIDR_EL1:
		/* IMPLEMENTATION DEFINED values are emulated with 0 */
		*valp = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

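/*
 * Emulate a read from the emulated register space: CRm == 0 registers are
 * handled by emulate_id_reg(), tracked feature registers return their
 * sanitised user-visible value, and anything untracked reads as zero.
 */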
static int emulate_sys_reg(u32 id, u64 *valp)
{
	struct arm64_ftr_reg *regp;

	if (!is_emulated(id))
		return -EINVAL;

	if (sys_reg_CRm(id) == 0)
		return emulate_id_reg(id, valp);

	regp = get_arm64_ftr_reg(id);
	if (regp)
		*valp = arm64_ftr_reg_user_value(regp);
	else
		/*
		 * The untracked registers are either IMPLEMENTATION DEFINED
		 * (e.g., ID_AFR0_EL1) or reserved RAZ.
		 */
		*valp = 0;

	return 0;
}

int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
{
	int rc;
	u64 val;

	rc = emulate_sys_reg(sys_reg, &val);
	if (!rc) {
		pt_regs_write_reg(regs, rt, val);
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	}
	return rc;
}

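/*
 * Undef hook handler: decode the trapped MRS instruction, emulate the read
 * and write the result into the destination register.
 */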
static int emulate_mrs(struct pt_regs *regs, u32 insn)
{
	u32 sys_reg, rt;

	/*
	 * sys_reg values are defined as used in mrs/msr instructions.
	 * Shift the imm value to get the encoding.
	 */
	sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
	rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
	return do_emulate_mrs(regs, sys_reg, rt);
}

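/* Trap MRS instructions executed at EL0 so they can be emulated above. */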
static struct undef_hook mrs_hook = {
	.instr_mask = 0xfff00000,
	.instr_val = 0xd5300000,
	.pstate_mask = PSR_AA32_MODE_MASK,
	.pstate_val = PSR_MODE_EL0t,
	.fn = emulate_mrs,
};

static int __init enable_mrs_emulation(void)
{
	register_undef_hook(&mrs_hook);
	return 0;
}

core_initcall(enable_mrs_emulation);