cpufeature.c

  1. /*
  2. * Contains CPU feature definitions
  3. *
  4. * Copyright (C) 2015 ARM Ltd.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #define pr_fmt(fmt) "CPU features: " fmt
  19. #include <linux/bsearch.h>
  20. #include <linux/cpumask.h>
  21. #include <linux/sort.h>
  22. #include <linux/stop_machine.h>
  23. #include <linux/types.h>
  24. #include <linux/mm.h>
  25. #include <asm/cpu.h>
  26. #include <asm/cpufeature.h>
  27. #include <asm/cpu_ops.h>
  28. #include <asm/fpsimd.h>
  29. #include <asm/mmu_context.h>
  30. #include <asm/processor.h>
  31. #include <asm/sysreg.h>
  32. #include <asm/traps.h>
  33. #include <asm/virt.h>
  34. unsigned long elf_hwcap __read_mostly;
  35. EXPORT_SYMBOL_GPL(elf_hwcap);
  36. #ifdef CONFIG_COMPAT
  37. #define COMPAT_ELF_HWCAP_DEFAULT \
  38. (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
  39. COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
  40. COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
  41. COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
  42. COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
  43. COMPAT_HWCAP_LPAE)
  44. unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
  45. unsigned int compat_elf_hwcap2 __read_mostly;
  46. #endif
  47. DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
  48. EXPORT_SYMBOL(cpu_hwcaps);
  49. /*
  50. * Flag to indicate if we have computed the system wide
  51. * capabilities based on the boot time active CPUs. This
  52. * will be used to determine if a new booting CPU should
  53. * go through the verification process to make sure that it
  54. * supports the system capabilities, without using a hotplug
  55. * notifier.
  56. */
  57. static bool sys_caps_initialised;
  58. static inline void set_sys_caps_initialised(void)
  59. {
  60. sys_caps_initialised = true;
  61. }
  62. static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
  63. {
  64. /* file-wide pr_fmt adds "CPU features: " prefix */
  65. pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
  66. return 0;
  67. }
  68. static struct notifier_block cpu_hwcaps_notifier = {
  69. .notifier_call = dump_cpu_hwcaps
  70. };
  71. static int __init register_cpu_hwcaps_dumper(void)
  72. {
  73. atomic_notifier_chain_register(&panic_notifier_list,
  74. &cpu_hwcaps_notifier);
  75. return 0;
  76. }
  77. __initcall(register_cpu_hwcaps_dumper);
  78. DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
  79. EXPORT_SYMBOL(cpu_hwcap_keys);
  80. #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  81. { \
  82. .sign = SIGNED, \
  83. .visible = VISIBLE, \
  84. .strict = STRICT, \
  85. .type = TYPE, \
  86. .shift = SHIFT, \
  87. .width = WIDTH, \
  88. .safe_val = SAFE_VAL, \
  89. }
  90. /* Define a feature with unsigned values */
  91. #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  92. __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
  93. /* Define a feature with a signed value */
  94. #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  95. __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
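/*
 * Sentinel marking the end of an arm64_ftr_bits table: the zero width is
 * what the "for (ftrp = reg->ftr_bits; ftrp->width; ftrp++)" walkers in
 * this file test for.
 */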
  96. #define ARM64_FTR_END \
  97. { \
  98. .width = 0, \
  99. }
  100. /* meta feature for alternatives */
  101. static bool __maybe_unused
  102. cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
  103. /*
  104. * NOTE: Any changes to the visibility of features should be kept in
  105. * sync with the documentation of the CPU feature register ABI.
  106. */
  107. static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
  108. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
  109. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
  110. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
  111. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
  112. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
  113. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
  114. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
  115. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
  116. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
  117. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
  118. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
  119. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
  120. ARM64_FTR_END,
  121. };
  122. static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
  123. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
  124. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
  125. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
  126. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
  127. ARM64_FTR_END,
  128. };
  129. static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
  130. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
  131. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
  132. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
  133. ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
  134. FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
  135. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
  136. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
  137. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
  138. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
  139. /* Linux doesn't care about the EL3 */
  140. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
  141. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
  142. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
  143. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
  144. ARM64_FTR_END,
  145. };
  146. static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
  147. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
  148. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
  149. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
  150. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
  151. /* Linux shouldn't care about secure memory */
  152. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
  153. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
  154. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
  155. /*
  156. * Differing PARange is fine as long as all peripherals and memory are mapped
  157. * within the minimum PARange of all CPUs
  158. */
  159. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
  160. ARM64_FTR_END,
  161. };
  162. static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
  163. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
  164. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
  165. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
  166. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
  167. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
  168. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
  169. ARM64_FTR_END,
  170. };
  171. static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
  172. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
  173. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
  174. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
  175. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
  176. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
  177. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
  178. ARM64_FTR_END,
  179. };
  180. static const struct arm64_ftr_bits ftr_ctr[] = {
  181. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
  182. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
  183. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
  184. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
  185. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
  186. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
  187. /*
  188. * Linux can handle differing I-cache policies. Userspace JITs will
  189. * make use of *minLine.
  190. * If we have differing I-cache policies, report it as the weakest - VIPT.
  191. */
  192. ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
  193. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
  194. ARM64_FTR_END,
  195. };
  196. struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
  197. .name = "SYS_CTR_EL0",
  198. .ftr_bits = ftr_ctr
  199. };
  200. static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
  201. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */
  202. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */
  203. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
  204. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */
  205. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */
  206. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */
  207. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */
  208. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */
  209. ARM64_FTR_END,
  210. };
  211. static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
  212. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
  213. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
  214. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
  215. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
  216. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
  217. /*
  218. * We can instantiate multiple PMU instances with different levels
  219. * of support.
  220. */
  221. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
  222. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
  223. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
  224. ARM64_FTR_END,
  225. };
  226. static const struct arm64_ftr_bits ftr_mvfr2[] = {
  227. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
  228. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
  229. ARM64_FTR_END,
  230. };
  231. static const struct arm64_ftr_bits ftr_dczid[] = {
  232. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */
  233. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */
  234. ARM64_FTR_END,
  235. };
  236. static const struct arm64_ftr_bits ftr_id_isar5[] = {
  237. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
  238. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
  239. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
  240. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
  241. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
  242. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
  243. ARM64_FTR_END,
  244. };
  245. static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
  246. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */
  247. ARM64_FTR_END,
  248. };
  249. static const struct arm64_ftr_bits ftr_id_pfr0[] = {
  250. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
  251. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
  252. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */
  253. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */
  254. ARM64_FTR_END,
  255. };
  256. static const struct arm64_ftr_bits ftr_id_dfr0[] = {
  257. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  258. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
  259. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  260. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  261. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  262. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  263. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  264. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  265. ARM64_FTR_END,
  266. };
  267. static const struct arm64_ftr_bits ftr_zcr[] = {
  268. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
  269. ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
  270. ARM64_FTR_END,
  271. };
  272. /*
  273. * Common ftr bits for a 32bit register with all hidden, strict
  274. * attributes, with 4bit feature fields and a default safe value of
  275. * 0. Covers the following 32bit registers:
  276. * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
  277. */
  278. static const struct arm64_ftr_bits ftr_generic_32bits[] = {
  279. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  280. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
  281. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  282. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  283. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  284. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  285. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  286. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  287. ARM64_FTR_END,
  288. };
  289. /* Table for a single 32bit feature value */
  290. static const struct arm64_ftr_bits ftr_single32[] = {
  291. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
  292. ARM64_FTR_END,
  293. };
  294. static const struct arm64_ftr_bits ftr_raz[] = {
  295. ARM64_FTR_END,
  296. };
  297. #define ARM64_FTR_REG(id, table) { \
  298. .sys_id = id, \
  299. .reg = &(struct arm64_ftr_reg){ \
  300. .name = #id, \
  301. .ftr_bits = &((table)[0]), \
  302. }}
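/*
 * Table of ID registers tracked by the kernel. Entries must be kept in
 * ascending sys_id order: get_arm64_ftr_reg() relies on a binary search,
 * and sort_ftr_regs() only verifies the ordering at boot rather than
 * sorting it.
 */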
  303. static const struct __ftr_reg_entry {
  304. u32 sys_id;
  305. struct arm64_ftr_reg *reg;
  306. } arm64_ftr_regs[] = {
  307. /* Op1 = 0, CRn = 0, CRm = 1 */
  308. ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
  309. ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
  310. ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
  311. ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
  312. ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
  313. ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
  314. ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
  315. /* Op1 = 0, CRn = 0, CRm = 2 */
  316. ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
  317. ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
  318. ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
  319. ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
  320. ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
  321. ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
  322. ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
  323. /* Op1 = 0, CRn = 0, CRm = 3 */
  324. ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
  325. ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
  326. ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
  327. /* Op1 = 0, CRn = 0, CRm = 4 */
  328. ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
  329. ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
  330. ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
  331. /* Op1 = 0, CRn = 0, CRm = 5 */
  332. ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
  333. ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
  334. /* Op1 = 0, CRn = 0, CRm = 6 */
  335. ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
  336. ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
  337. /* Op1 = 0, CRn = 0, CRm = 7 */
  338. ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
  339. ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
  340. ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
  341. /* Op1 = 0, CRn = 1, CRm = 2 */
  342. ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
  343. /* Op1 = 3, CRn = 0, CRm = 0 */
  344. { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
  345. ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
  346. /* Op1 = 3, CRn = 14, CRm = 0 */
  347. ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
  348. };
  349. static int search_cmp_ftr_reg(const void *id, const void *regp)
  350. {
  351. return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
  352. }
  353. /*
354. * get_arm64_ftr_reg - Look up a feature register entry using its
355. * sys_reg() encoding. With the array arm64_ftr_regs sorted in
356. * ascending order of sys_id, we use a binary search to find a matching
357. * entry.
358. *
359. * returns - Upon success, the matching ftr_reg entry for id.
360. * - NULL on failure. It is up to the caller to decide
  361. * the impact of a failure.
  362. */
  363. static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
  364. {
  365. const struct __ftr_reg_entry *ret;
  366. ret = bsearch((const void *)(unsigned long)sys_id,
  367. arm64_ftr_regs,
  368. ARRAY_SIZE(arm64_ftr_regs),
  369. sizeof(arm64_ftr_regs[0]),
  370. search_cmp_ftr_reg);
  371. if (ret)
  372. return ret->reg;
  373. return NULL;
  374. }
  375. static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
  376. s64 ftr_val)
  377. {
  378. u64 mask = arm64_ftr_mask(ftrp);
  379. reg &= ~mask;
  380. reg |= (ftr_val << ftrp->shift) & mask;
  381. return reg;
  382. }
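/*
 * arm64_ftr_safe_value() picks the system-wide "safe" value for a field
 * when CPUs disagree: FTR_EXACT falls back to the table's safe_val,
 * FTR_LOWER_SAFE keeps the lower of the two values and FTR_HIGHER_SAFE
 * keeps the higher.
 */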
  383. static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
  384. s64 cur)
  385. {
  386. s64 ret = 0;
  387. switch (ftrp->type) {
  388. case FTR_EXACT:
  389. ret = ftrp->safe_val;
  390. break;
  391. case FTR_LOWER_SAFE:
  392. ret = new < cur ? new : cur;
  393. break;
  394. case FTR_HIGHER_SAFE:
  395. ret = new > cur ? new : cur;
  396. break;
  397. default:
  398. BUG();
  399. }
  400. return ret;
  401. }
  402. static void __init sort_ftr_regs(void)
  403. {
  404. int i;
  405. /* Check that the array is sorted so that we can do the binary search */
  406. for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
  407. BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
  408. }
  409. /*
410. * Initialise the CPU feature register from the boot CPU's values.
411. * Also initialises the strict_mask for the register.
  412. * Any bits that are not covered by an arm64_ftr_bits entry are considered
  413. * RES0 for the system-wide value, and must strictly match.
  414. */
  415. static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
  416. {
  417. u64 val = 0;
  418. u64 strict_mask = ~0x0ULL;
  419. u64 user_mask = 0;
  420. u64 valid_mask = 0;
  421. const struct arm64_ftr_bits *ftrp;
  422. struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
  423. BUG_ON(!reg);
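/*
 * Accumulate the boot CPU's view of each field: visible fields are
 * exposed to userspace via user_mask, hidden fields report their
 * safe_val through reg->user_val, and non-strict fields are cleared
 * from strict_mask so later CPUs may differ there without tainting.
 */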
  424. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  425. u64 ftr_mask = arm64_ftr_mask(ftrp);
  426. s64 ftr_new = arm64_ftr_value(ftrp, new);
  427. val = arm64_ftr_set_value(ftrp, val, ftr_new);
  428. valid_mask |= ftr_mask;
  429. if (!ftrp->strict)
  430. strict_mask &= ~ftr_mask;
  431. if (ftrp->visible)
  432. user_mask |= ftr_mask;
  433. else
  434. reg->user_val = arm64_ftr_set_value(ftrp,
  435. reg->user_val,
  436. ftrp->safe_val);
  437. }
  438. val &= valid_mask;
  439. reg->sys_val = val;
  440. reg->strict_mask = strict_mask;
  441. reg->user_mask = user_mask;
  442. }
  443. extern const struct arm64_cpu_capabilities arm64_errata[];
  444. static void __init setup_boot_cpu_capabilities(void);
  445. void __init init_cpu_features(struct cpuinfo_arm64 *info)
  446. {
447. /* Before we start using the table, make sure it is sorted */
  448. sort_ftr_regs();
  449. init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
  450. init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
  451. init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
  452. init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
  453. init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
  454. init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
  455. init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
  456. init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
  457. init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
  458. init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
  459. init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
  460. init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
  461. init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
  462. if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  463. init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
  464. init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
  465. init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
  466. init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
  467. init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
  468. init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
  469. init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
  470. init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
  471. init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
  472. init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
  473. init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
  474. init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
  475. init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
  476. init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
  477. init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
  478. init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
  479. }
  480. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  481. init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
  482. sve_init_vq_map();
  483. }
  484. /*
  485. * Detect and enable early CPU capabilities based on the boot CPU,
  486. * after we have initialised the CPU feature infrastructure.
  487. */
  488. setup_boot_cpu_capabilities();
  489. }
  490. static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
  491. {
  492. const struct arm64_ftr_bits *ftrp;
  493. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  494. s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
  495. s64 ftr_new = arm64_ftr_value(ftrp, new);
  496. if (ftr_cur == ftr_new)
  497. continue;
  498. /* Find a safe value */
  499. ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
  500. reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
  501. }
  502. }
  503. static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
  504. {
  505. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
  506. BUG_ON(!regp);
  507. update_cpu_ftr_reg(regp, val);
  508. if ((boot & regp->strict_mask) == (val & regp->strict_mask))
  509. return 0;
  510. pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
  511. regp->name, boot, cpu, val);
  512. return 1;
  513. }
  514. /*
  515. * Update system wide CPU feature registers with the values from a
  516. * non-boot CPU. Also performs SANITY checks to make sure that there
  517. * aren't any insane variations from that of the boot CPU.
  518. */
  519. void update_cpu_features(int cpu,
  520. struct cpuinfo_arm64 *info,
  521. struct cpuinfo_arm64 *boot)
  522. {
  523. int taint = 0;
  524. /*
  525. * The kernel can handle differing I-cache policies, but otherwise
  526. * caches should look identical. Userspace JITs will make use of
  527. * *minLine.
  528. */
  529. taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
  530. info->reg_ctr, boot->reg_ctr);
  531. /*
  532. * Userspace may perform DC ZVA instructions. Mismatched block sizes
  533. * could result in too much or too little memory being zeroed if a
  534. * process is preempted and migrated between CPUs.
  535. */
  536. taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
  537. info->reg_dczid, boot->reg_dczid);
  538. /* If different, timekeeping will be broken (especially with KVM) */
  539. taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
  540. info->reg_cntfrq, boot->reg_cntfrq);
  541. /*
  542. * The kernel uses self-hosted debug features and expects CPUs to
  543. * support identical debug features. We presently need CTX_CMPs, WRPs,
  544. * and BRPs to be identical.
  545. * ID_AA64DFR1 is currently RES0.
  546. */
  547. taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
  548. info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
  549. taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
  550. info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
  551. /*
  552. * Even in big.LITTLE, processors should be identical instruction-set
  553. * wise.
  554. */
  555. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
  556. info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
  557. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
  558. info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
  559. /*
  560. * Differing PARange support is fine as long as all peripherals and
  561. * memory are mapped within the minimum PARange of all CPUs.
  562. * Linux should not care about secure memory.
  563. */
  564. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
  565. info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
  566. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
  567. info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
  568. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
  569. info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
  570. /*
  571. * EL3 is not our concern.
  572. * ID_AA64PFR1 is currently RES0.
  573. */
  574. taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
  575. info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
  576. taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
  577. info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
  578. taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
  579. info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
  580. /*
  581. * If we have AArch32, we care about 32-bit features for compat.
  582. * If the system doesn't support AArch32, don't update them.
  583. */
  584. if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  585. id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  586. taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
  587. info->reg_id_dfr0, boot->reg_id_dfr0);
  588. taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
  589. info->reg_id_isar0, boot->reg_id_isar0);
  590. taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
  591. info->reg_id_isar1, boot->reg_id_isar1);
  592. taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
  593. info->reg_id_isar2, boot->reg_id_isar2);
  594. taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
  595. info->reg_id_isar3, boot->reg_id_isar3);
  596. taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
  597. info->reg_id_isar4, boot->reg_id_isar4);
  598. taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
  599. info->reg_id_isar5, boot->reg_id_isar5);
  600. /*
  601. * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
  602. * ACTLR formats could differ across CPUs and therefore would have to
  603. * be trapped for virtualization anyway.
  604. */
  605. taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
  606. info->reg_id_mmfr0, boot->reg_id_mmfr0);
  607. taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
  608. info->reg_id_mmfr1, boot->reg_id_mmfr1);
  609. taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
  610. info->reg_id_mmfr2, boot->reg_id_mmfr2);
  611. taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
  612. info->reg_id_mmfr3, boot->reg_id_mmfr3);
  613. taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
  614. info->reg_id_pfr0, boot->reg_id_pfr0);
  615. taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
  616. info->reg_id_pfr1, boot->reg_id_pfr1);
  617. taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
  618. info->reg_mvfr0, boot->reg_mvfr0);
  619. taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
  620. info->reg_mvfr1, boot->reg_mvfr1);
  621. taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
  622. info->reg_mvfr2, boot->reg_mvfr2);
  623. }
  624. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  625. taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
  626. info->reg_zcr, boot->reg_zcr);
  627. /* Probe vector lengths, unless we already gave up on SVE */
  628. if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  629. !sys_caps_initialised)
  630. sve_update_vq_map();
  631. }
  632. /*
  633. * Mismatched CPU features are a recipe for disaster. Don't even
  634. * pretend to support them.
  635. */
  636. if (taint) {
  637. pr_warn_once("Unsupported CPU feature variation detected.\n");
  638. add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
  639. }
  640. }
  641. u64 read_sanitised_ftr_reg(u32 id)
  642. {
  643. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
  644. /* We shouldn't get a request for an unsupported register */
  645. BUG_ON(!regp);
  646. return regp->sys_val;
  647. }
  648. #define read_sysreg_case(r) \
  649. case r: return read_sysreg_s(r)
  650. /*
651. * __read_sysreg_by_encoding() - Used by a STARTING CPU before cpuinfo is populated.
652. * Read the system register on the current CPU.
  653. */
  654. static u64 __read_sysreg_by_encoding(u32 sys_id)
  655. {
  656. switch (sys_id) {
  657. read_sysreg_case(SYS_ID_PFR0_EL1);
  658. read_sysreg_case(SYS_ID_PFR1_EL1);
  659. read_sysreg_case(SYS_ID_DFR0_EL1);
  660. read_sysreg_case(SYS_ID_MMFR0_EL1);
  661. read_sysreg_case(SYS_ID_MMFR1_EL1);
  662. read_sysreg_case(SYS_ID_MMFR2_EL1);
  663. read_sysreg_case(SYS_ID_MMFR3_EL1);
  664. read_sysreg_case(SYS_ID_ISAR0_EL1);
  665. read_sysreg_case(SYS_ID_ISAR1_EL1);
  666. read_sysreg_case(SYS_ID_ISAR2_EL1);
  667. read_sysreg_case(SYS_ID_ISAR3_EL1);
  668. read_sysreg_case(SYS_ID_ISAR4_EL1);
  669. read_sysreg_case(SYS_ID_ISAR5_EL1);
  670. read_sysreg_case(SYS_MVFR0_EL1);
  671. read_sysreg_case(SYS_MVFR1_EL1);
  672. read_sysreg_case(SYS_MVFR2_EL1);
  673. read_sysreg_case(SYS_ID_AA64PFR0_EL1);
  674. read_sysreg_case(SYS_ID_AA64PFR1_EL1);
  675. read_sysreg_case(SYS_ID_AA64DFR0_EL1);
  676. read_sysreg_case(SYS_ID_AA64DFR1_EL1);
  677. read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
  678. read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
  679. read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
  680. read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
  681. read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
  682. read_sysreg_case(SYS_CNTFRQ_EL0);
  683. read_sysreg_case(SYS_CTR_EL0);
  684. read_sysreg_case(SYS_DCZID_EL0);
  685. default:
  686. BUG();
  687. return 0;
  688. }
  689. }
  690. #include <linux/irqchip/arm-gic-v3.h>
  691. static bool
  692. feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
  693. {
  694. int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
  695. return val >= entry->min_field_value;
  696. }
  697. static bool
  698. has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
  699. {
  700. u64 val;
  701. WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
  702. if (scope == SCOPE_SYSTEM)
  703. val = read_sanitised_ftr_reg(entry->sys_reg);
  704. else
  705. val = __read_sysreg_by_encoding(entry->sys_reg);
  706. return feature_matches(val, entry);
  707. }
  708. static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
  709. {
  710. bool has_sre;
  711. if (!has_cpuid_feature(entry, scope))
  712. return false;
  713. has_sre = gic_enable_sre();
  714. if (!has_sre)
  715. pr_warn_once("%s present but disabled by higher exception level\n",
  716. entry->desc);
  717. return has_sre;
  718. }
  719. static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
  720. {
  721. u32 midr = read_cpuid_id();
  722. /* Cavium ThunderX pass 1.x and 2.x */
  723. return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
  724. MIDR_CPU_VAR_REV(0, 0),
  725. MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
  726. }
  727. static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
  728. {
  729. u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
  730. return cpuid_feature_extract_signed_field(pfr0,
  731. ID_AA64PFR0_FP_SHIFT) < 0;
  732. }
  733. static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
  734. int __unused)
  735. {
  736. return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT);
  737. }
  738. static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
  739. int __unused)
  740. {
  741. return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT);
  742. }
  743. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  744. static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
  745. static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
  746. int scope)
  747. {
  748. /* List of CPUs that are not vulnerable and don't need KPTI */
  749. static const struct midr_range kpti_safe_list[] = {
  750. MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
  751. MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
  752. { /* sentinel */ }
  753. };
  754. char const *str = "command line option";
  755. /*
  756. * For reasons that aren't entirely clear, enabling KPTI on Cavium
  757. * ThunderX leads to apparent I-cache corruption of kernel text, which
  758. * ends as well as you might imagine. Don't even try.
  759. */
  760. if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
  761. str = "ARM64_WORKAROUND_CAVIUM_27456";
  762. __kpti_forced = -1;
  763. }
  764. /* Forced? */
  765. if (__kpti_forced) {
  766. pr_info_once("kernel page table isolation forced %s by %s\n",
  767. __kpti_forced > 0 ? "ON" : "OFF", str);
  768. return __kpti_forced > 0;
  769. }
  770. /* Useful for KASLR robustness */
  771. if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
  772. return true;
  773. /* Don't force KPTI for CPUs that are not vulnerable */
  774. if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
  775. return false;
  776. /* Defer to CPU feature registers */
  777. return !has_cpuid_feature(entry, scope);
  778. }
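/*
 * Each CPU runs the remap helper via the idmap to rewrite the swapper
 * page tables with nG mappings; CPU 0 alone sets kpti_applied so that
 * CPUs brought up later skip the already-completed rewrite.
 */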
  779. static void
  780. kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
  781. {
  782. typedef void (kpti_remap_fn)(int, int, phys_addr_t);
  783. extern kpti_remap_fn idmap_kpti_install_ng_mappings;
  784. kpti_remap_fn *remap_fn;
  785. static bool kpti_applied = false;
  786. int cpu = smp_processor_id();
  787. if (kpti_applied)
  788. return;
  789. remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
  790. cpu_install_idmap();
  791. remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
  792. cpu_uninstall_idmap();
  793. if (!cpu)
  794. kpti_applied = true;
  795. return;
  796. }
  797. static int __init parse_kpti(char *str)
  798. {
  799. bool enabled;
  800. int ret = strtobool(str, &enabled);
  801. if (ret)
  802. return ret;
  803. __kpti_forced = enabled ? 1 : -1;
  804. return 0;
  805. }
  806. early_param("kpti", parse_kpti);
  807. #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
  808. #ifdef CONFIG_ARM64_HW_AFDBM
  809. static inline void __cpu_enable_hw_dbm(void)
  810. {
  811. u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
  812. write_sysreg(tcr, tcr_el1);
  813. isb();
  814. }
  815. static bool cpu_has_broken_dbm(void)
  816. {
  817. /* List of CPUs which have broken DBM support. */
  818. static const struct midr_range cpus[] = {
  819. #ifdef CONFIG_ARM64_ERRATUM_1024718
820. MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 - r1p0
  821. #endif
  822. {},
  823. };
  824. return is_midr_in_range_list(read_cpuid_id(), cpus);
  825. }
  826. static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
  827. {
  828. return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
  829. !cpu_has_broken_dbm();
  830. }
  831. static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
  832. {
  833. if (cpu_can_use_dbm(cap))
  834. __cpu_enable_hw_dbm();
  835. }
  836. static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
  837. int __unused)
  838. {
  839. static bool detected = false;
  840. /*
841. * DBM is a non-conflicting feature, i.e. the kernel can safely
842. * run a mix of CPUs with and without the feature. So, we
843. * unconditionally enable the capability to allow any late CPU
844. * to use the feature. We only enable the control bits on a
845. * CPU if it actually supports the feature.
846. *
847. * We have to make sure we print the "feature" detection only
848. * when at least one CPU actually uses it. So check if this CPU
849. * can actually use it and print the message exactly once.
850. *
851. * This is safe as all CPUs (including secondary CPUs - due to the
852. * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
853. * go through the "matches" check exactly once. Also, if a CPU
854. * matches the criteria, it is guaranteed that the CPU will turn
855. * DBM on, as the capability is unconditionally enabled.
  856. */
  857. if (!detected && cpu_can_use_dbm(cap)) {
  858. detected = true;
  859. pr_info("detected: Hardware dirty bit management\n");
  860. }
  861. return true;
  862. }
  863. #endif
  864. #ifdef CONFIG_ARM64_VHE
  865. static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
  866. {
  867. return is_kernel_in_hyp_mode();
  868. }
  869. static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
  870. {
  871. /*
  872. * Copy register values that aren't redirected by hardware.
  873. *
874. * Before code patching, we only set tpidr_el1; all CPUs need to copy
  875. * this value to tpidr_el2 before we patch the code. Once we've done
  876. * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
  877. * do anything here.
  878. */
  879. if (!alternatives_applied)
  880. write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
  881. }
  882. #endif
  883. static const struct arm64_cpu_capabilities arm64_features[] = {
  884. {
  885. .desc = "GIC system register CPU interface",
  886. .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
  887. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  888. .matches = has_useable_gicv3_cpuif,
  889. .sys_reg = SYS_ID_AA64PFR0_EL1,
  890. .field_pos = ID_AA64PFR0_GIC_SHIFT,
  891. .sign = FTR_UNSIGNED,
  892. .min_field_value = 1,
  893. },
  894. #ifdef CONFIG_ARM64_PAN
  895. {
  896. .desc = "Privileged Access Never",
  897. .capability = ARM64_HAS_PAN,
  898. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  899. .matches = has_cpuid_feature,
  900. .sys_reg = SYS_ID_AA64MMFR1_EL1,
  901. .field_pos = ID_AA64MMFR1_PAN_SHIFT,
  902. .sign = FTR_UNSIGNED,
  903. .min_field_value = 1,
  904. .cpu_enable = cpu_enable_pan,
  905. },
  906. #endif /* CONFIG_ARM64_PAN */
  907. #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
  908. {
  909. .desc = "LSE atomic instructions",
  910. .capability = ARM64_HAS_LSE_ATOMICS,
  911. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  912. .matches = has_cpuid_feature,
  913. .sys_reg = SYS_ID_AA64ISAR0_EL1,
  914. .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
  915. .sign = FTR_UNSIGNED,
  916. .min_field_value = 2,
  917. },
  918. #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
  919. {
  920. .desc = "Software prefetching using PRFM",
  921. .capability = ARM64_HAS_NO_HW_PREFETCH,
  922. .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
  923. .matches = has_no_hw_prefetch,
  924. },
  925. #ifdef CONFIG_ARM64_UAO
  926. {
  927. .desc = "User Access Override",
  928. .capability = ARM64_HAS_UAO,
  929. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  930. .matches = has_cpuid_feature,
  931. .sys_reg = SYS_ID_AA64MMFR2_EL1,
  932. .field_pos = ID_AA64MMFR2_UAO_SHIFT,
  933. .min_field_value = 1,
  934. /*
  935. * We rely on stop_machine() calling uao_thread_switch() to set
  936. * UAO immediately after patching.
  937. */
  938. },
  939. #endif /* CONFIG_ARM64_UAO */
  940. #ifdef CONFIG_ARM64_PAN
  941. {
  942. .capability = ARM64_ALT_PAN_NOT_UAO,
  943. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  944. .matches = cpufeature_pan_not_uao,
  945. },
  946. #endif /* CONFIG_ARM64_PAN */
  947. #ifdef CONFIG_ARM64_VHE
  948. {
  949. .desc = "Virtualization Host Extensions",
  950. .capability = ARM64_HAS_VIRT_HOST_EXTN,
  951. .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
  952. .matches = runs_at_el2,
  953. .cpu_enable = cpu_copy_el2regs,
  954. },
  955. #endif /* CONFIG_ARM64_VHE */
  956. {
  957. .desc = "32-bit EL0 Support",
  958. .capability = ARM64_HAS_32BIT_EL0,
  959. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  960. .matches = has_cpuid_feature,
  961. .sys_reg = SYS_ID_AA64PFR0_EL1,
  962. .sign = FTR_UNSIGNED,
  963. .field_pos = ID_AA64PFR0_EL0_SHIFT,
  964. .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
  965. },
  966. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  967. {
  968. .desc = "Kernel page table isolation (KPTI)",
  969. .capability = ARM64_UNMAP_KERNEL_AT_EL0,
  970. .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
  971. /*
  972. * The ID feature fields below are used to indicate that
  973. * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
  974. * more details.
  975. */
  976. .sys_reg = SYS_ID_AA64PFR0_EL1,
  977. .field_pos = ID_AA64PFR0_CSV3_SHIFT,
  978. .min_field_value = 1,
  979. .matches = unmap_kernel_at_el0,
  980. .cpu_enable = kpti_install_ng_mappings,
  981. },
  982. #endif
  983. {
  984. /* FP/SIMD is not implemented */
  985. .capability = ARM64_HAS_NO_FPSIMD,
  986. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  987. .min_field_value = 0,
  988. .matches = has_no_fpsimd,
  989. },
  990. #ifdef CONFIG_ARM64_PMEM
  991. {
  992. .desc = "Data cache clean to Point of Persistence",
  993. .capability = ARM64_HAS_DCPOP,
  994. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  995. .matches = has_cpuid_feature,
  996. .sys_reg = SYS_ID_AA64ISAR1_EL1,
  997. .field_pos = ID_AA64ISAR1_DPB_SHIFT,
  998. .min_field_value = 1,
  999. },
  1000. #endif
  1001. #ifdef CONFIG_ARM64_SVE
  1002. {
  1003. .desc = "Scalable Vector Extension",
  1004. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1005. .capability = ARM64_SVE,
  1006. .sys_reg = SYS_ID_AA64PFR0_EL1,
  1007. .sign = FTR_UNSIGNED,
  1008. .field_pos = ID_AA64PFR0_SVE_SHIFT,
  1009. .min_field_value = ID_AA64PFR0_SVE,
  1010. .matches = has_cpuid_feature,
  1011. .cpu_enable = sve_kernel_enable,
  1012. },
  1013. #endif /* CONFIG_ARM64_SVE */
  1014. #ifdef CONFIG_ARM64_RAS_EXTN
  1015. {
  1016. .desc = "RAS Extension Support",
  1017. .capability = ARM64_HAS_RAS_EXTN,
  1018. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1019. .matches = has_cpuid_feature,
  1020. .sys_reg = SYS_ID_AA64PFR0_EL1,
  1021. .sign = FTR_UNSIGNED,
  1022. .field_pos = ID_AA64PFR0_RAS_SHIFT,
  1023. .min_field_value = ID_AA64PFR0_RAS_V1,
  1024. .cpu_enable = cpu_clear_disr,
  1025. },
  1026. #endif /* CONFIG_ARM64_RAS_EXTN */
  1027. {
  1028. .desc = "Data cache clean to the PoU not required for I/D coherence",
  1029. .capability = ARM64_HAS_CACHE_IDC,
  1030. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1031. .matches = has_cache_idc,
  1032. },
  1033. {
  1034. .desc = "Instruction cache invalidation not required for I/D coherence",
  1035. .capability = ARM64_HAS_CACHE_DIC,
  1036. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1037. .matches = has_cache_dic,
  1038. },
  1039. #ifdef CONFIG_ARM64_HW_AFDBM
  1040. {
  1041. /*
1042. * Since we always enable this capability, we don't want the user
1043. * to think that the feature is available when it may not actually
1044. * be present. So hide the description.
  1045. *
  1046. * .desc = "Hardware pagetable Dirty Bit Management",
  1047. *
  1048. */
  1049. .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
  1050. .capability = ARM64_HW_DBM,
  1051. .sys_reg = SYS_ID_AA64MMFR1_EL1,
  1052. .sign = FTR_UNSIGNED,
  1053. .field_pos = ID_AA64MMFR1_HADBS_SHIFT,
  1054. .min_field_value = 2,
  1055. .matches = has_hw_dbm,
  1056. .cpu_enable = cpu_enable_hw_dbm,
  1057. },
  1058. #endif
  1059. {},
  1060. };
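/*
 * HWCAP_CAP() builds an arm64_cpu_capabilities entry that maps a single
 * ID register field (matched with has_cpuid_feature() against min_value)
 * to an ELF hwcap bit exposed to userspace.
 */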
  1061. #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
  1062. { \
  1063. .desc = #cap, \
  1064. .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
  1065. .matches = has_cpuid_feature, \
  1066. .sys_reg = reg, \
  1067. .field_pos = field, \
  1068. .sign = s, \
  1069. .min_field_value = min_value, \
  1070. .hwcap_type = cap_type, \
  1071. .hwcap = cap, \
  1072. }
  1073. static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
  1074. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
  1075. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
  1076. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
  1077. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
  1078. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
  1079. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
  1080. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
  1081. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
  1082. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
  1083. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
  1084. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
  1085. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
  1086. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
  1087. HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
  1088. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
  1089. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
  1090. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
  1091. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
  1092. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
  1093. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
  1094. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
  1095. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
  1096. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
  1097. HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
  1098. HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
  1099. #ifdef CONFIG_ARM64_SVE
  1100. HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
  1101. #endif
  1102. {},
  1103. };
  1104. static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
  1105. #ifdef CONFIG_COMPAT
  1106. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
  1107. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
  1108. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
  1109. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
  1110. HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
  1111. #endif
  1112. {},
  1113. };
static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
        switch (cap->hwcap_type) {
        case CAP_HWCAP:
                elf_hwcap |= cap->hwcap;
                break;
#ifdef CONFIG_COMPAT
        case CAP_COMPAT_HWCAP:
                compat_elf_hwcap |= (u32)cap->hwcap;
                break;
        case CAP_COMPAT_HWCAP2:
                compat_elf_hwcap2 |= (u32)cap->hwcap;
                break;
#endif
        default:
                WARN_ON(1);
                break;
        }
}

/* Check if we have a particular HWCAP enabled */
static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
        bool rc;

        switch (cap->hwcap_type) {
        case CAP_HWCAP:
                rc = (elf_hwcap & cap->hwcap) != 0;
                break;
#ifdef CONFIG_COMPAT
        case CAP_COMPAT_HWCAP:
                rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
                break;
        case CAP_COMPAT_HWCAP2:
                rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
                break;
#endif
        default:
                WARN_ON(1);
                rc = false;
        }

        return rc;
}
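
/*
 * Walk an hwcap table and advertise to userspace every capability that
 * the sanitised system-wide register values support.
 */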
static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
        /* We support emulation of accesses to CPU ID feature registers */
        elf_hwcap |= HWCAP_CPUID;

        for (; hwcaps->matches; hwcaps++)
                if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
                        cap_set_elf_hwcap(hwcaps);
}

/*
 * Check if the current CPU has a given feature capability.
 * Should be called from non-preemptible context.
 */
static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
                               unsigned int cap)
{
        const struct arm64_cpu_capabilities *caps;

        if (WARN_ON(preemptible()))
                return false;

        for (caps = cap_array; caps->matches; caps++)
                if (caps->capability == cap)
                        return caps->matches(caps, SCOPE_LOCAL_CPU);

        return false;
}
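
/*
 * Walk @caps and mark as detected every capability within @scope_mask
 * whose matches() method succeeds, logging newly detected ones.
 */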
static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                                      u16 scope_mask, const char *info)
{
        scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
        for (; caps->matches; caps++) {
                if (!(caps->type & scope_mask) ||
                    !caps->matches(caps, cpucap_default_scope(caps)))
                        continue;

                if (!cpus_have_cap(caps->capability) && caps->desc)
                        pr_info("%s %s\n", info, caps->desc);
                cpus_set_cap(caps->capability);
        }
}

static void update_cpu_capabilities(u16 scope_mask)
{
        __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
        __update_cpu_capabilities(arm64_errata, scope_mask,
                                  "enabling workaround for");
}
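
/* stop_machine() callback: run the capability's cpu_enable() method. */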
static int __enable_cpu_capability(void *arg)
{
        const struct arm64_cpu_capabilities *cap = arg;

        cap->cpu_enable(cap);
        return 0;
}

/*
 * Run through the enabled capabilities and enable() them on all active
 * CPUs.
 */
static void __init
__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                          u16 scope_mask)
{
        scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
        for (; caps->matches; caps++) {
                unsigned int num = caps->capability;

                if (!(caps->type & scope_mask) || !cpus_have_cap(num))
                        continue;

                /* Ensure cpus_have_const_cap(num) works */
                static_branch_enable(&cpu_hwcap_keys[num]);

                if (caps->cpu_enable) {
                        /*
                         * Capabilities with SCOPE_BOOT_CPU scope are finalised
                         * before any secondary CPU boots. Thus, each secondary
                         * will enable the capability as appropriate via
                         * check_local_cpu_capabilities(). The only exception is
                         * the boot CPU, for which the capability must be
                         * enabled here. This approach avoids costly
                         * stop_machine() calls for this case.
                         *
                         * Otherwise, use stop_machine() as it schedules the
                         * work allowing us to modify PSTATE, instead of
                         * on_each_cpu() which uses an IPI, giving us a PSTATE
                         * that disappears when we return.
                         */
                        if (scope_mask & SCOPE_BOOT_CPU)
                                caps->cpu_enable(caps);
                        else
                                stop_machine(__enable_cpu_capability,
                                             (void *)caps, cpu_online_mask);
                }
        }
}

static void __init enable_cpu_capabilities(u16 scope_mask)
{
        __enable_cpu_capabilities(arm64_features, scope_mask);
        __enable_cpu_capabilities(arm64_errata, scope_mask);
}

/*
 * Run through the list of capabilities to check for conflicts.
 * If the system has already detected a capability, take necessary
 * action on this CPU.
 *
 * Returns "false" on conflicts.
 */
static bool
__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
                        u16 scope_mask)
{
        bool cpu_has_cap, system_has_cap;

        scope_mask &= ARM64_CPUCAP_SCOPE_MASK;

        for (; caps->matches; caps++) {
                if (!(caps->type & scope_mask))
                        continue;

                cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
                system_has_cap = cpus_have_cap(caps->capability);

                if (system_has_cap) {
                        /*
                         * Check if the new CPU misses an advertised feature,
                         * which is not safe to miss.
                         */
                        if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
                                break;

                        /*
                         * We have to issue cpu_enable() irrespective of
                         * whether the CPU has it or not, as it is enabled
                         * system wide. It is up to the callback to take
                         * appropriate action on this CPU.
                         */
                        if (caps->cpu_enable)
                                caps->cpu_enable(caps);
                } else {
                        /*
                         * Check if the CPU has this capability if it isn't
                         * safe to have when the system doesn't.
                         */
                        if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
                                break;
                }
        }

        if (caps->matches) {
                pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
                        smp_processor_id(), caps->capability,
                        caps->desc, system_has_cap, cpu_has_cap);
                return false;
        }

        return true;
}
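
/* Verify this CPU against both the errata and the feature capability lists. */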
static bool verify_local_cpu_caps(u16 scope_mask)
{
        return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
               __verify_local_cpu_caps(arm64_features, scope_mask);
}

/*
 * Check for CPU features that are used in early boot
 * based on the Boot CPU value.
 */
static void check_early_cpu_features(void)
{
        verify_cpu_asid_bits();

        /*
         * Early features are used by the kernel already. If there
         * is a conflict, we cannot proceed further.
         */
        if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
                cpu_panic_kernel();
}
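
/*
 * Ensure this CPU supports every ELF hwcap already advertised to
 * userspace; otherwise park it, as an hwcap cannot be retracted.
 */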
static void
verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
        for (; caps->matches; caps++)
                if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
                        pr_crit("CPU%d: missing HWCAP: %s\n",
                                smp_processor_id(), caps->desc);
                        cpu_die_early();
                }
}
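
/*
 * Check that this CPU provides the SVE vector lengths the system already
 * relies on (via the sanitised ZCR_EL1 value); park it otherwise.
 */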
static void verify_sve_features(void)
{
        u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
        u64 zcr = read_zcr_features();

        unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
        unsigned int len = zcr & ZCR_ELx_LEN_MASK;

        if (len < safe_len || sve_verify_vq_map()) {
                pr_crit("CPU%d: SVE: required vector length(s) missing\n",
                        smp_processor_id());
                cpu_die_early();
        }

        /* Add checks on other ZCR bits here if necessary */
}

/*
 * Run through the enabled system capabilities and enable() them on this
 * CPU. The capabilities were decided based on the available CPUs at boot
 * time. Any new CPU should match the system wide status of the capability.
 * If the new CPU doesn't have a capability which the system now has
 * enabled, we cannot do anything to fix it up and could cause unexpected
 * failures. So we park the CPU.
 */
static void verify_local_cpu_capabilities(void)
{
        /*
         * The capabilities with SCOPE_BOOT_CPU are checked from
         * check_early_cpu_features(), as they need to be verified
         * on all secondary CPUs.
         */
        if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
                cpu_die_early();

        verify_local_elf_hwcaps(arm64_elf_hwcaps);

        if (system_supports_32bit_el0())
                verify_local_elf_hwcaps(compat_elf_hwcaps);

        if (system_supports_sve())
                verify_sve_features();
}

void check_local_cpu_capabilities(void)
{
        /*
         * All secondary CPUs should conform to the early CPU features
         * in use by the kernel based on the boot CPU.
         */
        check_early_cpu_features();

        /*
         * If we haven't finalised the system capabilities, this CPU gets
         * a chance to update the errata workarounds and local features.
         * Otherwise, this CPU should verify that it has all the system
         * advertised capabilities.
         */
        if (!sys_caps_initialised)
                update_cpu_capabilities(SCOPE_LOCAL_CPU);
        else
                verify_local_cpu_capabilities();
}
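
/*
 * Detect everything the boot CPU can tell us about (SCOPE_BOOT_CPU and
 * SCOPE_LOCAL_CPU) and immediately enable the boot-scope capabilities,
 * which are needed before any secondary CPU comes up.
 */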
static void __init setup_boot_cpu_capabilities(void)
{
        /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
        update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
        /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
        enable_cpu_capabilities(SCOPE_BOOT_CPU);
}

DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);

static void __init mark_const_caps_ready(void)
{
        static_branch_enable(&arm64_const_caps_ready);
}

extern const struct arm64_cpu_capabilities arm64_errata[];

bool this_cpu_has_cap(unsigned int cap)
{
        return (__this_cpu_has_cap(arm64_features, cap) ||
                __this_cpu_has_cap(arm64_errata, cap));
}

static void __init setup_system_capabilities(void)
{
        /*
         * We have finalised the system-wide safe feature registers, so
         * finalise the capabilities that depend on them. Also enable all
         * the available capabilities that are not already enabled.
         */
        update_cpu_capabilities(SCOPE_SYSTEM);
        enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
}

void __init setup_cpu_features(void)
{
        u32 cwg;

        setup_system_capabilities();
        mark_const_caps_ready();
        setup_elf_hwcaps(arm64_elf_hwcaps);

        if (system_supports_32bit_el0())
                setup_elf_hwcaps(compat_elf_hwcaps);

        if (system_uses_ttbr0_pan())
                pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");

        sve_setup();
        minsigstksz_setup();

        /* Advertise that we have computed the system capabilities */
        set_sys_caps_initialised();

        /*
         * Check for sane CTR_EL0.CWG value.
         */
        cwg = cache_type_cwg();
        if (!cwg)
                pr_warn("No Cache Writeback Granule information, assuming %d\n",
                        ARCH_DMA_MINALIGN);
}
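
/* matches() helper: true only when the system has PAN but lacks UAO. */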
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
        return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}

/*
 * We emulate only the following system register space.
 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
 * See Table C5-6 System instruction encodings for System register accesses,
 * ARMv8 ARM (ARM DDI 0487A.f) for more details.
 */
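/*
 * For instance, ID_AA64PFR0_EL1 (Op0 = 3, Op1 = 0, CRn = 0, CRm = 4,
 * Op2 = 0) and MIDR_EL1 (CRm = 0) fall in this space and are emulated,
 * whereas CTR_EL0 (Op1 = 3) is not.
 */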
static inline bool __attribute_const__ is_emulated(u32 id)
{
        return (sys_reg_Op0(id) == 0x3 &&
                sys_reg_CRn(id) == 0x0 &&
                sys_reg_Op1(id) == 0x0 &&
                (sys_reg_CRm(id) == 0 ||
                 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
}

/*
 * With CRm == 0, reg should be one of:
 * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
 */
static inline int emulate_id_reg(u32 id, u64 *valp)
{
        switch (id) {
        case SYS_MIDR_EL1:
                *valp = read_cpuid_id();
                break;
        case SYS_MPIDR_EL1:
                *valp = SYS_MPIDR_SAFE_VAL;
                break;
        case SYS_REVIDR_EL1:
                /* IMPLEMENTATION DEFINED values are emulated with 0 */
                *valp = 0;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int emulate_sys_reg(u32 id, u64 *valp)
{
        struct arm64_ftr_reg *regp;

        if (!is_emulated(id))
                return -EINVAL;

        if (sys_reg_CRm(id) == 0)
                return emulate_id_reg(id, valp);

        regp = get_arm64_ftr_reg(id);
        if (regp)
                *valp = arm64_ftr_reg_user_value(regp);
        else
                /*
                 * The untracked registers are either IMPLEMENTATION DEFINED
                 * (e.g., ID_AFR0_EL1) or reserved RAZ.
                 */
                *valp = 0;

        return 0;
}
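
/*
 * Emulate an MRS instruction trapped from EL0: decode the target system
 * register and destination GPR, write back the sanitised value and skip
 * the instruction on success.
 */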
static int emulate_mrs(struct pt_regs *regs, u32 insn)
{
        int rc;
        u32 sys_reg, dst;
        u64 val;

        /*
         * sys_reg values are defined as used in mrs/msr instructions.
         * Shift the imm value to get the encoding.
         */
        sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
        rc = emulate_sys_reg(sys_reg, &val);
        if (!rc) {
                dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
                pt_regs_write_reg(regs, dst, val);
                arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        }

        return rc;
}
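
/*
 * Undef hook matching MRS reads of system registers (insn[31:20] == 0xd53)
 * executed in AArch64 EL0; such accesses are routed to emulate_mrs().
 */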
static struct undef_hook mrs_hook = {
        .instr_mask = 0xfff00000,
        .instr_val = 0xd5300000,
        .pstate_mask = COMPAT_PSR_MODE_MASK,
        .pstate_val = PSR_MODE_EL0t,
        .fn = emulate_mrs,
};

static int __init enable_mrs_emulation(void)
{
        register_undef_hook(&mrs_hook);
        return 0;
}

core_initcall(enable_mrs_emulation);
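
/* cpu_enable() callback: clear DISR_EL1 so a stale deferred SError is not taken later. */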
void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
{
        /* Firmware may have left a deferred SError in this register. */
        write_sysreg_s(0, SYS_DISR_EL1);
}