cpufeature.c

  1. /*
  2. * Contains CPU feature definitions
  3. *
  4. * Copyright (C) 2015 ARM Ltd.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #define pr_fmt(fmt) "CPU features: " fmt
  19. #include <linux/bsearch.h>
  20. #include <linux/cpumask.h>
  21. #include <linux/sort.h>
  22. #include <linux/stop_machine.h>
  23. #include <linux/types.h>
  24. #include <linux/mm.h>
  25. #include <linux/cpu.h>
  26. #include <asm/cpu.h>
  27. #include <asm/cpufeature.h>
  28. #include <asm/cpu_ops.h>
  29. #include <asm/fpsimd.h>
  30. #include <asm/mmu_context.h>
  31. #include <asm/processor.h>
  32. #include <asm/sysreg.h>
  33. #include <asm/traps.h>
  34. #include <asm/virt.h>
  35. unsigned long elf_hwcap __read_mostly;
  36. EXPORT_SYMBOL_GPL(elf_hwcap);
  37. #ifdef CONFIG_COMPAT
  38. #define COMPAT_ELF_HWCAP_DEFAULT \
  39. (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
  40. COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
  41. COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
  42. COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
  43. COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
  44. COMPAT_HWCAP_LPAE)
  45. unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
  46. unsigned int compat_elf_hwcap2 __read_mostly;
  47. #endif
  48. DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
  49. EXPORT_SYMBOL(cpu_hwcaps);
  50. /*
  51. * Flag to indicate if we have computed the system wide
  52. * capabilities based on the boot time active CPUs. This
  53. * will be used to determine if a new booting CPU should
  54. * go through the verification process to make sure that it
  55. * supports the system capabilities, without using a hotplug
  56. * notifier.
  57. */
  58. static bool sys_caps_initialised;
  59. static inline void set_sys_caps_initialised(void)
  60. {
  61. sys_caps_initialised = true;
  62. }
  63. static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
  64. {
  65. /* file-wide pr_fmt adds "CPU features: " prefix */
  66. pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
  67. return 0;
  68. }
  69. static struct notifier_block cpu_hwcaps_notifier = {
  70. .notifier_call = dump_cpu_hwcaps
  71. };
  72. static int __init register_cpu_hwcaps_dumper(void)
  73. {
  74. atomic_notifier_chain_register(&panic_notifier_list,
  75. &cpu_hwcaps_notifier);
  76. return 0;
  77. }
  78. __initcall(register_cpu_hwcaps_dumper);
  79. DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
  80. EXPORT_SYMBOL(cpu_hwcap_keys);
  81. #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  82. { \
  83. .sign = SIGNED, \
  84. .visible = VISIBLE, \
  85. .strict = STRICT, \
  86. .type = TYPE, \
  87. .shift = SHIFT, \
  88. .width = WIDTH, \
  89. .safe_val = SAFE_VAL, \
  90. }
  91. /* Define a feature with unsigned values */
  92. #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  93. __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
  94. /* Define a feature with a signed value */
  95. #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  96. __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
  97. #define ARM64_FTR_END \
  98. { \
  99. .width = 0, \
  100. }
  101. /* meta feature for alternatives */
  102. static bool __maybe_unused
  103. cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
  104. /*
  105. * NOTE: Any changes to the visibility of features should be kept in
  106. * sync with the documentation of the CPU feature register ABI.
  107. */
  108. static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
  109. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
  110. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
  111. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
  112. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
  113. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
  114. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
  115. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
  116. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
  117. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
  118. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
  119. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
  120. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
  121. ARM64_FTR_END,
  122. };
  123. static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
  124. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
  125. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
  126. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
  127. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
  128. ARM64_FTR_END,
  129. };
  130. static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
  131. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
  132. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
  133. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
  134. ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
  135. FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
  136. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
  137. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
  138. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
  139. S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
  140. /* Linux doesn't care about the EL3 */
  141. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
  142. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
  143. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
  144. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
  145. ARM64_FTR_END,
  146. };
  147. static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
  148. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
  149. ARM64_FTR_END,
  150. };
  151. static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
  152. /*
  153. * We already refuse to boot CPUs that don't support our configured
  154. * page size, so we can only detect mismatches for a page size other
  155. * than the one we're currently using. Unfortunately, SoCs like this
  156. * exist in the wild so, even though we don't like it, we'll have to go
  157. * along with it and treat them as non-strict.
  158. */
  159. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
  160. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
  161. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
  162. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
  163. /* Linux shouldn't care about secure memory */
  164. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
  165. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
  166. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
  167. /*
  168. * Differing PARange is fine as long as all peripherals and memory are mapped
  169. * within the minimum PARange of all CPUs
  170. */
  171. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
  172. ARM64_FTR_END,
  173. };
  174. static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
  175. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
  176. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
  177. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
  178. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
  179. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
  180. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
  181. ARM64_FTR_END,
  182. };
  183. static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
  184. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
  185. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
  186. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
  187. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
  188. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
  189. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
  190. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
  191. ARM64_FTR_END,
  192. };
  193. static const struct arm64_ftr_bits ftr_ctr[] = {
  194. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
  195. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
  196. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
  197. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
  198. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
  199. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
  200. /*
  201. * Linux can handle differing I-cache policies. Userspace JITs will
  202. * make use of *minLine.
  203. * If we have differing I-cache policies, report it as the weakest - VIPT.
  204. */
  205. ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
  206. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
  207. ARM64_FTR_END,
  208. };
  209. struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
  210. .name = "SYS_CTR_EL0",
  211. .ftr_bits = ftr_ctr
  212. };
  213. static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
  214. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */
  215. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */
  216. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
  217. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */
  218. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */
  219. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */
  220. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */
  221. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */
  222. ARM64_FTR_END,
  223. };
  224. static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
  225. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
  226. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
  227. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
  228. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
  229. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
  230. /*
  231. * We can instantiate multiple PMU instances with different levels
  232. * of support.
  233. */
  234. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
  235. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
  236. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
  237. ARM64_FTR_END,
  238. };
  239. static const struct arm64_ftr_bits ftr_mvfr2[] = {
  240. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
  241. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
  242. ARM64_FTR_END,
  243. };
  244. static const struct arm64_ftr_bits ftr_dczid[] = {
  245. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */
  246. ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */
  247. ARM64_FTR_END,
  248. };
  249. static const struct arm64_ftr_bits ftr_id_isar5[] = {
  250. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
  251. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
  252. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
  253. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
  254. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
  255. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
  256. ARM64_FTR_END,
  257. };
  258. static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
  259. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */
  260. ARM64_FTR_END,
  261. };
  262. static const struct arm64_ftr_bits ftr_id_pfr0[] = {
  263. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
  264. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
  265. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */
  266. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */
  267. ARM64_FTR_END,
  268. };
  269. static const struct arm64_ftr_bits ftr_id_dfr0[] = {
  270. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  271. S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
  272. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  273. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  274. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  275. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  276. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  277. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  278. ARM64_FTR_END,
  279. };
  280. static const struct arm64_ftr_bits ftr_zcr[] = {
  281. ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
  282. ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
  283. ARM64_FTR_END,
  284. };
  285. /*
  286. * Common ftr bits for a 32bit register with all hidden, strict
  287. * attributes, with 4bit feature fields and a default safe value of
  288. * 0. Covers the following 32bit registers:
  289. * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
  290. */
  291. static const struct arm64_ftr_bits ftr_generic_32bits[] = {
  292. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
  293. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
  294. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
  295. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
  296. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
  297. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
  298. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
  299. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
  300. ARM64_FTR_END,
  301. };
  302. /* Table for a single 32bit feature value */
  303. static const struct arm64_ftr_bits ftr_single32[] = {
  304. ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
  305. ARM64_FTR_END,
  306. };
  307. static const struct arm64_ftr_bits ftr_raz[] = {
  308. ARM64_FTR_END,
  309. };
  310. #define ARM64_FTR_REG(id, table) { \
  311. .sys_id = id, \
  312. .reg = &(struct arm64_ftr_reg){ \
  313. .name = #id, \
  314. .ftr_bits = &((table)[0]), \
  315. }}
  316. static const struct __ftr_reg_entry {
  317. u32 sys_id;
  318. struct arm64_ftr_reg *reg;
  319. } arm64_ftr_regs[] = {
  320. /* Op1 = 0, CRn = 0, CRm = 1 */
  321. ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
  322. ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
  323. ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
  324. ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
  325. ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
  326. ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
  327. ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
  328. /* Op1 = 0, CRn = 0, CRm = 2 */
  329. ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
  330. ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
  331. ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
  332. ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
  333. ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
  334. ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
  335. ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
  336. /* Op1 = 0, CRn = 0, CRm = 3 */
  337. ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
  338. ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
  339. ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
  340. /* Op1 = 0, CRn = 0, CRm = 4 */
  341. ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
  342. ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
  343. ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
  344. /* Op1 = 0, CRn = 0, CRm = 5 */
  345. ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
  346. ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
  347. /* Op1 = 0, CRn = 0, CRm = 6 */
  348. ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
  349. ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
  350. /* Op1 = 0, CRn = 0, CRm = 7 */
  351. ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
  352. ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
  353. ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
  354. /* Op1 = 0, CRn = 1, CRm = 2 */
  355. ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
  356. /* Op1 = 3, CRn = 0, CRm = 0 */
  357. { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
  358. ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
  359. /* Op1 = 3, CRn = 14, CRm = 0 */
  360. ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
  361. };
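/*
 * bsearch() comparator for arm64_ftr_regs: compares the requested sys_reg()
 * encoding against an entry's sys_id so that get_arm64_ftr_reg() can binary
 * search the (sorted) table below.
 */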
  362. static int search_cmp_ftr_reg(const void *id, const void *regp)
  363. {
  364. return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
  365. }
  366. /*
  367. * get_arm64_ftr_reg - Lookup a feature register entry using its
  368. * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
  369. * ascending order of sys_id, we use binary search to find a matching
  370. * entry.
  371. *
  372. * returns - Upon success, matching ftr_reg entry for id.
  373. * - NULL on failure. It is up to the caller to decide
  374. * the impact of a failure.
  375. */
  376. static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
  377. {
  378. const struct __ftr_reg_entry *ret;
  379. ret = bsearch((const void *)(unsigned long)sys_id,
  380. arm64_ftr_regs,
  381. ARRAY_SIZE(arm64_ftr_regs),
  382. sizeof(arm64_ftr_regs[0]),
  383. search_cmp_ftr_reg);
  384. if (ret)
  385. return ret->reg;
  386. return NULL;
  387. }
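/*
 * Insert ftr_val into the field described by ftrp (at ftrp->shift, masked by
 * arm64_ftr_mask()) within the register image 'reg' and return the result.
 */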
  388. static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
  389. s64 ftr_val)
  390. {
  391. u64 mask = arm64_ftr_mask(ftrp);
  392. reg &= ~mask;
  393. reg |= (ftr_val << ftrp->shift) & mask;
  394. return reg;
  395. }
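/*
 * Pick the "safe" system-wide value for a feature field when two CPUs
 * disagree: the fixed safe_val for FTR_EXACT fields, the lower of the two
 * values for FTR_LOWER_SAFE, and the higher for FTR_HIGHER_SAFE (with zero
 * treated as safe for FTR_HIGHER_OR_ZERO_SAFE).
 */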
  396. static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
  397. s64 cur)
  398. {
  399. s64 ret = 0;
  400. switch (ftrp->type) {
  401. case FTR_EXACT:
  402. ret = ftrp->safe_val;
  403. break;
  404. case FTR_LOWER_SAFE:
  405. ret = new < cur ? new : cur;
  406. break;
  407. case FTR_HIGHER_OR_ZERO_SAFE:
  408. if (!cur || !new)
  409. break;
  410. /* Fallthrough */
  411. case FTR_HIGHER_SAFE:
  412. ret = new > cur ? new : cur;
  413. break;
  414. default:
  415. BUG();
  416. }
  417. return ret;
  418. }
  419. static void __init sort_ftr_regs(void)
  420. {
  421. int i;
  422. /* Check that the array is sorted so that we can do the binary search */
  423. for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
  424. BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
  425. }
  426. /*
  427. * Initialise the CPU feature register from Boot CPU values.
  428. * Also initialises the strict_mask for the register.
  429. * Any bits that are not covered by an arm64_ftr_bits entry are considered
  430. * RES0 for the system-wide value, and must strictly match.
  431. */
  432. static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
  433. {
  434. u64 val = 0;
  435. u64 strict_mask = ~0x0ULL;
  436. u64 user_mask = 0;
  437. u64 valid_mask = 0;
  438. const struct arm64_ftr_bits *ftrp;
  439. struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
  440. BUG_ON(!reg);
  441. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  442. u64 ftr_mask = arm64_ftr_mask(ftrp);
  443. s64 ftr_new = arm64_ftr_value(ftrp, new);
  444. val = arm64_ftr_set_value(ftrp, val, ftr_new);
  445. valid_mask |= ftr_mask;
  446. if (!ftrp->strict)
  447. strict_mask &= ~ftr_mask;
  448. if (ftrp->visible)
  449. user_mask |= ftr_mask;
  450. else
  451. reg->user_val = arm64_ftr_set_value(ftrp,
  452. reg->user_val,
  453. ftrp->safe_val);
  454. }
  455. val &= valid_mask;
  456. reg->sys_val = val;
  457. reg->strict_mask = strict_mask;
  458. reg->user_mask = user_mask;
  459. }
  460. extern const struct arm64_cpu_capabilities arm64_errata[];
  461. static void __init setup_boot_cpu_capabilities(void);
  462. void __init init_cpu_features(struct cpuinfo_arm64 *info)
  463. {
  464. /* Before we start using the table, make sure it is sorted */
  465. sort_ftr_regs();
  466. init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
  467. init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
  468. init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
  469. init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
  470. init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
  471. init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
  472. init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
  473. init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
  474. init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
  475. init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
  476. init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
  477. init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
  478. init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
  479. if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  480. init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
  481. init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
  482. init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
  483. init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
  484. init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
  485. init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
  486. init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
  487. init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
  488. init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
  489. init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
  490. init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
  491. init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
  492. init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
  493. init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
  494. init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
  495. init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
  496. }
  497. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  498. init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
  499. sve_init_vq_map();
  500. }
  501. /*
  502. * Detect and enable early CPU capabilities based on the boot CPU,
  503. * after we have initialised the CPU feature infrastructure.
  504. */
  505. setup_boot_cpu_capabilities();
  506. }
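/*
 * Fold a secondary CPU's register value into the system-wide sanitised
 * value, field by field, replacing any differing field with the "safe"
 * value computed by arm64_ftr_safe_value().
 */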
  507. static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
  508. {
  509. const struct arm64_ftr_bits *ftrp;
  510. for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  511. s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
  512. s64 ftr_new = arm64_ftr_value(ftrp, new);
  513. if (ftr_cur == ftr_new)
  514. continue;
  515. /* Find a safe value */
  516. ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
  517. reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
  518. }
  519. }
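/*
 * Update the sanitised copy of 'sys_id' with this CPU's value and return
 * non-zero if it differs from the boot CPU in any strictly-checked field,
 * so the caller can taint the kernel.
 */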
  520. static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
  521. {
  522. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
  523. BUG_ON(!regp);
  524. update_cpu_ftr_reg(regp, val);
  525. if ((boot & regp->strict_mask) == (val & regp->strict_mask))
  526. return 0;
  527. pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
  528. regp->name, boot, cpu, val);
  529. return 1;
  530. }
  531. /*
  532. * Update system wide CPU feature registers with the values from a
  533. * non-boot CPU. Also performs SANITY checks to make sure that there
  534. * aren't any insane variations from that of the boot CPU.
  535. */
  536. void update_cpu_features(int cpu,
  537. struct cpuinfo_arm64 *info,
  538. struct cpuinfo_arm64 *boot)
  539. {
  540. int taint = 0;
  541. /*
  542. * The kernel can handle differing I-cache policies, but otherwise
  543. * caches should look identical. Userspace JITs will make use of
  544. * *minLine.
  545. */
  546. taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
  547. info->reg_ctr, boot->reg_ctr);
  548. /*
  549. * Userspace may perform DC ZVA instructions. Mismatched block sizes
  550. * could result in too much or too little memory being zeroed if a
  551. * process is preempted and migrated between CPUs.
  552. */
  553. taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
  554. info->reg_dczid, boot->reg_dczid);
  555. /* If different, timekeeping will be broken (especially with KVM) */
  556. taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
  557. info->reg_cntfrq, boot->reg_cntfrq);
  558. /*
  559. * The kernel uses self-hosted debug features and expects CPUs to
  560. * support identical debug features. We presently need CTX_CMPs, WRPs,
  561. * and BRPs to be identical.
  562. * ID_AA64DFR1 is currently RES0.
  563. */
  564. taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
  565. info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
  566. taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
  567. info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
  568. /*
  569. * Even in big.LITTLE, processors should be identical instruction-set
  570. * wise.
  571. */
  572. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
  573. info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
  574. taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
  575. info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
  576. /*
  577. * Differing PARange support is fine as long as all peripherals and
  578. * memory are mapped within the minimum PARange of all CPUs.
  579. * Linux should not care about secure memory.
  580. */
  581. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
  582. info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
  583. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
  584. info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
  585. taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
  586. info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
  587. /*
  588. * EL3 is not our concern.
  589. */
  590. taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
  591. info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
  592. taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
  593. info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
  594. taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
  595. info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
  596. /*
  597. * If we have AArch32, we care about 32-bit features for compat.
  598. * If the system doesn't support AArch32, don't update them.
  599. */
  600. if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  601. id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  602. taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
  603. info->reg_id_dfr0, boot->reg_id_dfr0);
  604. taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
  605. info->reg_id_isar0, boot->reg_id_isar0);
  606. taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
  607. info->reg_id_isar1, boot->reg_id_isar1);
  608. taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
  609. info->reg_id_isar2, boot->reg_id_isar2);
  610. taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
  611. info->reg_id_isar3, boot->reg_id_isar3);
  612. taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
  613. info->reg_id_isar4, boot->reg_id_isar4);
  614. taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
  615. info->reg_id_isar5, boot->reg_id_isar5);
  616. /*
  617. * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
  618. * ACTLR formats could differ across CPUs and therefore would have to
  619. * be trapped for virtualization anyway.
  620. */
  621. taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
  622. info->reg_id_mmfr0, boot->reg_id_mmfr0);
  623. taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
  624. info->reg_id_mmfr1, boot->reg_id_mmfr1);
  625. taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
  626. info->reg_id_mmfr2, boot->reg_id_mmfr2);
  627. taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
  628. info->reg_id_mmfr3, boot->reg_id_mmfr3);
  629. taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
  630. info->reg_id_pfr0, boot->reg_id_pfr0);
  631. taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
  632. info->reg_id_pfr1, boot->reg_id_pfr1);
  633. taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
  634. info->reg_mvfr0, boot->reg_mvfr0);
  635. taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
  636. info->reg_mvfr1, boot->reg_mvfr1);
  637. taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
  638. info->reg_mvfr2, boot->reg_mvfr2);
  639. }
  640. if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
  641. taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
  642. info->reg_zcr, boot->reg_zcr);
  643. /* Probe vector lengths, unless we already gave up on SVE */
  644. if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
  645. !sys_caps_initialised)
  646. sve_update_vq_map();
  647. }
  648. /*
  649. * Mismatched CPU features are a recipe for disaster. Don't even
  650. * pretend to support them.
  651. */
  652. if (taint) {
  653. pr_warn_once("Unsupported CPU feature variation detected.\n");
  654. add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
  655. }
  656. }
  657. u64 read_sanitised_ftr_reg(u32 id)
  658. {
  659. struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
  660. /* We shouldn't get a request for an unsupported register */
  661. BUG_ON(!regp);
  662. return regp->sys_val;
  663. }
  664. #define read_sysreg_case(r) \
  665. case r: return read_sysreg_s(r)
  666. /*
  667. * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
  668. * Read the system register on the current CPU
  669. */
  670. static u64 __read_sysreg_by_encoding(u32 sys_id)
  671. {
  672. switch (sys_id) {
  673. read_sysreg_case(SYS_ID_PFR0_EL1);
  674. read_sysreg_case(SYS_ID_PFR1_EL1);
  675. read_sysreg_case(SYS_ID_DFR0_EL1);
  676. read_sysreg_case(SYS_ID_MMFR0_EL1);
  677. read_sysreg_case(SYS_ID_MMFR1_EL1);
  678. read_sysreg_case(SYS_ID_MMFR2_EL1);
  679. read_sysreg_case(SYS_ID_MMFR3_EL1);
  680. read_sysreg_case(SYS_ID_ISAR0_EL1);
  681. read_sysreg_case(SYS_ID_ISAR1_EL1);
  682. read_sysreg_case(SYS_ID_ISAR2_EL1);
  683. read_sysreg_case(SYS_ID_ISAR3_EL1);
  684. read_sysreg_case(SYS_ID_ISAR4_EL1);
  685. read_sysreg_case(SYS_ID_ISAR5_EL1);
  686. read_sysreg_case(SYS_MVFR0_EL1);
  687. read_sysreg_case(SYS_MVFR1_EL1);
  688. read_sysreg_case(SYS_MVFR2_EL1);
  689. read_sysreg_case(SYS_ID_AA64PFR0_EL1);
  690. read_sysreg_case(SYS_ID_AA64PFR1_EL1);
  691. read_sysreg_case(SYS_ID_AA64DFR0_EL1);
  692. read_sysreg_case(SYS_ID_AA64DFR1_EL1);
  693. read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
  694. read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
  695. read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
  696. read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
  697. read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
  698. read_sysreg_case(SYS_CNTFRQ_EL0);
  699. read_sysreg_case(SYS_CTR_EL0);
  700. read_sysreg_case(SYS_DCZID_EL0);
  701. default:
  702. BUG();
  703. return 0;
  704. }
  705. }
  706. #include <linux/irqchip/arm-gic-v3.h>
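/*
 * Compare the relevant ID register field (at entry->field_pos, interpreted
 * with entry->sign) against entry->min_field_value to decide whether this
 * capability is present.
 */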
  707. static bool
  708. feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
  709. {
  710. int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
  711. return val >= entry->min_field_value;
  712. }
  713. static bool
  714. has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
  715. {
  716. u64 val;
  717. WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
  718. if (scope == SCOPE_SYSTEM)
  719. val = read_sanitised_ftr_reg(entry->sys_reg);
  720. else
  721. val = __read_sysreg_by_encoding(entry->sys_reg);
  722. return feature_matches(val, entry);
  723. }
  724. static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
  725. {
  726. bool has_sre;
  727. if (!has_cpuid_feature(entry, scope))
  728. return false;
  729. has_sre = gic_enable_sre();
  730. if (!has_sre)
  731. pr_warn_once("%s present but disabled by higher exception level\n",
  732. entry->desc);
  733. return has_sre;
  734. }
  735. static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
  736. {
  737. u32 midr = read_cpuid_id();
  738. /* Cavium ThunderX pass 1.x and 2.x */
  739. return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
  740. MIDR_CPU_VAR_REV(0, 0),
  741. MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
  742. }
  743. static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
  744. {
  745. u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
  746. return cpuid_feature_extract_signed_field(pfr0,
  747. ID_AA64PFR0_FP_SHIFT) < 0;
  748. }
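/*
 * CTR_EL0.IDC / CTR_EL0.DIC checks: use the sanitised system-wide value for
 * SCOPE_SYSTEM, or this CPU's own cache type register otherwise.
 */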
  749. static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
  750. int scope)
  751. {
  752. u64 ctr;
  753. if (scope == SCOPE_SYSTEM)
  754. ctr = arm64_ftr_reg_ctrel0.sys_val;
  755. else
  756. ctr = read_cpuid_cachetype();
  757. return ctr & BIT(CTR_IDC_SHIFT);
  758. }
  759. static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
  760. int scope)
  761. {
  762. u64 ctr;
  763. if (scope == SCOPE_SYSTEM)
  764. ctr = arm64_ftr_reg_ctrel0.sys_val;
  765. else
  766. ctr = read_cpuid_cachetype();
  767. return ctr & BIT(CTR_DIC_SHIFT);
  768. }
  769. static bool __meltdown_safe = true;
  770. static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
  771. static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
  772. int scope)
  773. {
  774. /* List of CPUs that are not vulnerable and don't need KPTI */
  775. static const struct midr_range kpti_safe_list[] = {
  776. MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
  777. MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
  778. MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
  779. MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
  780. MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
  781. MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
  782. MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
  783. MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
  784. { /* sentinel */ }
  785. };
  786. char const *str = "kpti command line option";
  787. bool meltdown_safe;
  788. meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
  789. /* Defer to CPU feature registers */
  790. if (has_cpuid_feature(entry, scope))
  791. meltdown_safe = true;
  792. if (!meltdown_safe)
  793. __meltdown_safe = false;
  794. /*
  795. * For reasons that aren't entirely clear, enabling KPTI on Cavium
  796. * ThunderX leads to apparent I-cache corruption of kernel text, which
  797. * ends as well as you might imagine. Don't even try.
  798. */
  799. if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
  800. str = "ARM64_WORKAROUND_CAVIUM_27456";
  801. __kpti_forced = -1;
  802. }
  803. /* Useful for KASLR robustness */
  804. if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
  805. if (!__kpti_forced) {
  806. str = "KASLR";
  807. __kpti_forced = 1;
  808. }
  809. }
  810. if (cpu_mitigations_off() && !__kpti_forced) {
  811. str = "mitigations=off";
  812. __kpti_forced = -1;
  813. }
  814. if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
  815. pr_info_once("kernel page table isolation disabled by kernel configuration\n");
  816. return false;
  817. }
  818. /* Forced? */
  819. if (__kpti_forced) {
  820. pr_info_once("kernel page table isolation forced %s by %s\n",
  821. __kpti_forced > 0 ? "ON" : "OFF", str);
  822. return __kpti_forced > 0;
  823. }
  824. return !meltdown_safe;
  825. }
  826. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
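/*
 * Rewrite the swapper page tables with non-global (nG) mappings by calling
 * idmap_kpti_install_ng_mappings via the idmap; the rewrite is only marked
 * as applied once the boot CPU (cpu 0) has completed it.
 */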
  827. static void
  828. kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
  829. {
  830. typedef void (kpti_remap_fn)(int, int, phys_addr_t);
  831. extern kpti_remap_fn idmap_kpti_install_ng_mappings;
  832. kpti_remap_fn *remap_fn;
  833. static bool kpti_applied = false;
  834. int cpu = smp_processor_id();
  835. if (kpti_applied)
  836. return;
  837. remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
  838. cpu_install_idmap();
  839. remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
  840. cpu_uninstall_idmap();
  841. if (!cpu)
  842. kpti_applied = true;
  843. return;
  844. }
  845. #else
  846. static void
  847. kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
  848. {
  849. }
  850. #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
  851. static int __init parse_kpti(char *str)
  852. {
  853. bool enabled;
  854. int ret = strtobool(str, &enabled);
  855. if (ret)
  856. return ret;
  857. __kpti_forced = enabled ? 1 : -1;
  858. return 0;
  859. }
  860. early_param("kpti", parse_kpti);
  861. #ifdef CONFIG_ARM64_HW_AFDBM
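/* Enable hardware update of the dirty state (TCR_EL1.HD) on this CPU. */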
  862. static inline void __cpu_enable_hw_dbm(void)
  863. {
  864. u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
  865. write_sysreg(tcr, tcr_el1);
  866. isb();
  867. }
  868. static bool cpu_has_broken_dbm(void)
  869. {
  870. /* List of CPUs which have broken DBM support. */
  871. static const struct midr_range cpus[] = {
  872. #ifdef CONFIG_ARM64_ERRATUM_1024718
  873. MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 - r1p0
  874. #endif
  875. {},
  876. };
  877. return is_midr_in_range_list(read_cpuid_id(), cpus);
  878. }
  879. static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
  880. {
  881. return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
  882. !cpu_has_broken_dbm();
  883. }
  884. static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
  885. {
  886. if (cpu_can_use_dbm(cap))
  887. __cpu_enable_hw_dbm();
  888. }
  889. static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
  890. int __unused)
  891. {
  892. static bool detected = false;
  893. /*
  894. * DBM is a non-conflicting feature. i.e., the kernel can safely
  895. * run a mix of CPUs with and without the feature. So, we
  896. * unconditionally enable the capability to allow any late CPU
  897. * to use the feature. We only enable the control bits on the
  898. * CPU, if it actually supports it.
  899. *
  900. * We have to make sure we print the "feature" detection only
  901. * when at least one CPU actually uses it. So check if this CPU
  902. * can actually use it and print the message exactly once.
  903. *
  904. * This is safe as all CPUs (including secondary CPUs - due to the
  905. * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
  906. * go through the "matches" check exactly once. Also if a CPU
  907. * matches the criteria, it is guaranteed that the CPU will turn
  908. * the DBM on, as the capability is unconditionally enabled.
  909. */
  910. if (!detected && cpu_can_use_dbm(cap)) {
  911. detected = true;
  912. pr_info("detected: Hardware dirty bit management\n");
  913. }
  914. return true;
  915. }
  916. #endif
  917. #ifdef CONFIG_ARM64_VHE
  918. static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
  919. {
  920. return is_kernel_in_hyp_mode();
  921. }
  922. static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
  923. {
  924. /*
  925. * Copy register values that aren't redirected by hardware.
  926. *
  927. * Before code patching, we only set tpidr_el1, so all CPUs need to copy
  928. * this value to tpidr_el2 before we patch the code. Once we've done
  929. * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
  930. * do anything here.
  931. */
  932. if (!alternatives_applied)
  933. write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
  934. }
  935. #endif
  936. static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
  937. {
  938. u64 val = read_sysreg_s(SYS_CLIDR_EL1);
  939. /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
  940. WARN_ON(val & (7 << 27 | 7 << 21));
  941. }
  942. #ifdef CONFIG_ARM64_SSBD
  943. static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
  944. {
  945. if (user_mode(regs))
  946. return 1;
  947. if (instr & BIT(CRm_shift))
  948. regs->pstate |= PSR_SSBS_BIT;
  949. else
  950. regs->pstate &= ~PSR_SSBS_BIT;
  951. arm64_skip_faulting_instruction(regs, 4);
  952. return 0;
  953. }
  954. static struct undef_hook ssbs_emulation_hook = {
  955. .instr_mask = ~(1U << CRm_shift),
  956. .instr_val = 0xd500001f | REG_PSTATE_SSBS_IMM,
  957. .fn = ssbs_emulation_handler,
  958. };
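/*
 * Register (once) an undef hook that emulates the "MSR SSBS, #imm"
 * instruction on CPUs where it UNDEFs, then apply the SSBD mitigation
 * policy chosen on the command line to this CPU.
 */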
  959. static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
  960. {
  961. static bool undef_hook_registered = false;
  962. static DEFINE_SPINLOCK(hook_lock);
  963. spin_lock(&hook_lock);
  964. if (!undef_hook_registered) {
  965. register_undef_hook(&ssbs_emulation_hook);
  966. undef_hook_registered = true;
  967. }
  968. spin_unlock(&hook_lock);
  969. if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
  970. sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
  971. arm64_set_ssbd_mitigation(false);
  972. } else {
  973. arm64_set_ssbd_mitigation(true);
  974. }
  975. }
  976. #endif /* CONFIG_ARM64_SSBD */
  977. static const struct arm64_cpu_capabilities arm64_features[] = {
  978. {
  979. .desc = "GIC system register CPU interface",
  980. .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
  981. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  982. .matches = has_useable_gicv3_cpuif,
  983. .sys_reg = SYS_ID_AA64PFR0_EL1,
  984. .field_pos = ID_AA64PFR0_GIC_SHIFT,
  985. .sign = FTR_UNSIGNED,
  986. .min_field_value = 1,
  987. },
  988. #ifdef CONFIG_ARM64_PAN
  989. {
  990. .desc = "Privileged Access Never",
  991. .capability = ARM64_HAS_PAN,
  992. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  993. .matches = has_cpuid_feature,
  994. .sys_reg = SYS_ID_AA64MMFR1_EL1,
  995. .field_pos = ID_AA64MMFR1_PAN_SHIFT,
  996. .sign = FTR_UNSIGNED,
  997. .min_field_value = 1,
  998. .cpu_enable = cpu_enable_pan,
  999. },
  1000. #endif /* CONFIG_ARM64_PAN */
  1001. #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
  1002. {
  1003. .desc = "LSE atomic instructions",
  1004. .capability = ARM64_HAS_LSE_ATOMICS,
  1005. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1006. .matches = has_cpuid_feature,
  1007. .sys_reg = SYS_ID_AA64ISAR0_EL1,
  1008. .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
  1009. .sign = FTR_UNSIGNED,
  1010. .min_field_value = 2,
  1011. },
  1012. #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
  1013. {
  1014. .desc = "Software prefetching using PRFM",
  1015. .capability = ARM64_HAS_NO_HW_PREFETCH,
  1016. .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
  1017. .matches = has_no_hw_prefetch,
  1018. },
  1019. #ifdef CONFIG_ARM64_UAO
  1020. {
  1021. .desc = "User Access Override",
  1022. .capability = ARM64_HAS_UAO,
  1023. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1024. .matches = has_cpuid_feature,
  1025. .sys_reg = SYS_ID_AA64MMFR2_EL1,
  1026. .field_pos = ID_AA64MMFR2_UAO_SHIFT,
  1027. .min_field_value = 1,
  1028. /*
  1029. * We rely on stop_machine() calling uao_thread_switch() to set
  1030. * UAO immediately after patching.
  1031. */
  1032. },
  1033. #endif /* CONFIG_ARM64_UAO */
  1034. #ifdef CONFIG_ARM64_PAN
  1035. {
  1036. .capability = ARM64_ALT_PAN_NOT_UAO,
  1037. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1038. .matches = cpufeature_pan_not_uao,
  1039. },
  1040. #endif /* CONFIG_ARM64_PAN */
  1041. #ifdef CONFIG_ARM64_VHE
  1042. {
  1043. .desc = "Virtualization Host Extensions",
  1044. .capability = ARM64_HAS_VIRT_HOST_EXTN,
  1045. .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
  1046. .matches = runs_at_el2,
  1047. .cpu_enable = cpu_copy_el2regs,
  1048. },
  1049. #endif /* CONFIG_ARM64_VHE */
  1050. {
  1051. .desc = "32-bit EL0 Support",
  1052. .capability = ARM64_HAS_32BIT_EL0,
  1053. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1054. .matches = has_cpuid_feature,
  1055. .sys_reg = SYS_ID_AA64PFR0_EL1,
  1056. .sign = FTR_UNSIGNED,
  1057. .field_pos = ID_AA64PFR0_EL0_SHIFT,
  1058. .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
  1059. },
  1060. {
  1061. .desc = "Kernel page table isolation (KPTI)",
  1062. .capability = ARM64_UNMAP_KERNEL_AT_EL0,
  1063. .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
  1064. /*
  1065. * The ID feature fields below are used to indicate that
  1066. * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
  1067. * more details.
  1068. */
  1069. .sys_reg = SYS_ID_AA64PFR0_EL1,
  1070. .field_pos = ID_AA64PFR0_CSV3_SHIFT,
  1071. .min_field_value = 1,
  1072. .matches = unmap_kernel_at_el0,
  1073. .cpu_enable = kpti_install_ng_mappings,
  1074. },
  1075. {
  1076. /* FP/SIMD is not implemented */
  1077. .capability = ARM64_HAS_NO_FPSIMD,
  1078. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1079. .min_field_value = 0,
  1080. .matches = has_no_fpsimd,
  1081. },
  1082. #ifdef CONFIG_ARM64_PMEM
  1083. {
  1084. .desc = "Data cache clean to Point of Persistence",
  1085. .capability = ARM64_HAS_DCPOP,
  1086. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1087. .matches = has_cpuid_feature,
  1088. .sys_reg = SYS_ID_AA64ISAR1_EL1,
  1089. .field_pos = ID_AA64ISAR1_DPB_SHIFT,
  1090. .min_field_value = 1,
  1091. },
  1092. #endif
  1093. #ifdef CONFIG_ARM64_SVE
  1094. {
  1095. .desc = "Scalable Vector Extension",
  1096. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1097. .capability = ARM64_SVE,
  1098. .sys_reg = SYS_ID_AA64PFR0_EL1,
  1099. .sign = FTR_UNSIGNED,
  1100. .field_pos = ID_AA64PFR0_SVE_SHIFT,
  1101. .min_field_value = ID_AA64PFR0_SVE,
  1102. .matches = has_cpuid_feature,
  1103. .cpu_enable = sve_kernel_enable,
  1104. },
  1105. #endif /* CONFIG_ARM64_SVE */
  1106. #ifdef CONFIG_ARM64_RAS_EXTN
  1107. {
  1108. .desc = "RAS Extension Support",
  1109. .capability = ARM64_HAS_RAS_EXTN,
  1110. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1111. .matches = has_cpuid_feature,
  1112. .sys_reg = SYS_ID_AA64PFR0_EL1,
  1113. .sign = FTR_UNSIGNED,
  1114. .field_pos = ID_AA64PFR0_RAS_SHIFT,
  1115. .min_field_value = ID_AA64PFR0_RAS_V1,
  1116. .cpu_enable = cpu_clear_disr,
  1117. },
  1118. #endif /* CONFIG_ARM64_RAS_EXTN */
  1119. {
  1120. .desc = "Data cache clean to the PoU not required for I/D coherence",
  1121. .capability = ARM64_HAS_CACHE_IDC,
  1122. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1123. .matches = has_cache_idc,
  1124. },
  1125. {
  1126. .desc = "Instruction cache invalidation not required for I/D coherence",
  1127. .capability = ARM64_HAS_CACHE_DIC,
  1128. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1129. .matches = has_cache_dic,
  1130. },
  1131. {
  1132. .desc = "Stage-2 Force Write-Back",
  1133. .type = ARM64_CPUCAP_SYSTEM_FEATURE,
  1134. .capability = ARM64_HAS_STAGE2_FWB,
  1135. .sys_reg = SYS_ID_AA64MMFR2_EL1,
  1136. .sign = FTR_UNSIGNED,
  1137. .field_pos = ID_AA64MMFR2_FWB_SHIFT,
  1138. .min_field_value = 1,
  1139. .matches = has_cpuid_feature,
  1140. .cpu_enable = cpu_has_fwb,
  1141. },
  1142. #ifdef CONFIG_ARM64_HW_AFDBM
  1143. {
  1144. /*
  1145. * Since we turn this on always, we don't want the user to
  1146. * think that the feature is available when it may not be.
  1147. * So hide the description.
  1148. *
  1149. * .desc = "Hardware pagetable Dirty Bit Management",
  1150. *
  1151. */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		.capability = ARM64_HW_DBM,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
		.min_field_value = 2,
		.matches = has_hw_dbm,
		.cpu_enable = cpu_enable_hw_dbm,
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypassing Safe (SSBS)",
		.capability = ARM64_SSBS,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR1_EL1,
		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
		.cpu_enable = cpu_enable_ssbs,
	},
#endif
	{},
};
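/*
 * Helper for the ELF hwcap tables below: describes a capability that is
 * detected with has_cpuid_feature() on the given sanitised ID register
 * field and, once detected system-wide, is advertised to userspace as
 * the hwcap 'cap' of type 'cap_type'.
 */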
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
	{ \
		.desc = #cap, \
		.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
		.matches = has_cpuid_feature, \
		.sys_reg = reg, \
		.field_pos = field, \
		.sign = s, \
		.min_field_value = min_value, \
		.hwcap_type = cap_type, \
		.hwcap = cap, \
	}
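/*
 * ELF hwcaps advertised to native (AArch64) userspace once the
 * corresponding feature is present on all CPUs.
 */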
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
#endif
	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
	{},
};
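/* ELF hwcaps exposed to 32-bit (compat) EL0 tasks. */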
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};
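/* Set the corresponding bit in the native or compat ELF hwcap mask. */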
static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		elf_hwcap |= cap->hwcap;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		WARN_ON(1);
		break;
	}
}
/* Check if we have a particular HWCAP enabled */
static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(1);
		rc = false;
	}

	return rc;
}
static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
	/* We support emulation of accesses to CPU ID feature registers */
	elf_hwcap |= HWCAP_CPUID;

	for (; hwcaps->matches; hwcaps++)
		if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
			cap_set_elf_hwcap(hwcaps);
}
/*
 * Check if the current CPU has a given feature capability.
 * Should be called from non-preemptible context.
 */
static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
			       unsigned int cap)
{
	const struct arm64_cpu_capabilities *caps;

	if (WARN_ON(preemptible()))
		return false;

	for (caps = cap_array; caps->matches; caps++)
		if (caps->capability == cap)
			return caps->matches(caps, SCOPE_LOCAL_CPU);

	return false;
}
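/*
 * Scan a capability table and mark as detected every capability whose
 * scope matches 'scope_mask' and whose matches() callback fires,
 * printing 'info' plus the description the first time it is seen.
 */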
static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
				      u16 scope_mask, const char *info)
{
	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
	for (; caps->matches; caps++) {
		if (!(caps->type & scope_mask) ||
		    !caps->matches(caps, cpucap_default_scope(caps)))
			continue;

		if (!cpus_have_cap(caps->capability) && caps->desc)
			pr_info("%s %s\n", info, caps->desc);
		cpus_set_cap(caps->capability);
	}
}

static void update_cpu_capabilities(u16 scope_mask)
{
	__update_cpu_capabilities(arm64_errata, scope_mask,
				  "enabling workaround for");
	__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
}
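/* stop_machine() callback: invoke the capability's cpu_enable() hook. */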
static int __enable_cpu_capability(void *arg)
{
	const struct arm64_cpu_capabilities *cap = arg;

	cap->cpu_enable(cap);
	return 0;
}
/*
 * Run through the enabled capabilities and enable them on all the
 * active CPUs.
 */
static void __init
__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			  u16 scope_mask)
{
	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
	for (; caps->matches; caps++) {
		unsigned int num = caps->capability;

		if (!(caps->type & scope_mask) || !cpus_have_cap(num))
			continue;

		/* Ensure cpus_have_const_cap(num) works */
		static_branch_enable(&cpu_hwcap_keys[num]);

		if (caps->cpu_enable) {
			/*
			 * Capabilities with SCOPE_BOOT_CPU scope are finalised
			 * before any secondary CPU boots. Thus, each secondary
			 * will enable the capability as appropriate via
			 * check_local_cpu_capabilities(). The only exception is
			 * the boot CPU, for which the capability must be
			 * enabled here. This approach avoids costly
			 * stop_machine() calls for this case.
			 *
			 * Otherwise, use stop_machine() as it schedules the
			 * work allowing us to modify PSTATE, instead of
			 * on_each_cpu() which uses an IPI, giving us a PSTATE
			 * that disappears when we return.
			 */
			if (scope_mask & SCOPE_BOOT_CPU)
				caps->cpu_enable(caps);
			else
				stop_machine(__enable_cpu_capability,
					     (void *)caps, cpu_online_mask);
		}
	}
}

static void __init enable_cpu_capabilities(u16 scope_mask)
{
	__enable_cpu_capabilities(arm64_errata, scope_mask);
	__enable_cpu_capabilities(arm64_features, scope_mask);
}
/*
 * Run through the list of capabilities to check for conflicts.
 * If the system has already detected a capability, take necessary
 * action on this CPU.
 *
 * Returns "false" on conflicts.
 */
static bool
__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
			u16 scope_mask)
{
	bool cpu_has_cap, system_has_cap;

	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;

	for (; caps->matches; caps++) {
		if (!(caps->type & scope_mask))
			continue;

		cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
		system_has_cap = cpus_have_cap(caps->capability);

		if (system_has_cap) {
			/*
			 * Check if the new CPU misses an advertised feature,
			 * which is not safe to miss.
			 */
			if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
				break;
			/*
			 * We have to issue cpu_enable() irrespective of
			 * whether the CPU has it or not, as it is enabled
			 * system wide. It is up to the callback to take
			 * appropriate action on this CPU.
			 */
			if (caps->cpu_enable)
				caps->cpu_enable(caps);
		} else {
			/*
			 * Check if the CPU has this capability if it isn't
			 * safe to have when the system doesn't.
			 */
			if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
				break;
		}
	}

	if (caps->matches) {
		pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
			smp_processor_id(), caps->capability,
			caps->desc, system_has_cap, cpu_has_cap);
		return false;
	}

	return true;
}
static bool verify_local_cpu_caps(u16 scope_mask)
{
	return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
	       __verify_local_cpu_caps(arm64_features, scope_mask);
}
/*
 * Check for CPU features that are used in early boot
 * based on the Boot CPU value.
 */
static void check_early_cpu_features(void)
{
	verify_cpu_asid_bits();
	/*
	 * Early features are used by the kernel already. If there
	 * is a conflict, we cannot proceed further.
	 */
	if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
		cpu_panic_kernel();
}
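/*
 * Park a late CPU that lacks an ELF hwcap which has already been
 * advertised to userspace.
 */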
static void
verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing HWCAP: %s\n",
				smp_processor_id(), caps->desc);
			cpu_die_early();
		}
}
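/*
 * Ensure this CPU supports the SVE vector lengths the system has
 * already committed to, as reflected by the sanitised ZCR value and
 * the VQ map; park it otherwise.
 */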
static void verify_sve_features(void)
{
	u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	u64 zcr = read_zcr_features();

	unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
	unsigned int len = zcr & ZCR_ELx_LEN_MASK;

	if (len < safe_len || sve_verify_vq_map()) {
		pr_crit("CPU%d: SVE: required vector length(s) missing\n",
			smp_processor_id());
		cpu_die_early();
	}

	/* Add checks on other ZCR bits here if necessary */
}
/*
 * Run through the enabled system capabilities and enable them on this
 * CPU. The capabilities were decided based on the CPUs available at
 * boot time. Any new CPU must match the system-wide status of each
 * capability; if a new CPU lacks a capability the system has already
 * enabled, we cannot fix it up and unexpected failures could follow,
 * so we park the CPU.
 */
static void verify_local_cpu_capabilities(void)
{
	/*
	 * The capabilities with SCOPE_BOOT_CPU are checked from
	 * check_early_cpu_features(), as they need to be verified
	 * on all secondary CPUs.
	 */
	if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
		cpu_die_early();

	verify_local_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		verify_local_elf_hwcaps(compat_elf_hwcaps);

	if (system_supports_sve())
		verify_sve_features();
}
void check_local_cpu_capabilities(void)
{
	/*
	 * All secondary CPUs should conform to the early CPU features
	 * in use by the kernel based on the boot CPU.
	 */
	check_early_cpu_features();

	/*
	 * If we haven't finalised the system capabilities, this CPU gets
	 * a chance to update the errata workarounds and local features.
	 * Otherwise, this CPU should verify that it has all the system
	 * advertised capabilities.
	 */
	if (!sys_caps_initialised)
		update_cpu_capabilities(SCOPE_LOCAL_CPU);
	else
		verify_local_cpu_capabilities();
}
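/*
 * Detect the boot CPU's capabilities and enable those with
 * SCOPE_BOOT_CPU immediately, before any secondary CPU comes up.
 */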
static void __init setup_boot_cpu_capabilities(void)
{
	/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
	/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
	enable_cpu_capabilities(SCOPE_BOOT_CPU);
}

DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);

static void __init mark_const_caps_ready(void)
{
	static_branch_enable(&arm64_const_caps_ready);
}

extern const struct arm64_cpu_capabilities arm64_errata[];
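/*
 * Check whether the current CPU has the given capability, consulting
 * both the feature and the errata tables.
 */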
bool this_cpu_has_cap(unsigned int cap)
{
	return (__this_cpu_has_cap(arm64_features, cap) ||
		__this_cpu_has_cap(arm64_errata, cap));
}
static void __init setup_system_capabilities(void)
{
	/*
	 * We have finalised the system-wide safe feature registers, so
	 * finalise the capabilities that depend on them. Also enable any
	 * available capabilities that are not enabled already.
	 */
	update_cpu_capabilities(SCOPE_SYSTEM);
	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
}
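/*
 * Finalise the system-wide capabilities once the boot-time CPUs have
 * been brought up, advertise the ELF hwcaps, complete SVE and signal
 * stack setup, and sanity-check the Cache Writeback Granule.
 */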
void __init setup_cpu_features(void)
{
	u32 cwg;

	setup_system_capabilities();
	mark_const_caps_ready();
	setup_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		setup_elf_hwcaps(compat_elf_hwcaps);

	if (system_uses_ttbr0_pan())
		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");

	sve_setup();
	minsigstksz_setup();

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming %d\n",
			ARCH_DMA_MINALIGN);
}
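/*
 * True when PAN is implemented but UAO is not, in which case PAN has
 * to be toggled explicitly around user accesses.
 */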
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}
/*
 * We emulate only the following system register space.
 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
 * See Table C5-6 System instruction encodings for System register accesses,
 * ARMv8 ARM (ARM DDI 0487A.f) for more details.
 */
static inline bool __attribute_const__ is_emulated(u32 id)
{
	return (sys_reg_Op0(id) == 0x3 &&
		sys_reg_CRn(id) == 0x0 &&
		sys_reg_Op1(id) == 0x0 &&
		(sys_reg_CRm(id) == 0 ||
		 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
}
/*
 * With CRm == 0, reg should be one of:
 * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
 */
static inline int emulate_id_reg(u32 id, u64 *valp)
{
	switch (id) {
	case SYS_MIDR_EL1:
		*valp = read_cpuid_id();
		break;
	case SYS_MPIDR_EL1:
		*valp = SYS_MPIDR_SAFE_VAL;
		break;
	case SYS_REVIDR_EL1:
		/* IMPLEMENTATION DEFINED values are emulated with 0 */
		*valp = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
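/*
 * Provide the sanitised, user-visible value of an emulated register
 * via *valp; untracked registers read as 0. Returns -EINVAL for
 * registers outside the emulated space.
 */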
static int emulate_sys_reg(u32 id, u64 *valp)
{
	struct arm64_ftr_reg *regp;

	if (!is_emulated(id))
		return -EINVAL;

	if (sys_reg_CRm(id) == 0)
		return emulate_id_reg(id, valp);

	regp = get_arm64_ftr_reg(id);
	if (regp)
		*valp = arm64_ftr_reg_user_value(regp);
	else
		/*
		 * The untracked registers are either IMPLEMENTATION DEFINED
		 * (e.g., ID_AFR0_EL1) or reserved RAZ.
		 */
		*valp = 0;
	return 0;
}
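/*
 * Undef hook handler: decode the trapped MRS instruction, write the
 * emulated register value to the destination register and skip the
 * instruction on success.
 */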
static int emulate_mrs(struct pt_regs *regs, u32 insn)
{
	int rc;
	u32 sys_reg, dst;
	u64 val;

	/*
	 * sys_reg values are defined as used in mrs/msr instructions.
	 * Shift the imm value to get the encoding.
	 */
	sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
	rc = emulate_sys_reg(sys_reg, &val);
	if (!rc) {
		dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
		pt_regs_write_reg(regs, dst, val);
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	}
	return rc;
}
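/* Trap MRS instructions executed by AArch64 EL0 and emulate them. */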
static struct undef_hook mrs_hook = {
	.instr_mask = 0xfff00000,
	.instr_val = 0xd5300000,
	.pstate_mask = PSR_AA32_MODE_MASK,
	.pstate_val = PSR_MODE_EL0t,
	.fn = emulate_mrs,
};

static int __init enable_mrs_emulation(void)
{
	register_undef_hook(&mrs_hook);
	return 0;
}

core_initcall(enable_mrs_emulation);
void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
{
	/* Firmware may have left a deferred SError in this register. */
	write_sysreg_s(0, SYS_DISR_EL1);
}
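/* sysfs handler for /sys/devices/system/cpu/vulnerabilities/meltdown. */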
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	if (__meltdown_safe)
		return sprintf(buf, "Not affected\n");

	if (arm64_kernel_unmapped_at_el0())
		return sprintf(buf, "Mitigation: PTI\n");

	return sprintf(buf, "Vulnerable\n");
}