cpufeature.c

/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/processor.h>
#include <asm/sysreg.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT \
                (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
                 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
                 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
                 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
                 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
                 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
        { \
                .sign = SIGNED, \
                .strict = STRICT, \
                .type = TYPE, \
                .shift = SHIFT, \
                .width = WIDTH, \
                .safe_val = SAFE_VAL, \
        }

/* Define a feature with signed values */
#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
        __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with unsigned value */
#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
        __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END \
        { \
                .width = 0, \
        }
static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),  /* RAZ */
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
        /* Linux doesn't care about the EL3 */
        ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
        /* Linux shouldn't care about secure memory */
        ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
        /*
         * Differing PARange is fine as long as all peripherals and memory are mapped
         * within the minimum PARange of all CPUs
         */
        U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
        ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_ctr[] = {
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),  /* RAO */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),  /* CWG */
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),  /* ERG */
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),  /* DminLine */
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
         * make use of *minLine
         */
        U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),  /* L1Ip */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),  /* RAZ */
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),  /* IminLine */
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_mmfr0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),  /* InnerShr */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),  /* FCSE */
        ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),  /* AuxReg */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),  /* TCM */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),  /* ShareLvl */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),  /* OuterShr */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),  /* PMSA */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),  /* VMSA */
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
        ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_mvfr2[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),  /* RAZ */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),  /* FPMisc */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),  /* SIMDMisc */
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_dczid[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),  /* RAZ */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),  /* DZP */
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),  /* BS */
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_isar5[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),  /* RAZ */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_mmfr4[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),  /* RAZ */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),  /* ac2 */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),  /* RAZ */
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_pfr0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),  /* RAZ */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),  /* State3 */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),  /* State2 */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),  /* State1 */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),  /* State0 */
        ARM64_FTR_END,
};
/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static struct arm64_ftr_bits ftr_generic_32bits[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_generic[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_generic32[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
        ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_aa64raz[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
        ARM64_FTR_END,
};
#define ARM64_FTR_REG(id, table) \
        { \
                .sys_id = id, \
                .name = #id, \
                .ftr_bits = &((table)[0]), \
        }

static struct arm64_ftr_reg arm64_ftr_regs[] = {
        /* Op1 = 0, CRn = 0, CRm = 1 */
        ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
        ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
        ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

        /* Op1 = 0, CRn = 0, CRm = 2 */
        ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
        ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

        /* Op1 = 0, CRn = 0, CRm = 3 */
        ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

        /* Op1 = 0, CRn = 0, CRm = 4 */
        ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
        ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),

        /* Op1 = 0, CRn = 0, CRm = 5 */
        ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
        ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),

        /* Op1 = 0, CRn = 0, CRm = 6 */
        ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
        ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),

        /* Op1 = 0, CRn = 0, CRm = 7 */
        ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
        ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),

        /* Op1 = 3, CRn = 0, CRm = 0 */
        ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
        ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

        /* Op1 = 3, CRn = 14, CRm = 0 */
        ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
};
static int search_cmp_ftr_reg(const void *id, const void *regp)
{
        return (int)(unsigned long)id - (int)((const struct arm64_ftr_reg *)regp)->sys_id;
}
/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *           the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
        return bsearch((const void *)(unsigned long)sys_id,
                        arm64_ftr_regs,
                        ARRAY_SIZE(arm64_ftr_regs),
                        sizeof(arm64_ftr_regs[0]),
                        search_cmp_ftr_reg);
}
static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val)
{
        u64 mask = arm64_ftr_mask(ftrp);

        reg &= ~mask;
        reg |= (ftr_val << ftrp->shift) & mask;
        return reg;
}
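/*
 * Pick the safe value for a feature field when two CPUs disagree, based on
 * the field's type (exact match, lower-is-safe or higher-is-safe).
 */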
static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
{
        s64 ret = 0;

        switch (ftrp->type) {
        case FTR_EXACT:
                ret = ftrp->safe_val;
                break;
        case FTR_LOWER_SAFE:
                ret = new < cur ? new : cur;
                break;
        case FTR_HIGHER_SAFE:
                ret = new > cur ? new : cur;
                break;
        default:
                BUG();
        }

        return ret;
}
static int __init sort_cmp_ftr_regs(const void *a, const void *b)
{
        return ((const struct arm64_ftr_reg *)a)->sys_id -
               ((const struct arm64_ftr_reg *)b)->sys_id;
}

static void __init swap_ftr_regs(void *a, void *b, int size)
{
        struct arm64_ftr_reg tmp = *(struct arm64_ftr_reg *)a;

        *(struct arm64_ftr_reg *)a = *(struct arm64_ftr_reg *)b;
        *(struct arm64_ftr_reg *)b = tmp;
}

static void __init sort_ftr_regs(void)
{
        /* Keep the array sorted so that we can do the binary search */
        sort(arm64_ftr_regs,
             ARRAY_SIZE(arm64_ftr_regs),
             sizeof(arm64_ftr_regs[0]),
             sort_cmp_ftr_regs,
             swap_ftr_regs);
}
/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
        u64 val = 0;
        u64 strict_mask = ~0x0ULL;
        struct arm64_ftr_bits *ftrp;
        struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

        BUG_ON(!reg);

        for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
                s64 ftr_new = arm64_ftr_value(ftrp, new);

                val = arm64_ftr_set_value(ftrp, val, ftr_new);
                if (!ftrp->strict)
                        strict_mask &= ~arm64_ftr_mask(ftrp);
        }

        reg->sys_val = val;
        reg->strict_mask = strict_mask;
}
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
        /* Before we start using the tables, make sure it is sorted */
        sort_ftr_regs();

        init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
        init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
        init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
        init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
        init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
        init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
        init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
        init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
        init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
        init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
        init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
        init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
        init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
        init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
        init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
        init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
        init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
        init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
        init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
        init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
        init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
        init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
        init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
        init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
        init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
        init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
        init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}
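/*
 * Fold the feature register value seen on a new CPU into the system-wide
 * safe value, field by field.
 */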
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
        struct arm64_ftr_bits *ftrp;

        for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
                s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
                s64 ftr_new = arm64_ftr_value(ftrp, new);

                if (ftr_cur == ftr_new)
                        continue;

                /* Find a safe value */
                ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
                reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
        }
}
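/*
 * Update the system-wide state for @sys_id and warn when a strict field
 * differs from the boot CPU. Returns non-zero if a mismatch was found.
 */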
static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
        struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

        BUG_ON(!regp);
        update_cpu_ftr_reg(regp, val);
        if ((boot & regp->strict_mask) == (val & regp->strict_mask))
                return 0;
        pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
                regp->name, boot, cpu, val);
        return 1;
}
/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
                         struct cpuinfo_arm64 *info,
                         struct cpuinfo_arm64 *boot)
{
        int taint = 0;

        /*
         * The kernel can handle differing I-cache policies, but otherwise
         * caches should look identical. Userspace JITs will make use of
         * *minLine.
         */
        taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
                                      info->reg_ctr, boot->reg_ctr);

        /*
         * Userspace may perform DC ZVA instructions. Mismatched block sizes
         * could result in too much or too little memory being zeroed if a
         * process is preempted and migrated between CPUs.
         */
        taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
                                      info->reg_dczid, boot->reg_dczid);

        /* If different, timekeeping will be broken (especially with KVM) */
        taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
                                      info->reg_cntfrq, boot->reg_cntfrq);

        /*
         * The kernel uses self-hosted debug features and expects CPUs to
         * support identical debug features. We presently need CTX_CMPs, WRPs,
         * and BRPs to be identical.
         * ID_AA64DFR1 is currently RES0.
         */
        taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
                                      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
        taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
                                      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);

        /*
         * Even in big.LITTLE, processors should be identical instruction-set
         * wise.
         */
        taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
                                      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
        taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
                                      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

        /*
         * Differing PARange support is fine as long as all peripherals and
         * memory are mapped within the minimum PARange of all CPUs.
         * Linux should not care about secure memory.
         */
        taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
                                      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
        taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
                                      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);

        /*
         * EL3 is not our concern.
         * ID_AA64PFR1 is currently RES0.
         */
        taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
                                      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
        taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
                                      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
        /*
         * If we have AArch32, we care about 32-bit features for compat. These
         * registers should be RES0 otherwise.
         */
        taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
                                      info->reg_id_dfr0, boot->reg_id_dfr0);
        taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
                                      info->reg_id_isar0, boot->reg_id_isar0);
        taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
                                      info->reg_id_isar1, boot->reg_id_isar1);
        taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
                                      info->reg_id_isar2, boot->reg_id_isar2);
        taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
                                      info->reg_id_isar3, boot->reg_id_isar3);
        taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
                                      info->reg_id_isar4, boot->reg_id_isar4);
        taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
                                      info->reg_id_isar5, boot->reg_id_isar5);

        /*
         * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
         * ACTLR formats could differ across CPUs and therefore would have to
         * be trapped for virtualization anyway.
         */
        taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
                                      info->reg_id_mmfr0, boot->reg_id_mmfr0);
        taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
                                      info->reg_id_mmfr1, boot->reg_id_mmfr1);
        taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
                                      info->reg_id_mmfr2, boot->reg_id_mmfr2);
        taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
                                      info->reg_id_mmfr3, boot->reg_id_mmfr3);
        taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
                                      info->reg_id_pfr0, boot->reg_id_pfr0);
        taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
                                      info->reg_id_pfr1, boot->reg_id_pfr1);
        taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
                                      info->reg_mvfr0, boot->reg_mvfr0);
        taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
                                      info->reg_mvfr1, boot->reg_mvfr1);
        taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
                                      info->reg_mvfr2, boot->reg_mvfr2);

        /*
         * Mismatched CPU features are a recipe for disaster. Don't even
         * pretend to support them.
         */
        WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
                        "Unsupported CPU feature variation.\n");
}
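/* Return the system-wide sanitised value of the feature register @id. */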
u64 read_system_reg(u32 id)
{
        struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

        /* We shouldn't get a request for an unsupported register */
        BUG_ON(!regp);
        return regp->sys_val;
}

#include <linux/irqchip/arm-gic-v3.h>
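/*
 * Check whether the feature field extracted from @reg meets the minimum
 * value required by @entry.
 */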
static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
        int val = cpuid_feature_extract_field(reg, entry->field_pos);

        return val >= entry->min_field_value;
}

static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
{
        u64 val;

        val = read_system_reg(entry->sys_reg);
        return feature_matches(val, entry);
}
static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
{
        bool has_sre;

        if (!has_cpuid_feature(entry))
                return false;

        has_sre = gic_enable_sre();
        if (!has_sre)
                pr_warn_once("%s present but disabled by higher exception level\n",
                             entry->desc);

        return has_sre;
}
static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
                .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
                .matches = has_useable_gicv3_cpuif,
                .sys_reg = SYS_ID_AA64PFR0_EL1,
                .field_pos = ID_AA64PFR0_GIC_SHIFT,
                .min_field_value = 1,
        },
#ifdef CONFIG_ARM64_PAN
        {
                .desc = "Privileged Access Never",
                .capability = ARM64_HAS_PAN,
                .matches = has_cpuid_feature,
                .sys_reg = SYS_ID_AA64MMFR1_EL1,
                .field_pos = ID_AA64MMFR1_PAN_SHIFT,
                .min_field_value = 1,
                .enable = cpu_enable_pan,
        },
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
        {
                .desc = "LSE atomic instructions",
                .capability = ARM64_HAS_LSE_ATOMICS,
                .matches = has_cpuid_feature,
                .sys_reg = SYS_ID_AA64ISAR0_EL1,
                .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
                .min_field_value = 2,
        },
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
        {},
};
#define HWCAP_CAP(reg, field, min_value, type, cap) \
        { \
                .desc = #cap, \
                .matches = has_cpuid_feature, \
                .sys_reg = reg, \
                .field_pos = field, \
                .min_field_value = min_value, \
                .hwcap_type = type, \
                .hwcap = cap, \
        }
static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 2, CAP_HWCAP, HWCAP_PMULL),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 1, CAP_HWCAP, HWCAP_AES),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 1, CAP_HWCAP, HWCAP_SHA1),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 1, CAP_HWCAP, HWCAP_SHA2),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 1, CAP_HWCAP, HWCAP_CRC32),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 2, CAP_HWCAP, HWCAP_ATOMICS),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 0, CAP_HWCAP, HWCAP_FP),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 0, CAP_HWCAP, HWCAP_ASIMD),
#ifdef CONFIG_COMPAT
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
        {},
};
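/* Expose a detected capability to userspace through the matching ELF hwcap mask. */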
static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
{
        switch (cap->hwcap_type) {
        case CAP_HWCAP:
                elf_hwcap |= cap->hwcap;
                break;
#ifdef CONFIG_COMPAT
        case CAP_COMPAT_HWCAP:
                compat_elf_hwcap |= (u32)cap->hwcap;
                break;
        case CAP_COMPAT_HWCAP2:
                compat_elf_hwcap2 |= (u32)cap->hwcap;
                break;
#endif
        default:
                WARN_ON(1);
                break;
        }
}
/* Check if we have a particular HWCAP enabled */
static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
{
        bool rc;

        switch (cap->hwcap_type) {
        case CAP_HWCAP:
                rc = (elf_hwcap & cap->hwcap) != 0;
                break;
#ifdef CONFIG_COMPAT
        case CAP_COMPAT_HWCAP:
                rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
                break;
        case CAP_COMPAT_HWCAP2:
                rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
                break;
#endif
        default:
                WARN_ON(1);
                rc = false;
        }

        return rc;
}
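/*
 * Walk the hwcap table and advertise the capabilities supported by the
 * system-wide (sanitised) register values.
 */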
static void __init setup_cpu_hwcaps(void)
{
        int i;
        const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;

        for (i = 0; hwcaps[i].desc; i++)
                if (hwcaps[i].matches(&hwcaps[i]))
                        cap_set_hwcap(&hwcaps[i]);
}
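/*
 * Record each capability from @caps that the system supports and print the
 * ones that are newly detected.
 */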
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                             const char *info)
{
        int i;

        for (i = 0; caps[i].desc; i++) {
                if (!caps[i].matches(&caps[i]))
                        continue;

                if (!cpus_have_cap(caps[i].capability))
                        pr_info("%s %s\n", info, caps[i].desc);
                cpus_set_cap(caps[i].capability);
        }
}
/*
 * Run through the enabled capabilities and call enable() for each of them
 * on all active CPUs.
 */
static void __init
enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
        int i;

        for (i = 0; caps[i].desc; i++)
                if (caps[i].enable && cpus_have_cap(caps[i].capability))
                        on_each_cpu(caps[i].enable, NULL, true);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier.
 */
static bool sys_caps_initialised;

static inline void set_sys_caps_initialised(void)
{
        sys_caps_initialised = true;
}
/*
 * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
 */
static u64 __raw_read_system_reg(u32 sys_id)
{
        switch (sys_id) {
        case SYS_ID_PFR0_EL1:           return (u64)read_cpuid(ID_PFR0_EL1);
        case SYS_ID_PFR1_EL1:           return (u64)read_cpuid(ID_PFR1_EL1);
        case SYS_ID_DFR0_EL1:           return (u64)read_cpuid(ID_DFR0_EL1);
        case SYS_ID_MMFR0_EL1:          return (u64)read_cpuid(ID_MMFR0_EL1);
        case SYS_ID_MMFR1_EL1:          return (u64)read_cpuid(ID_MMFR1_EL1);
        case SYS_ID_MMFR2_EL1:          return (u64)read_cpuid(ID_MMFR2_EL1);
        case SYS_ID_MMFR3_EL1:          return (u64)read_cpuid(ID_MMFR3_EL1);
        case SYS_ID_ISAR0_EL1:          return (u64)read_cpuid(ID_ISAR0_EL1);
        case SYS_ID_ISAR1_EL1:          return (u64)read_cpuid(ID_ISAR1_EL1);
        case SYS_ID_ISAR2_EL1:          return (u64)read_cpuid(ID_ISAR2_EL1);
        case SYS_ID_ISAR3_EL1:          return (u64)read_cpuid(ID_ISAR3_EL1);
        case SYS_ID_ISAR4_EL1:          return (u64)read_cpuid(ID_ISAR4_EL1);
        case SYS_ID_ISAR5_EL1:          return (u64)read_cpuid(ID_ISAR5_EL1);
        case SYS_MVFR0_EL1:             return (u64)read_cpuid(MVFR0_EL1);
        case SYS_MVFR1_EL1:             return (u64)read_cpuid(MVFR1_EL1);
        case SYS_MVFR2_EL1:             return (u64)read_cpuid(MVFR2_EL1);

        case SYS_ID_AA64PFR0_EL1:       return (u64)read_cpuid(ID_AA64PFR0_EL1);
        case SYS_ID_AA64PFR1_EL1:       return (u64)read_cpuid(ID_AA64PFR1_EL1);
        case SYS_ID_AA64DFR0_EL1:       return (u64)read_cpuid(ID_AA64DFR0_EL1);
        case SYS_ID_AA64DFR1_EL1:       return (u64)read_cpuid(ID_AA64DFR1_EL1);
        case SYS_ID_AA64MMFR0_EL1:      return (u64)read_cpuid(ID_AA64MMFR0_EL1);
        case SYS_ID_AA64MMFR1_EL1:      return (u64)read_cpuid(ID_AA64MMFR1_EL1);
        case SYS_ID_AA64ISAR0_EL1:      return (u64)read_cpuid(ID_AA64ISAR0_EL1);
        case SYS_ID_AA64ISAR1_EL1:      return (u64)read_cpuid(ID_AA64ISAR1_EL1);

        case SYS_CNTFRQ_EL0:            return (u64)read_cpuid(CNTFRQ_EL0);
        case SYS_CTR_EL0:               return (u64)read_cpuid(CTR_EL0);
        case SYS_DCZID_EL0:             return (u64)read_cpuid(DCZID_EL0);
        default:
                BUG();
                return 0;
        }
}
/*
 * Park the CPU which doesn't have the capability as advertised
 * by the system.
 */
static void fail_incapable_cpu(char *cap_type,
                               const struct arm64_cpu_capabilities *cap)
{
        int cpu = smp_processor_id();

        pr_crit("CPU%d: missing %s : %s\n", cpu, cap_type, cap->desc);

        /* Mark this CPU absent */
        set_cpu_present(cpu, 0);

        /* Check if we can park ourselves */
        if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
                cpu_ops[cpu]->cpu_die(cpu);

        asm(
        "1:     wfe\n"
        "       wfi\n"
        "       b       1b");
}
/*
 * Run through the enabled system capabilities and enable() each one on this
 * CPU. The capabilities were decided based on the CPUs available at boot
 * time. Any new CPU should match the system-wide status of the capability.
 * If the new CPU doesn't have a capability which the system now has enabled,
 * we cannot do anything to fix it up and could cause unexpected failures, so
 * we park the CPU.
 */
void verify_local_cpu_capabilities(void)
{
        int i;
        const struct arm64_cpu_capabilities *caps;

        /*
         * If we haven't computed the system capabilities, there is nothing
         * to verify.
         */
        if (!sys_caps_initialised)
                return;

        caps = arm64_features;
        for (i = 0; caps[i].desc; i++) {
                if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
                        continue;
                /*
                 * If the new CPU misses an advertised feature, we cannot
                 * proceed further; park the cpu.
                 */
                if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
                        fail_incapable_cpu("arm64_features", &caps[i]);
                if (caps[i].enable)
                        caps[i].enable(NULL);
        }

        for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
                if (!cpus_have_hwcap(&caps[i]))
                        continue;
                if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
                        fail_incapable_cpu("arm64_hwcaps", &caps[i]);
        }
}
#else   /* !CONFIG_HOTPLUG_CPU */

static inline void set_sys_caps_initialised(void)
{
}

#endif  /* CONFIG_HOTPLUG_CPU */

static void __init setup_feature_capabilities(void)
{
        update_cpu_capabilities(arm64_features, "detected feature:");
        enable_cpu_capabilities(arm64_features);
}
void __init setup_cpu_features(void)
{
        u32 cwg;
        int cls;

        /* Set the CPU feature capabilities */
        setup_feature_capabilities();
        setup_cpu_hwcaps();

        /* Advertise that we have computed the system capabilities */
        set_sys_caps_initialised();

        /*
         * Check for sane CTR_EL0.CWG value.
         */
        cwg = cache_type_cwg();
        cls = cache_line_size();
        if (!cwg)
                pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
                        cls);
        if (L1_CACHE_BYTES < cls)
                pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
                        L1_CACHE_BYTES, cls);
}