dt_cpu_ftrs.c

/*
 * Copyright 2017, Nicholas Piggin, IBM Corporation
 * Licensed under GPLv2.
 */

#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/threads.h>

#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/mmu.h>
#include <asm/oprofile_impl.h>
#include <asm/prom.h>
#include <asm/setup.h>

/* Device-tree visible constants follow */
#define ISA_V2_07B              2070
#define ISA_V3_0B               3000

#define USABLE_PR               (1U << 0)
#define USABLE_OS               (1U << 1)
#define USABLE_HV               (1U << 2)

#define HV_SUPPORT_HFSCR        (1U << 0)
#define OS_SUPPORT_FSCR         (1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE         0xffffffffU
#define OS_SUPPORT_NONE         0xffffffffU
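
/*
 * Each subnode of the ibm,powerpc-cpu-features device-tree node is parsed
 * into one struct dt_cpu_feature below (see process_cpufeatures_node()):
 * the "isa", "usable-privilege", "hv-support", "os-support",
 * "hfscr-bit-nr", "fscr-bit-nr" and "hwcap-bit-nr" properties fill the
 * corresponding fields, and absent optional properties default to the
 * NONE / -1 values above.
 *
 * A purely illustrative node (hypothetical names and bit numbers, not
 * taken from any firmware) might look like:
 *
 *	example-feature {
 *		isa = <3000>;
 *		usable-privilege = <0x7>;	// PR | OS | HV
 *		hv-support = <0x1>;		// HV_SUPPORT_HFSCR
 *		hfscr-bit-nr = <59>;
 *		hwcap-bit-nr = <46>;
 *	};
 */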
struct dt_cpu_feature {
        const char *name;
        uint32_t isa;
        uint32_t usable_privilege;
        uint32_t hv_support;
        uint32_t os_support;
        uint32_t hfscr_bit_nr;
        uint32_t fscr_bit_nr;
        uint32_t hwcap_bit_nr;
        /* fdt parsing */
        unsigned long node;
        int enabled;
        int disabled;
};

#define CPU_FTRS_BASE \
           (CPU_FTR_USE_TB | \
            CPU_FTR_LWSYNC | \
            CPU_FTR_FPU_UNAVAILABLE |\
            CPU_FTR_NODSISRALIGN |\
            CPU_FTR_NOEXECUTE |\
            CPU_FTR_COHERENT_ICACHE | \
            CPU_FTR_STCX_CHECKS_ADDRESS |\
            CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
            CPU_FTR_DAWR | \
            CPU_FTR_ARCH_206 |\
            CPU_FTR_ARCH_207S)

#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

#define COMMON_USER_BASE        (PPC_FEATURE_32 | PPC_FEATURE_64 | \
                                 PPC_FEATURE_ARCH_2_06 |\
                                 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE       (PPC_FEATURE2_ARCH_2_07 | \
                                 PPC_FEATURE2_ISEL)
/*
 * Set up the base CPU
 */

extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);

static int hv_mode;

static struct {
        u64     lpcr;
        u64     hfscr;
        u64     fscr;
} system_registers;

static void (*init_pmu_registers)(void);

static void __restore_cpu_cpufeatures(void)
{
        /*
         * LPCR is restored by the power on engine already. It can be changed
         * after early init e.g., by radix enable, and we have no unified API
         * for saving and restoring such SPRs.
         *
         * This ->restore hook should really be removed from idle and register
         * restore moved directly into the idle restore code, because this code
         * doesn't know how idle is implemented or what it needs restored here.
         *
         * The best we can do to accommodate secondary boot and idle restore
         * for now is "or" LPCR with existing.
         */

        mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
        if (hv_mode) {
                mtspr(SPRN_LPID, 0);
                mtspr(SPRN_HFSCR, system_registers.hfscr);
        }
        mtspr(SPRN_FSCR, system_registers.fscr);

        if (init_pmu_registers)
                init_pmu_registers();
}

static char dt_cpu_name[64];
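
/*
 * base_cpu_spec is the minimal starting point: CPU_FTRS_BASE plus the
 * ISA 2.06/2.07 user feature words defined above. The feat_enable_*()
 * callbacks in the match table further down add to these fields as the
 * device-tree features are processed, and __restore_cpu_cpufeatures()
 * reapplies the SPR state captured in system_registers.
 */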
static struct cpu_spec __initdata base_cpu_spec = {
        .cpu_name               = NULL,
        .cpu_features           = CPU_FTRS_BASE,
        .cpu_user_features      = COMMON_USER_BASE,
        .cpu_user_features2     = COMMON_USER2_BASE,
        .mmu_features           = 0,
        .icache_bsize           = 32, /* minimum block size, fixed by */
        .dcache_bsize           = 32, /* cache info init.             */
        .num_pmcs               = 0,
        .pmc_type               = PPC_PMC_DEFAULT,
        .oprofile_cpu_type      = NULL,
        .oprofile_type          = PPC_OPROFILE_INVALID,
        .cpu_setup              = NULL,
        .cpu_restore            = __restore_cpu_cpufeatures,
        .machine_check_early    = NULL,
        .platform               = NULL,
};

static void __init cpufeatures_setup_cpu(void)
{
        set_cur_cpu_spec(&base_cpu_spec);

        cur_cpu_spec->pvr_mask = -1;
        cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

        /* Initialize the base environment -- clear FSCR/HFSCR. */
        hv_mode = !!(mfmsr() & MSR_HV);
        if (hv_mode) {
                /* CPU_FTR_HVMODE is used early in PACA setup */
                cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
                mtspr(SPRN_HFSCR, 0);
        }
        mtspr(SPRN_FSCR, 0);

        /*
         * LPCR does not get cleared, to match behaviour with secondaries
         * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
         * could clear LPCR too.
         */
}
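
/*
 * Generic recipe for features the kernel does not know by name: if the
 * device tree says the feature is controlled by an HFSCR and/or FSCR bit,
 * set those bits and advertise any hwcap bit to userspace. Anything more
 * exotic has no known recipe and is left disabled.
 */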
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
        if (f->hv_support == HV_SUPPORT_NONE) {
        } else if (f->hv_support & HV_SUPPORT_HFSCR) {
                u64 hfscr = mfspr(SPRN_HFSCR);
                hfscr |= 1UL << f->hfscr_bit_nr;
                mtspr(SPRN_HFSCR, hfscr);
        } else {
                /* Does not have a known recipe */
                return 0;
        }

        if (f->os_support == OS_SUPPORT_NONE) {
        } else if (f->os_support & OS_SUPPORT_FSCR) {
                u64 fscr = mfspr(SPRN_FSCR);
                fscr |= 1UL << f->fscr_bit_nr;
                mtspr(SPRN_FSCR, fscr);
        } else {
                /* Does not have a known recipe */
                return 0;
        }

        if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
                uint32_t word = f->hwcap_bit_nr / 32;
                uint32_t bit = f->hwcap_bit_nr % 32;

                if (word == 0)
                        cur_cpu_spec->cpu_user_features |= 1U << bit;
                else if (word == 1)
                        cur_cpu_spec->cpu_user_features2 |= 1U << bit;
                else
                        pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
        }

        return 1;
}

static int __init feat_enable(struct dt_cpu_feature *f)
{
        if (f->hv_support != HV_SUPPORT_NONE) {
                if (f->hfscr_bit_nr != -1) {
                        u64 hfscr = mfspr(SPRN_HFSCR);
                        hfscr |= 1UL << f->hfscr_bit_nr;
                        mtspr(SPRN_HFSCR, hfscr);
                }
        }

        if (f->os_support != OS_SUPPORT_NONE) {
                if (f->fscr_bit_nr != -1) {
                        u64 fscr = mfspr(SPRN_FSCR);
                        fscr |= 1UL << f->fscr_bit_nr;
                        mtspr(SPRN_FSCR, fscr);
                }
        }

        if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
                uint32_t word = f->hwcap_bit_nr / 32;
                uint32_t bit = f->hwcap_bit_nr % 32;

                if (word == 0)
                        cur_cpu_spec->cpu_user_features |= 1U << bit;
                else if (word == 1)
                        cur_cpu_spec->cpu_user_features2 |= 1U << bit;
                else
                        pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
        }

        return 1;
}

static int __init feat_disable(struct dt_cpu_feature *f)
{
        return 0;
}

static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
        u64 lpcr;

        if (!hv_mode) {
                pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
                return 0;
        }

        mtspr(SPRN_LPID, 0);

        lpcr = mfspr(SPRN_LPCR);
        lpcr &= ~LPCR_LPES0; /* HV external interrupts */
        mtspr(SPRN_LPCR, lpcr);

        cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

        return 1;
}

static int __init feat_enable_le(struct dt_cpu_feature *f)
{
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
        return 1;
}

static int __init feat_enable_smt(struct dt_cpu_feature *f)
{
        cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
        return 1;
}

static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
{
        u64 lpcr;

        /* Set PECE wakeup modes for ISA 207 */
        lpcr = mfspr(SPRN_LPCR);
        lpcr |= LPCR_PECE0;
        lpcr |= LPCR_PECE1;
        lpcr |= LPCR_PECE2;
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
{
        cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;

        return 1;
}

static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
        u64 lpcr;

        /* Set PECE wakeup modes for ISAv3.0B */
        lpcr = mfspr(SPRN_LPCR);
        lpcr |= LPCR_PECE0;
        lpcr |= LPCR_PECE1;
        lpcr |= LPCR_PECE2;
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
        u64 lpcr;

        lpcr = mfspr(SPRN_LPCR);
        lpcr &= ~LPCR_ISL;

        /* VRMASD */
        lpcr |= LPCR_VPM0;
        lpcr &= ~LPCR_VPM1;
        lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
        mtspr(SPRN_LPCR, lpcr);

        cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

        return 1;
}

static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
        u64 lpcr;

        lpcr = mfspr(SPRN_LPCR);
        lpcr &= ~LPCR_ISL;
        mtspr(SPRN_LPCR, lpcr);

        cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

        return 1;
}

static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_RADIX_MMU
        cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
        cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

        return 1;
#endif
        return 0;
}

static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
        u64 lpcr;

        feat_enable(f);

        lpcr = mfspr(SPRN_LPCR);
        lpcr &= ~LPCR_DPFD;
        lpcr |= (4UL << LPCR_DPFD_SH);
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static void hfscr_pmu_enable(void)
{
        u64 hfscr = mfspr(SPRN_HFSCR);
        hfscr |= PPC_BIT(60);
        mtspr(SPRN_HFSCR, hfscr);
}

static void init_pmu_power8(void)
{
        if (hv_mode) {
                mtspr(SPRN_MMCRC, 0);
                mtspr(SPRN_MMCRH, 0);
        }

        mtspr(SPRN_MMCRA, 0);
        mtspr(SPRN_MMCR0, 0);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
        mtspr(SPRN_MMCRS, 0);
}

static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
        cur_cpu_spec->platform = "power8";
        cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;

        return 1;
}

static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
        hfscr_pmu_enable();

        init_pmu_power8();
        init_pmu_registers = init_pmu_power8;

        cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
        if (pvr_version_is(PVR_POWER8E))
                cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

        cur_cpu_spec->num_pmcs          = 6;
        cur_cpu_spec->pmc_type          = PPC_PMC_IBM;
        cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";

        return 1;
}

static void init_pmu_power9(void)
{
        if (hv_mode)
                mtspr(SPRN_MMCRC, 0);

        mtspr(SPRN_MMCRA, 0);
        mtspr(SPRN_MMCR0, 0);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
}

static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
        cur_cpu_spec->platform = "power9";
        cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;

        return 1;
}

static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
        hfscr_pmu_enable();

        init_pmu_power9();
        init_pmu_registers = init_pmu_power9;

        cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

        cur_cpu_spec->num_pmcs          = 6;
        cur_cpu_spec->pmc_type          = PPC_PMC_IBM;
        cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";

        return 1;
}

static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        feat_enable(f);
        cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
        return 1;
#endif
        return 0;
}

static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
        feat_enable(f);
        cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

        return 1;
}

static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
        feat_enable(f);
        cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
        cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;

        return 1;
#endif
        return 0;
}

static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
        feat_enable(f);
        cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
        cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;

        return 1;
#endif
        return 0;
}

static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
        cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

        return 1;
}

static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
        /*
         * PPC_FEATURE2_EBB is enabled in PMU init code because it has
         * historically been related to the PMU facility. This may have
         * to be decoupled if EBB becomes more generic. For now, follow
         * existing convention.
         */
        f->hwcap_bit_nr = -1;
        feat_enable(f);

        return 1;
}

static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
        u64 lpcr;

        /* P9 has an HFSCR for privileged state */
        feat_enable(f);

        cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

        lpcr = mfspr(SPRN_LPCR);
        lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
        u64 lpcr;

        /*
         * POWER9 XIVE interrupts, including those in OPAL XICS compatibility
         * mode, are always delivered as hypervisor virtualization interrupts
         * (HVI) rather than as EE interrupts.
         *
         * However, LPES0 is not set here: in the unlikely case that an EE does
         * get delivered to the host somehow, the EE handler would not expect
         * it to be delivered in LPES0 mode (e.g., using SRR[01]). This could
         * happen if there is a bug in the interrupt controller code, or the IC
         * is misconfigured in systemsim.
         */
        lpcr = mfspr(SPRN_LPCR);
        lpcr |= LPCR_HVICE;     /* enable hvi interrupts */
        lpcr |= LPCR_HEIC;      /* disable ee interrupts when MSR_HV */
        lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
        mtspr(SPRN_LPCR, lpcr);

        return 1;
}

static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
        cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

        return 1;
}
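
/*
 * Each known feature name maps to an enable callback and an optional
 * CPU_FTR_* bit that cpufeatures_process_feature() applies when the
 * callback succeeds. Names not in this table fall back to
 * feat_try_enable_unknown() when enable_unknown is true.
 */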
struct dt_cpu_feature_match {
        const char *name;
        int (*enable)(struct dt_cpu_feature *f);
        u64 cpu_ftr_bit_mask;
};

static struct dt_cpu_feature_match __initdata
                dt_cpu_feature_match_table[] = {
        {"hypervisor", feat_enable_hv, 0},
        {"big-endian", feat_enable, 0},
        {"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
        {"smt", feat_enable_smt, 0},
        {"interrupt-facilities", feat_enable, 0},
        {"timer-facilities", feat_enable, 0},
        {"timer-facilities-v3", feat_enable, 0},
        {"debug-facilities", feat_enable, 0},
        {"come-from-address-register", feat_enable, CPU_FTR_CFAR},
        {"branch-tracing", feat_enable, 0},
        {"floating-point", feat_enable_fp, 0},
        {"vector", feat_enable_vector, 0},
        {"vector-scalar", feat_enable_vsx, 0},
        {"vector-scalar-v3", feat_enable, 0},
        {"decimal-floating-point", feat_enable, 0},
        {"decimal-integer", feat_enable, 0},
        {"quadword-load-store", feat_enable, 0},
        {"vector-crypto", feat_enable, 0},
        {"mmu-hash", feat_enable_mmu_hash, 0},
        {"mmu-radix", feat_enable_mmu_radix, 0},
        {"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
        {"virtual-page-class-key-protection", feat_enable, 0},
        {"transactional-memory", feat_enable_tm, CPU_FTR_TM},
        {"transactional-memory-v3", feat_enable_tm, 0},
        {"idle-nap", feat_enable_idle_nap, 0},
        {"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
        {"idle-stop", feat_enable_idle_stop, 0},
        {"machine-check-power8", feat_enable_mce_power8, 0},
        {"performance-monitor-power8", feat_enable_pmu_power8, 0},
        {"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
        {"event-based-branch", feat_enable_ebb, 0},
        {"target-address-register", feat_enable, 0},
        {"branch-history-rolling-buffer", feat_enable, 0},
        {"control-register", feat_enable, CPU_FTR_CTRL},
        {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-utilization-of-resources-register", feat_enable_purr, 0},
        {"no-execute", feat_enable, 0},
        {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
        {"cache-inhibited-large-page", feat_enable_large_ci, 0},
        {"coprocessor-icswx", feat_enable, 0},
        {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
        {"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
        {"wait", feat_enable, 0},
        {"atomic-memory-operations", feat_enable, 0},
        {"branch-v3", feat_enable, 0},
        {"copy-paste", feat_enable, 0},
        {"decimal-floating-point-v3", feat_enable, 0},
        {"decimal-integer-v3", feat_enable, 0},
        {"fixed-point-v3", feat_enable, 0},
        {"floating-point-v3", feat_enable, 0},
        {"group-start-register", feat_enable, 0},
        {"pc-relative-addressing", feat_enable, 0},
        {"machine-check-power9", feat_enable_mce_power9, 0},
        {"performance-monitor-power9", feat_enable_pmu_power9, 0},
        {"event-based-branch-v3", feat_enable, 0},
        {"random-number-generator", feat_enable, 0},
        {"system-call-vectored", feat_disable, 0},
        {"trace-interrupt-v3", feat_enable, 0},
        {"vector-v3", feat_enable, 0},
        {"vector-binary128", feat_enable, 0},
        {"vector-binary16", feat_enable, 0},
        {"wait-v3", feat_enable, 0},
};

static bool __initdata using_dt_cpu_ftrs;
static bool __initdata enable_unknown = true;
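
/*
 * "dt_cpu_ftrs=off" on the kernel command line disables this interface
 * entirely (the kernel then falls back to the regular PVR-based cputable
 * setup), and "dt_cpu_ftrs=known" only enables features the kernel
 * recognises by name, skipping the feat_try_enable_unknown() fallback.
 */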
static int __init dt_cpu_ftrs_parse(char *str)
{
        if (!str)
                return 0;

        if (!strcmp(str, "off"))
                using_dt_cpu_ftrs = false;
        else if (!strcmp(str, "known"))
                enable_unknown = false;
        else
                return 1;

        return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);

static void __init cpufeatures_setup_start(u32 isa)
{
        pr_info("setup for ISA %d\n", isa);

        if (isa >= 3000) {
                cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
                cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
        }
}

static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
{
        const struct dt_cpu_feature_match *m;
        bool known = false;
        int i;

        for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
                m = &dt_cpu_feature_match_table[i];
                if (!strcmp(f->name, m->name)) {
                        known = true;
                        if (m->enable(f))
                                break;

                        pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
                                f->name);
                        return false;
                }
        }

        if (!known && enable_unknown) {
                if (!feat_try_enable_unknown(f)) {
                        pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
                                f->name);
                        return false;
                }
        }

        /*
         * Only apply the table's CPU_FTR bit for features that matched an
         * entry; for unknown features 'm' is merely the last table entry.
         */
        if (known && m->cpu_ftr_bit_mask)
                cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;

        if (known)
                pr_debug("enabling: %s\n", f->name);
        else
                pr_debug("enabling: %s (unknown)\n", f->name);

        return true;
}

static __init void cpufeatures_cpu_quirks(void)
{
        int version = mfspr(SPRN_PVR);

        /*
         * Not all quirks can be derived from the cpufeatures device tree.
         */
        if ((version & 0xffffff00) == 0x004e0100)
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
        else if ((version & 0xffffefff) == 0x004e0201)
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
}

static void __init cpufeatures_setup_finished(void)
{
        cpufeatures_cpu_quirks();

        if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
                pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
                cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
        }

        system_registers.lpcr = mfspr(SPRN_LPCR);
        system_registers.hfscr = mfspr(SPRN_HFSCR);
        system_registers.fscr = mfspr(SPRN_FSCR);

        pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
                cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}

static int __init disabled_on_cmdline(void)
{
        unsigned long root, chosen;
        const char *p;

        root = of_get_flat_dt_root();
        chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
        if (chosen == -FDT_ERR_NOTFOUND)
                return false;

        p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
        if (!p)
                return false;

        if (strstr(p, "dt_cpu_ftrs=off"))
                return true;

        return false;
}

static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
                                        int depth, void *data)
{
        if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
            && of_get_flat_dt_prop(node, "isa", NULL))
                return 1;

        return 0;
}

bool __init dt_cpu_ftrs_in_use(void)
{
        return using_dt_cpu_ftrs;
}
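
/*
 * Called with the flattened device tree during early boot: the DT-based
 * CPU feature setup is only used when a compatible
 * "ibm,powerpc-cpu-features" node with an "isa" property exists and the
 * interface has not been disabled on the command line.
 */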
bool __init dt_cpu_ftrs_init(void *fdt)
{
        using_dt_cpu_ftrs = false;

        /* Setup and verify the FDT, if it fails we just bail */
        if (!early_init_dt_verify(fdt))
                return false;

        if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
                return false;

        if (disabled_on_cmdline())
                return false;

        cpufeatures_setup_cpu();

        using_dt_cpu_ftrs = true;
        return true;
}

static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;
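
/*
 * Fill dt_cpu_features[i] from one feature subnode. The hv/os/hwcap bit
 * numbers are cross-checked against usable-privilege so that malformed
 * nodes are rejected rather than enabling facilities the firmware did not
 * actually describe.
 */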
static int __init process_cpufeatures_node(unsigned long node,
                                           const char *uname, int i)
{
        const __be32 *prop;
        struct dt_cpu_feature *f;
        int len;

        f = &dt_cpu_features[i];
        memset(f, 0, sizeof(struct dt_cpu_feature));

        f->node = node;
        f->name = uname;

        prop = of_get_flat_dt_prop(node, "isa", &len);
        if (!prop) {
                pr_warn("%s: missing isa property\n", uname);
                return 0;
        }
        f->isa = be32_to_cpup(prop);

        prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
        if (!prop) {
  661. pr_warn("%s: missing usable-privilege property", uname);
                return 0;
        }
        f->usable_privilege = be32_to_cpup(prop);

        prop = of_get_flat_dt_prop(node, "hv-support", &len);
        if (prop)
                f->hv_support = be32_to_cpup(prop);
        else
                f->hv_support = HV_SUPPORT_NONE;

        prop = of_get_flat_dt_prop(node, "os-support", &len);
        if (prop)
                f->os_support = be32_to_cpup(prop);
        else
                f->os_support = OS_SUPPORT_NONE;

        prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
        if (prop)
                f->hfscr_bit_nr = be32_to_cpup(prop);
        else
                f->hfscr_bit_nr = -1;

        prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
        if (prop)
                f->fscr_bit_nr = be32_to_cpup(prop);
        else
                f->fscr_bit_nr = -1;

        prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
        if (prop)
                f->hwcap_bit_nr = be32_to_cpup(prop);
        else
                f->hwcap_bit_nr = -1;

        if (f->usable_privilege & USABLE_HV) {
                if (!(mfmsr() & MSR_HV)) {
                        pr_warn("%s: HV feature passed to guest\n", uname);
                        return 0;
                }

                if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
                        pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
                        return 0;
                }

                if (f->hv_support == HV_SUPPORT_HFSCR) {
                        if (f->hfscr_bit_nr == -1) {
                                pr_warn("%s: missing hfscr_bit_nr\n", uname);
                                return 0;
                        }
                }
        } else {
                if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
                        pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
                        return 0;
                }
        }

        if (f->usable_privilege & USABLE_OS) {
                if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
                        pr_warn("%s: unwanted fscr_bit_nr\n", uname);
                        return 0;
                }

                if (f->os_support == OS_SUPPORT_FSCR) {
                        if (f->fscr_bit_nr == -1) {
                                pr_warn("%s: missing fscr_bit_nr\n", uname);
                                return 0;
                        }
                }
        } else {
                if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
                        pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
                        return 0;
                }
        }

        if (!(f->usable_privilege & USABLE_PR)) {
                if (f->hwcap_bit_nr != -1) {
                        pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
                        return 0;
                }
        }

        /* Do all the independent features in the first pass */
        if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
                if (cpufeatures_process_feature(f))
                        f->enabled = 1;
                else
                        f->disabled = 1;
        }

        return 0;
}
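
/*
 * Second pass: features with a "dependencies" property (a list of phandles
 * to other feature nodes) are enabled recursively, and a feature is marked
 * disabled if any of its dependencies ended up disabled.
 */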
static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
        const __be32 *prop;
        int len;
        int nr_deps;
        int i;

        if (f->enabled || f->disabled)
                return;

        prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
        if (!prop) {
  753. pr_warn("%s: missing dependencies property", f->name);
                return;
        }

        nr_deps = len / sizeof(int);

        for (i = 0; i < nr_deps; i++) {
                unsigned long phandle = be32_to_cpu(prop[i]);
                int j;

                for (j = 0; j < nr_dt_cpu_features; j++) {
                        struct dt_cpu_feature *d = &dt_cpu_features[j];

                        if (of_get_flat_dt_phandle(d->node) == phandle) {
                                cpufeatures_deps_enable(d);
                                if (d->disabled) {
                                        f->disabled = 1;
                                        return;
                                }
                        }
                }
        }

        if (cpufeatures_process_feature(f))
                f->enabled = 1;
        else
                f->disabled = 1;
}

static int __init scan_cpufeatures_subnodes(unsigned long node,
                                            const char *uname,
                                            void *data)
{
        int *count = data;

        process_cpufeatures_node(node, uname, *count);

        (*count)++;

        return 0;
}

static int __init count_cpufeatures_subnodes(unsigned long node,
                                             const char *uname,
                                             void *data)
{
        int *count = data;

        (*count)++;

        return 0;
}

static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
                                            *uname, int depth, void *data)
{
        const __be32 *prop;
        int count, i;
        u32 isa;

        /* We are scanning "ibm,powerpc-cpu-features" nodes only */
        if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
                return 0;

        prop = of_get_flat_dt_prop(node, "isa", NULL);
        if (!prop)
                /* We checked before, "can't happen" */
                return 0;

        isa = be32_to_cpup(prop);

        /* Count and allocate space for cpu features */
        of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
                                                &nr_dt_cpu_features);
        dt_cpu_features = __va(
                memblock_alloc(sizeof(struct dt_cpu_feature)*
                                nr_dt_cpu_features, PAGE_SIZE));

        cpufeatures_setup_start(isa);

        /* Scan nodes into dt_cpu_features and enable those without deps */
        count = 0;
        of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

        /* Recursively enable remaining features with dependencies */
        for (i = 0; i < nr_dt_cpu_features; i++) {
                struct dt_cpu_feature *f = &dt_cpu_features[i];

                cpufeatures_deps_enable(f);
        }

        prop = of_get_flat_dt_prop(node, "display-name", NULL);
        if (prop && strlen((char *)prop) != 0) {
                strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
                cur_cpu_spec->cpu_name = dt_cpu_name;
        }

        cpufeatures_setup_finished();

        memblock_free(__pa(dt_cpu_features),
                        sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

        return 0;
}

void __init dt_cpu_ftrs_scan(void)
{
        if (!using_dt_cpu_ftrs)
                return;

        of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}