dt_cpu_ftrs.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069
  1. /*
  2. * Copyright 2017, Nicholas Piggin, IBM Corporation
  3. * Licensed under GPLv2.
  4. */
  5. #define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
  6. #include <linux/export.h>
  7. #include <linux/init.h>
  8. #include <linux/jump_label.h>
  9. #include <linux/libfdt.h>
  10. #include <linux/memblock.h>
  11. #include <linux/printk.h>
  12. #include <linux/sched.h>
  13. #include <linux/string.h>
  14. #include <linux/threads.h>
  15. #include <asm/cputable.h>
  16. #include <asm/dt_cpu_ftrs.h>
  17. #include <asm/mmu.h>
  18. #include <asm/oprofile_impl.h>
  19. #include <asm/prom.h>
  20. #include <asm/setup.h>
/* Device-tree visible constants follow */

/* Values of the "isa" property: ISA v2.07B and v3.0B */
#define ISA_V2_07B 2070
#define ISA_V3_0B 3000

/* Bits of the "usable-privilege" property */
#define USABLE_PR (1U << 0)
#define USABLE_OS (1U << 1)
#define USABLE_HV (1U << 2)

/* Bits of the "hv-support" / "os-support" properties */
#define HV_SUPPORT_HFSCR (1U << 0)
#define OS_SUPPORT_FSCR (1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE 0xffffffffU
#define OS_SUPPORT_NONE 0xffffffffU
/*
 * One parsed "ibm,powerpc-cpu-features" subnode. The first group of
 * fields mirrors the node's properties; the trailing group is parser
 * bookkeeping used while resolving dependencies.
 */
struct dt_cpu_feature {
	const char *name;		/* node (feature) name */
	uint32_t isa;			/* "isa" property, e.g. 2070, 3000 */
	uint32_t usable_privilege;	/* mask of USABLE_PR/OS/HV */
	uint32_t hv_support;		/* HV recipe, or HV_SUPPORT_NONE */
	uint32_t os_support;		/* OS recipe, or OS_SUPPORT_NONE */
	uint32_t hfscr_bit_nr;		/* HFSCR bit to set, or -1 */
	uint32_t fscr_bit_nr;		/* FSCR bit to set, or -1 */
	uint32_t hwcap_bit_nr;		/* user hwcap bit to set, or -1 */
	/* fdt parsing */
	unsigned long node;		/* flat DT node offset */
	int enabled;			/* successfully enabled */
	int disabled;			/* could not be enabled */
};
/* CPU features assumed present on any CPU using this binding */
#define CPU_FTRS_BASE \
	(CPU_FTR_USE_TB | \
	 CPU_FTR_LWSYNC | \
	 CPU_FTR_FPU_UNAVAILABLE |\
	 CPU_FTR_NODSISRALIGN |\
	 CPU_FTR_NOEXECUTE |\
	 CPU_FTR_COHERENT_ICACHE | \
	 CPU_FTR_STCX_CHECKS_ADDRESS |\
	 CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	 CPU_FTR_DAWR | \
	 CPU_FTR_ARCH_206 |\
	 CPU_FTR_ARCH_207S)

#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

/* Base user-visible feature words (AT_HWCAP / AT_HWCAP2) */
#define COMMON_USER_BASE	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
				 PPC_FEATURE_ARCH_2_06 |\
				 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_ISEL)
/*
 * Set up the base CPU
 */

/* Handlers implemented elsewhere, selected per machine-check-* node */
extern void __flush_tlb_power8(unsigned int action);
extern void __flush_tlb_power9(unsigned int action);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);

/* Non-zero when the boot CPU started with MSR[HV] set */
static int hv_mode;

/*
 * SPR values captured at the end of feature setup; reapplied to
 * secondaries / on idle wakeup by __restore_cpu_cpufeatures().
 */
static struct {
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
} system_registers;

/* PMU init routine chosen by the performance-monitor-* feature node */
static void (*init_pmu_registers)(void);
/*
 * Flush the boot-time TLB using tlbiel, one congruence-class set at a
 * time. The set count is derived from the PVR since the device tree
 * does not describe it.
 */
static void cpufeatures_flush_tlb(void)
{
	unsigned long rb;
	unsigned int i, num_sets;

	/*
	 * This is a temporary measure to keep equivalent TLB flush as the
	 * cputable based setup code.
	 */
	switch (PVR_VER(mfspr(SPRN_PVR))) {
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
		num_sets = POWER8_TLB_SETS;
		break;
	case PVR_POWER9:
		num_sets = POWER9_TLB_SETS_HASH;
		break;
	default:
		num_sets = 1;
		pr_err("unknown CPU version for boot TLB flush\n");
		break;
	}

	/* ptesync before/after orders the invalidations wrt other accesses */
	asm volatile("ptesync" : : : "memory");
	rb = TLBIEL_INVAL_SET;
	for (i = 0; i < num_sets; i++) {
		asm volatile("tlbiel %0" : : "r" (rb));
		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
	}
	asm volatile("ptesync" : : : "memory");
}
/*
 * cpu_restore hook: reapply the SPR state captured by
 * cpufeatures_setup_finished() on secondary boot and idle wakeup.
 */
static void __restore_cpu_cpufeatures(void)
{
	/*
	 * LPCR is restored by the power on engine already. It can be changed
	 * after early init e.g., by radix enable, and we have no unified API
	 * for saving and restoring such SPRs.
	 *
	 * This ->restore hook should really be removed from idle and register
	 * restore moved directly into the idle restore code, because this code
	 * doesn't know how idle is implemented or what it needs restored here.
	 *
	 * The best we can do to accommodate secondary boot and idle restore
	 * for now is "or" LPCR with existing.
	 */

	mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
	if (hv_mode) {
		/* HV-only SPRs; touching them as a guest would trap */
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	if (init_pmu_registers)
		init_pmu_registers();

	cpufeatures_flush_tlb();
}
/* Buffer for the DT "display-name" used as the CPU name, if present */
static char dt_cpu_name[64];

/*
 * Minimal cpu_spec used instead of the cputable entry when booting via
 * the cpufeatures binding. Feature nodes fill in the rest.
 */
static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name		= NULL,
	.cpu_features		= CPU_FTRS_BASE,
	.cpu_user_features	= COMMON_USER_BASE,
	.cpu_user_features2	= COMMON_USER2_BASE,
	.mmu_features		= 0,
	.icache_bsize		= 32, /* minimum block size, fixed by */
	.dcache_bsize		= 32, /* cache info init.             */
	.num_pmcs		= 0,
	.pmc_type		= PPC_PMC_DEFAULT,
	.oprofile_cpu_type	= NULL,
	.oprofile_type		= PPC_OPROFILE_INVALID,
	.cpu_setup		= NULL,
	.cpu_restore		= __restore_cpu_cpufeatures,
	.flush_tlb		= NULL,
	.machine_check_early	= NULL,
	.platform		= NULL,
};
/*
 * Install the base cpu_spec and put the boot CPU into a known state
 * before any feature nodes are processed.
 */
static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	/* Exact-match PVR so this spec is never re-matched by cputable */
	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR.  */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		/* CPU_FTR_HVMODE is used early in PACA setup */
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}
/*
 * Best-effort enable for a feature with no match-table entry, using only
 * the generic recipe the device tree describes (HFSCR/FSCR bit numbers
 * and a hwcap bit). Returns 1 on success, 0 when the node specifies a
 * recipe the kernel does not understand.
 */
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
	if (f->hv_support == HV_SUPPORT_NONE) {
		/* nothing to do at the hypervisor level */
	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
		u64 hfscr = mfspr(SPRN_HFSCR);
		hfscr |= 1UL << f->hfscr_bit_nr;
		mtspr(SPRN_HFSCR, hfscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if (f->os_support == OS_SUPPORT_NONE) {
		/* nothing to do at the OS level */
	} else if (f->os_support & OS_SUPPORT_FSCR) {
		u64 fscr = mfspr(SPRN_FSCR);
		fscr |= 1UL << f->fscr_bit_nr;
		mtspr(SPRN_FSCR, fscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		/* hwcap word 0 -> cpu_user_features, word 1 -> _features2 */
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}
/*
 * Generic enable for a known feature: set the advertised HFSCR/FSCR bits
 * (if any) and publish the user hwcap bit. Always succeeds (returns 1).
 */
static int __init feat_enable(struct dt_cpu_feature *f)
{
	if (f->hv_support != HV_SUPPORT_NONE) {
		if (f->hfscr_bit_nr != -1) {
			u64 hfscr = mfspr(SPRN_HFSCR);
			hfscr |= 1UL << f->hfscr_bit_nr;
			mtspr(SPRN_HFSCR, hfscr);
		}
	}

	if (f->os_support != OS_SUPPORT_NONE) {
		if (f->fscr_bit_nr != -1) {
			u64 fscr = mfspr(SPRN_FSCR);
			fscr |= 1UL << f->fscr_bit_nr;
			mtspr(SPRN_FSCR, fscr);
		}
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		/* hwcap word 0 -> cpu_user_features, word 1 -> _features2 */
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}
/* Match-table hook for features the kernel deliberately refuses. */
static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}
/*
 * "hypervisor" feature: only valid when the CPU actually booted in HV
 * mode; sets up LPID/LPCR for bare-metal operation.
 */
static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!hv_mode) {
		pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
		return 0;
	}

	mtspr(SPRN_LPID, 0);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &=  ~LPCR_LPES0; /* HV external interrupts */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

	return 1;
}
/* "little-endian": advertise true little-endian support to userspace. */
static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}
  253. static int __init feat_enable_smt(struct dt_cpu_feature *f)
  254. {
  255. cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
  256. cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
  257. return 1;
  258. }
  259. static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
  260. {
  261. u64 lpcr;
  262. /* Set PECE wakeup modes for ISA 207 */
  263. lpcr = mfspr(SPRN_LPCR);
  264. lpcr |= LPCR_PECE0;
  265. lpcr |= LPCR_PECE1;
  266. lpcr |= LPCR_PECE2;
  267. mtspr(SPRN_LPCR, lpcr);
  268. return 1;
  269. }
/*
 * "alignment-interrupt-dsisr": the CPU sets DSISR on alignment
 * interrupts, so drop the base assumption that it does not.
 */
static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;

	return 1;
}
  275. static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
  276. {
  277. u64 lpcr;
  278. /* Set PECE wakeup modes for ISAv3.0B */
  279. lpcr = mfspr(SPRN_LPCR);
  280. lpcr |= LPCR_PECE0;
  281. lpcr |= LPCR_PECE1;
  282. lpcr |= LPCR_PECE2;
  283. mtspr(SPRN_LPCR, lpcr);
  284. return 1;
  285. }
/*
 * "mmu-hash": pre-ISAv3 hash MMU. Configure LPCR virtualization and
 * VRMA fields and advertise the hash MMU feature set.
 */
static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}
  300. static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
  301. {
  302. u64 lpcr;
  303. lpcr = mfspr(SPRN_LPCR);
  304. lpcr &= ~LPCR_ISL;
  305. mtspr(SPRN_LPCR, lpcr);
  306. cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
  307. cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
  308. return 1;
  309. }
/*
 * "mmu-radix": only honoured when the kernel is built with radix MMU
 * support; otherwise the feature is rejected (returns 0).
 */
static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_RADIX_MMU
	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	/* Hash base features are also set so hash mode remains possible */
	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
#endif
	return 0;
}
/*
 * "data-stream-control-register": enable DSCR access via the generic
 * recipe, then program the default prefetch depth in LPCR[DPFD].
 */
static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
	u64 lpcr;

	feat_enable(f);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_DPFD;
	lpcr |=  (4UL << LPCR_DPFD_SH);	/* default prefetch depth = 4 */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
  330. static void hfscr_pmu_enable(void)
  331. {
  332. u64 hfscr = mfspr(SPRN_HFSCR);
  333. hfscr |= PPC_BIT(60);
  334. mtspr(SPRN_HFSCR, hfscr);
  335. }
/* Zero the POWER8 PMU control registers (HV-only ones when applicable). */
static void init_pmu_power8(void)
{
	if (hv_mode) {
		/* MMCRC/MMCRH are hypervisor resources */
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}
  348. static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
  349. {
  350. cur_cpu_spec->platform = "power8";
  351. cur_cpu_spec->flush_tlb = __flush_tlb_power8;
  352. cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;
  353. return 1;
  354. }
/*
 * "performance-monitor-power8": grant PMU access, reset the PMU, and
 * register the POWER8 PMU restore hook and cpu_spec PMU parameters.
 */
static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power8();
	init_pmu_registers = init_pmu_power8;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
	if (pvr_version_is(PVR_POWER8E))
		cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power8";

	return 1;
}
/* Zero the POWER9 PMU control registers (MMCRC only in HV mode). */
static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0);

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}
  378. static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
  379. {
  380. cur_cpu_spec->platform = "power9";
  381. cur_cpu_spec->flush_tlb = __flush_tlb_power9;
  382. cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;
  383. return 1;
  384. }
/*
 * "performance-monitor-power9": grant PMU access, reset the PMU, and
 * register the POWER9 PMU restore hook and cpu_spec PMU parameters.
 */
static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power9();
	init_pmu_registers = init_pmu_power9;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power9";

	return 1;
}
/*
 * "transactional-memory": only honoured when the kernel is built with
 * TM support; also advertises HTM-no-syscall to userspace.
 */
static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	feat_enable(f);
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
	return 1;
#endif
	return 0;
}
  406. static int __init feat_enable_fp(struct dt_cpu_feature *f)
  407. {
  408. feat_enable(f);
  409. cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;
  410. return 1;
  411. }
/*
 * "vector": only honoured when the kernel is built with Altivec
 * support; otherwise rejected.
 */
static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
	cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	return 1;
#endif
	return 0;
}
/*
 * "vector-scalar": only honoured when the kernel is built with VSX
 * support; otherwise rejected.
 */
static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;
	return 1;
#endif
	return 0;
}
  433. static int __init feat_enable_purr(struct dt_cpu_feature *f)
  434. {
  435. cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;
  436. return 1;
  437. }
/*
 * "event-based-branch": suppress the node's hwcap bit here and let the
 * PMU code advertise it instead.
 */
static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;	/* cleared before the generic enable */
	feat_enable(f);

	return 1;
}
/*
 * "processor-control-facility": enable doorbells and allow hypervisor
 * doorbells to wake the CPU from idle.
 */
static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* P9 has an HFSCR for privileged state */
	feat_enable(f);

	cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

	lpcr = mfspr(SPRN_LPCR);
	lpcr |=  LPCR_PECEDH; /* hyp doorbell wakeup */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
/*
 * "hypervisor-virtualization-interrupt": route external interrupts as
 * HVIs and allow them to wake the CPU from stop.
 */
static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts including in OPAL XICS compatibility
	 * are always delivered as hypervisor virtualization interrupts (HVI)
	 * rather than EE.
	 *
	 * However LPES0 is not set here, in the chance that an EE does get
	 * delivered to the host somehow, the EE handler would not expect it
	 * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
	 * happen if there is a bug in interrupt controller code, or IC is
	 * misconfigured in systemsim.
	 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
  482. static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
  483. {
  484. cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;
  485. return 1;
  486. }
/*
 * Maps a feature node name to its enable routine and the CPU_FTR_* bits
 * to set once that routine succeeds.
 */
struct dt_cpu_feature_match {
	const char *name;			/* device tree node name */
	int (*enable)(struct dt_cpu_feature *f);/* returns 1 on success */
	u64 cpu_ftr_bit_mask;			/* CPU_FTR_* to set on success */
};

/* Known features; anything else goes through feat_try_enable_unknown() */
static struct dt_cpu_feature_match __initdata
		dt_cpu_feature_match_table[] = {
	{"hypervisor", feat_enable_hv, 0},
	{"big-endian", feat_enable, 0},
	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
	{"smt", feat_enable_smt, 0},
	{"interrupt-facilities", feat_enable, 0},
	{"timer-facilities", feat_enable, 0},
	{"timer-facilities-v3", feat_enable, 0},
	{"debug-facilities", feat_enable, 0},
	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
	{"branch-tracing", feat_enable, 0},
	{"floating-point", feat_enable_fp, 0},
	{"vector", feat_enable_vector, 0},
	{"vector-scalar", feat_enable_vsx, 0},
	{"vector-scalar-v3", feat_enable, 0},
	{"decimal-floating-point", feat_enable, 0},
	{"decimal-integer", feat_enable, 0},
	{"quadword-load-store", feat_enable, 0},
	{"vector-crypto", feat_enable, 0},
	{"mmu-hash", feat_enable_mmu_hash, 0},
	{"mmu-radix", feat_enable_mmu_radix, 0},
	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"idle-nap", feat_enable_idle_nap, 0},
	{"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
	{"idle-stop", feat_enable_idle_stop, 0},
	{"machine-check-power8", feat_enable_mce_power8, 0},
	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
	{"event-based-branch", feat_enable_ebb, 0},
	{"target-address-register", feat_enable, 0},
	{"branch-history-rolling-buffer", feat_enable, 0},
	{"control-register", feat_enable, CPU_FTR_CTRL},
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
	{"no-execute", feat_enable, 0},
	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
	{"coprocessor-icswx", feat_enable, CPU_FTR_ICSWX},
	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
	{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
	{"wait", feat_enable, 0},
	{"atomic-memory-operations", feat_enable, 0},
	{"branch-v3", feat_enable, 0},
	{"copy-paste", feat_enable, 0},
	{"decimal-floating-point-v3", feat_enable, 0},
	{"decimal-integer-v3", feat_enable, 0},
	{"fixed-point-v3", feat_enable, 0},
	{"floating-point-v3", feat_enable, 0},
	{"group-start-register", feat_enable, 0},
	{"pc-relative-addressing", feat_enable, 0},
	{"machine-check-power9", feat_enable_mce_power9, 0},
	{"performance-monitor-power9", feat_enable_pmu_power9, 0},
	{"event-based-branch-v3", feat_enable, 0},
	{"random-number-generator", feat_enable, 0},
	{"system-call-vectored", feat_disable, 0},
	{"trace-interrupt-v3", feat_enable, 0},
	{"vector-v3", feat_enable, 0},
	{"vector-binary128", feat_enable, 0},
	{"vector-binary16", feat_enable, 0},
	{"wait-v3", feat_enable, 0},
};
/* Default: use the binding when present, and enable unknown features */
static bool __initdata using_dt_cpu_ftrs;
static bool __initdata enable_unknown = true;

/*
 * Parse the dt_cpu_ftrs= command line option.
 * "off" disables the binding entirely; "known" restricts enabling to
 * features with a match-table entry. Anything else is rejected.
 */
static int __init dt_cpu_ftrs_parse(char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "off"))
		using_dt_cpu_ftrs = false;
	else if (!strcmp(str, "known"))
		enable_unknown = false;
	else
		return 1;	/* unrecognised value */

	return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
  573. static void __init cpufeatures_setup_start(u32 isa)
  574. {
  575. pr_info("setup for ISA %d\n", isa);
  576. if (isa >= 3000) {
  577. cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
  578. cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
  579. }
  580. }
  581. static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
  582. {
  583. const struct dt_cpu_feature_match *m;
  584. bool known = false;
  585. int i;
  586. for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
  587. m = &dt_cpu_feature_match_table[i];
  588. if (!strcmp(f->name, m->name)) {
  589. known = true;
  590. if (m->enable(f))
  591. break;
  592. pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
  593. f->name);
  594. return false;
  595. }
  596. }
  597. if (!known && enable_unknown) {
  598. if (!feat_try_enable_unknown(f)) {
  599. pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
  600. f->name);
  601. return false;
  602. }
  603. }
  604. if (m->cpu_ftr_bit_mask)
  605. cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
  606. if (known)
  607. pr_debug("enabling: %s\n", f->name);
  608. else
  609. pr_debug("enabling: %s (unknown)\n", f->name);
  610. return true;
  611. }
/* Apply PVR-based quirks that the device tree cannot express. */
static __init void cpufeatures_cpu_quirks(void)
{
	int version = mfspr(SPRN_PVR);

	/*
	 * Not all quirks can be derived from the cpufeatures device tree.
	 */
	if ((version & 0xffffff00) == 0x004e0100)	/* POWER9 DD1 PVR */
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
}
/*
 * Finish feature setup: apply quirks, reconcile HV mode, snapshot the
 * SPRs for secondary/idle restore, and flush the boot TLB.
 */
static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	/* Snapshot for __restore_cpu_cpufeatures() */
	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);

	cpufeatures_flush_tlb();

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}
  635. static int __init disabled_on_cmdline(void)
  636. {
  637. unsigned long root, chosen;
  638. const char *p;
  639. root = of_get_flat_dt_root();
  640. chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
  641. if (chosen == -FDT_ERR_NOTFOUND)
  642. return false;
  643. p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
  644. if (!p)
  645. return false;
  646. if (strstr(p, "dt_cpu_ftrs=off"))
  647. return true;
  648. return false;
  649. }
  650. static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
  651. int depth, void *data)
  652. {
  653. if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
  654. && of_get_flat_dt_prop(node, "isa", NULL))
  655. return 1;
  656. return 0;
  657. }
/* Report whether CPU setup is being driven by the cpufeatures binding. */
bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}
/*
 * Decide whether to use the cpufeatures binding: the FDT must verify,
 * contain a suitable node, and not be overridden on the command line.
 * On success the base cpu_spec is installed and true is returned.
 */
bool __init dt_cpu_ftrs_init(void *fdt)
{
	using_dt_cpu_ftrs = false;

	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	if (disabled_on_cmdline())
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}
  676. static int nr_dt_cpu_features;
  677. static struct dt_cpu_feature *dt_cpu_features;
  678. static int __init process_cpufeatures_node(unsigned long node,
  679. const char *uname, int i)
  680. {
  681. const __be32 *prop;
  682. struct dt_cpu_feature *f;
  683. int len;
  684. f = &dt_cpu_features[i];
  685. memset(f, 0, sizeof(struct dt_cpu_feature));
  686. f->node = node;
  687. f->name = uname;
  688. prop = of_get_flat_dt_prop(node, "isa", &len);
  689. if (!prop) {
  690. pr_warn("%s: missing isa property\n", uname);
  691. return 0;
  692. }
  693. f->isa = be32_to_cpup(prop);
  694. prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
  695. if (!prop) {
  696. pr_warn("%s: missing usable-privilege property", uname);
  697. return 0;
  698. }
  699. f->usable_privilege = be32_to_cpup(prop);
  700. prop = of_get_flat_dt_prop(node, "hv-support", &len);
  701. if (prop)
  702. f->hv_support = be32_to_cpup(prop);
  703. else
  704. f->hv_support = HV_SUPPORT_NONE;
  705. prop = of_get_flat_dt_prop(node, "os-support", &len);
  706. if (prop)
  707. f->os_support = be32_to_cpup(prop);
  708. else
  709. f->os_support = OS_SUPPORT_NONE;
  710. prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
  711. if (prop)
  712. f->hfscr_bit_nr = be32_to_cpup(prop);
  713. else
  714. f->hfscr_bit_nr = -1;
  715. prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
  716. if (prop)
  717. f->fscr_bit_nr = be32_to_cpup(prop);
  718. else
  719. f->fscr_bit_nr = -1;
  720. prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
  721. if (prop)
  722. f->hwcap_bit_nr = be32_to_cpup(prop);
  723. else
  724. f->hwcap_bit_nr = -1;
  725. if (f->usable_privilege & USABLE_HV) {
  726. if (!(mfmsr() & MSR_HV)) {
  727. pr_warn("%s: HV feature passed to guest\n", uname);
  728. return 0;
  729. }
  730. if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
  731. pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
  732. return 0;
  733. }
  734. if (f->hv_support == HV_SUPPORT_HFSCR) {
  735. if (f->hfscr_bit_nr == -1) {
  736. pr_warn("%s: missing hfscr_bit_nr\n", uname);
  737. return 0;
  738. }
  739. }
  740. } else {
  741. if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
  742. pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
  743. return 0;
  744. }
  745. }
  746. if (f->usable_privilege & USABLE_OS) {
  747. if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
  748. pr_warn("%s: unwanted fscr_bit_nr\n", uname);
  749. return 0;
  750. }
  751. if (f->os_support == OS_SUPPORT_FSCR) {
  752. if (f->fscr_bit_nr == -1) {
  753. pr_warn("%s: missing fscr_bit_nr\n", uname);
  754. return 0;
  755. }
  756. }
  757. } else {
  758. if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
  759. pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
  760. return 0;
  761. }
  762. }
  763. if (!(f->usable_privilege & USABLE_PR)) {
  764. if (f->hwcap_bit_nr != -1) {
  765. pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
  766. return 0;
  767. }
  768. }
  769. /* Do all the independent features in the first pass */
  770. if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
  771. if (cpufeatures_process_feature(f))
  772. f->enabled = 1;
  773. else
  774. f->disabled = 1;
  775. }
  776. return 0;
  777. }
  778. static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
  779. {
  780. const __be32 *prop;
  781. int len;
  782. int nr_deps;
  783. int i;
  784. if (f->enabled || f->disabled)
  785. return;
  786. prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
  787. if (!prop) {
  788. pr_warn("%s: missing dependencies property", f->name);
  789. return;
  790. }
  791. nr_deps = len / sizeof(int);
  792. for (i = 0; i < nr_deps; i++) {
  793. unsigned long phandle = be32_to_cpu(prop[i]);
  794. int j;
  795. for (j = 0; j < nr_dt_cpu_features; j++) {
  796. struct dt_cpu_feature *d = &dt_cpu_features[j];
  797. if (of_get_flat_dt_phandle(d->node) == phandle) {
  798. cpufeatures_deps_enable(d);
  799. if (d->disabled) {
  800. f->disabled = 1;
  801. return;
  802. }
  803. }
  804. }
  805. }
  806. if (cpufeatures_process_feature(f))
  807. f->enabled = 1;
  808. else
  809. f->disabled = 1;
  810. }
  811. static int __init scan_cpufeatures_subnodes(unsigned long node,
  812. const char *uname,
  813. void *data)
  814. {
  815. int *count = data;
  816. process_cpufeatures_node(node, uname, *count);
  817. (*count)++;
  818. return 0;
  819. }
  820. static int __init count_cpufeatures_subnodes(unsigned long node,
  821. const char *uname,
  822. void *data)
  823. {
  824. int *count = data;
  825. (*count)++;
  826. return 0;
  827. }
/*
 * Main scan callback for the "ibm,powerpc-cpu-features" node. Counts
 * and parses the feature subnodes, enables them (dependency-ordered),
 * optionally adopts the "display-name" as the CPU name, then finalizes
 * setup and frees the temporary parse array.
 */
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
						&nr_dt_cpu_features);
	/* memblock_alloc returns a physical address; __va to access it */
	dt_cpu_features = __va(
		memblock_alloc(sizeof(struct dt_cpu_feature)*
				nr_dt_cpu_features, PAGE_SIZE));

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps  */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursive enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	/* The parse array is only needed during boot; return the memory */
	memblock_free(__pa(dt_cpu_features),
			sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

	return 0;
}
/*
 * Entry point: walk the flat device tree and process the cpufeatures
 * node, if dt_cpu_ftrs_init() decided the binding is in use.
 */
void __init dt_cpu_ftrs_scan(void)
{
	if (!using_dt_cpu_ftrs)
		return;

	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}