/* dt_cpu_ftrs.c */

  1. /*
  2. * Copyright 2017, Nicholas Piggin, IBM Corporation
  3. * Licensed under GPLv2.
  4. */
  5. #define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
  6. #include <linux/export.h>
  7. #include <linux/init.h>
  8. #include <linux/jump_label.h>
  9. #include <linux/memblock.h>
  10. #include <linux/printk.h>
  11. #include <linux/sched.h>
  12. #include <linux/string.h>
  13. #include <linux/threads.h>
  14. #include <asm/cputable.h>
  15. #include <asm/dt_cpu_ftrs.h>
  16. #include <asm/mmu.h>
  17. #include <asm/oprofile_impl.h>
  18. #include <asm/prom.h>
  19. #include <asm/setup.h>
/* Device-tree visible constants follow */
#define ISA_V2_07B      2070
#define ISA_V3_0B       3000

/* Privilege levels at which a feature is usable ("usable-privilege" prop) */
#define USABLE_PR               (1U << 0)
#define USABLE_OS               (1U << 1)
#define USABLE_HV               (1U << 2)

/* Enable recipes described by the "hv-support" / "os-support" properties */
#define HV_SUPPORT_HFSCR        (1U << 0)
#define OS_SUPPORT_FSCR         (1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE         0xffffffffU
#define OS_SUPPORT_NONE         0xffffffffU
/*
 * One parsed subnode of the "ibm,powerpc-cpu-features" node. The first
 * group of fields mirrors the node's device tree properties; the final
 * group is parse/enable state tracked while scanning.
 */
struct dt_cpu_feature {
	const char *name;
	uint32_t isa;
	uint32_t usable_privilege;	/* mask of USABLE_{PR,OS,HV} */
	uint32_t hv_support;		/* HV_SUPPORT_* or HV_SUPPORT_NONE */
	uint32_t os_support;		/* OS_SUPPORT_* or OS_SUPPORT_NONE */
	uint32_t hfscr_bit_nr;		/* -1 when the property is absent */
	uint32_t fscr_bit_nr;		/* -1 when the property is absent */
	uint32_t hwcap_bit_nr;		/* -1 when absent; /32 selects hwcap word */
	/* fdt parsing */
	unsigned long node;
	int enabled;
	int disabled;
};
/*
 * Baseline kernel feature bits assumed for any CPU using this binding.
 * Some are pessimistic defaults that feature nodes later override (e.g.
 * feat_enable_fp() clears CPU_FTR_FPU_UNAVAILABLE, feat_enable_align_dsisr()
 * clears CPU_FTR_NODSISRALIGN).
 */
#define CPU_FTRS_BASE \
	(CPU_FTR_USE_TB | \
	 CPU_FTR_LWSYNC | \
	 CPU_FTR_FPU_UNAVAILABLE |\
	 CPU_FTR_NODSISRALIGN |\
	 CPU_FTR_NOEXECUTE |\
	 CPU_FTR_COHERENT_ICACHE | \
	 CPU_FTR_STCX_CHECKS_ADDRESS |\
	 CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	 CPU_FTR_DAWR | \
	 CPU_FTR_ARCH_206 |\
	 CPU_FTR_ARCH_207S)

#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

/* Userspace hwcap bits advertised unconditionally */
#define COMMON_USER_BASE	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
				 PPC_FEATURE_ARCH_2_06 |\
				 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_ISEL)
/*
 * Set up the base CPU
 */

extern void __flush_tlb_power8(unsigned int action);
extern void __flush_tlb_power9(unsigned int action);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);

/* Nonzero when the boot CPU runs with MSR[HV] set (see cpufeatures_setup_cpu) */
static int hv_mode;

/* SPR values captured in cpufeatures_setup_finished() for later restore */
static struct {
	u64	lpcr;
	u64	hfscr;
	u64	fscr;
} system_registers;

/* PMU init routine installed by the performance-monitor feature nodes */
static void (*init_pmu_registers)(void);
/*
 * Flush this CPU's TLB with a tlbiel per set, the set count chosen from
 * the PVR since this runs before MMU feature setup is complete.
 */
static void cpufeatures_flush_tlb(void)
{
	unsigned long rb;
	unsigned int i, num_sets;

	/*
	 * This is a temporary measure to keep equivalent TLB flush as the
	 * cputable based setup code.
	 */
	switch (PVR_VER(mfspr(SPRN_PVR))) {
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
		num_sets = POWER8_TLB_SETS;
		break;
	case PVR_POWER9:
		num_sets = POWER9_TLB_SETS_HASH;
		break;
	default:
		/* Unknown part: still invalidate one set and complain. */
		num_sets = 1;
		pr_err("unknown CPU version for boot TLB flush\n");
		break;
	}

	/* ptesync orders prior PTE updates before the invalidations. */
	asm volatile("ptesync" : : : "memory");
	rb = TLBIEL_INVAL_SET;
	for (i = 0; i < num_sets; i++) {
		asm volatile("tlbiel %0" : : "r" (rb));
		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
	}
	asm volatile("ptesync" : : : "memory");
}
/*
 * ->cpu_restore hook: re-establish the SPR state captured in
 * system_registers (secondary boot and idle state wakeup paths).
 */
static void __restore_cpu_cpufeatures(void)
{
	/*
	 * LPCR is restored by the power on engine already. It can be changed
	 * after early init e.g., by radix enable, and we have no unified API
	 * for saving and restoring such SPRs.
	 *
	 * This ->restore hook should really be removed from idle and register
	 * restore moved directly into the idle restore code, because this code
	 * doesn't know how idle is implemented or what it needs restored here.
	 *
	 * The best we can do to accommodate secondary boot and idle restore
	 * for now is "or" LPCR with existing.
	 */
	mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
	if (hv_mode) {
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	if (init_pmu_registers)
		init_pmu_registers();

	cpufeatures_flush_tlb();
}
/* Buffer for the DT "display-name" property, installed as cpu_name */
static char dt_cpu_name[64];

/*
 * Minimal cpu_spec installed before feature nodes are processed; the
 * feat_enable_* callbacks fill in the remaining fields as nodes are found.
 */
static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name		= NULL,
	.cpu_features		= CPU_FTRS_BASE,
	.cpu_user_features	= COMMON_USER_BASE,
	.cpu_user_features2	= COMMON_USER2_BASE,
	.mmu_features		= 0,
	.icache_bsize		= 32, /* minimum block size, fixed by */
	.dcache_bsize		= 32, /* cache info init.             */
	.num_pmcs		= 0,
	.pmc_type		= PPC_PMC_DEFAULT,
	.oprofile_cpu_type	= NULL,
	.oprofile_type		= PPC_OPROFILE_INVALID,
	.cpu_setup		= NULL,
	.cpu_restore		= __restore_cpu_cpufeatures,
	.flush_tlb		= NULL,
	.machine_check_early	= NULL,
	.platform		= NULL,
};
/*
 * Install base_cpu_spec as cur_cpu_spec and put the CPU in a known
 * baseline state (facility status registers cleared) before parsing.
 */
static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR.  */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		/* CPU_FTR_HVMODE is used early in PACA setup */
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}
/*
 * Best-effort enable for a feature name the kernel does not recognize.
 * Only recipes fully described by the device tree (an HFSCR/FSCR bit and
 * optionally a hwcap bit) can be applied; anything else is refused.
 * Returns 1 on success, 0 if there is no known enable recipe.
 */
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
	if (f->hv_support == HV_SUPPORT_NONE) {
		/* nothing for the hypervisor level to enable */
	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
		u64 hfscr = mfspr(SPRN_HFSCR);
		hfscr |= 1UL << f->hfscr_bit_nr;
		mtspr(SPRN_HFSCR, hfscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if (f->os_support == OS_SUPPORT_NONE) {
		/* nothing for the OS level to enable */
	} else if (f->os_support & OS_SUPPORT_FSCR) {
		u64 fscr = mfspr(SPRN_FSCR);
		fscr |= 1UL << f->fscr_bit_nr;
		mtspr(SPRN_FSCR, fscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	/* Userspace visibility, when a hwcap bit number is assigned. */
	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}
  201. static int __init feat_enable(struct dt_cpu_feature *f)
  202. {
  203. if (f->hv_support != HV_SUPPORT_NONE) {
  204. if (f->hfscr_bit_nr != -1) {
  205. u64 hfscr = mfspr(SPRN_HFSCR);
  206. hfscr |= 1UL << f->hfscr_bit_nr;
  207. mtspr(SPRN_HFSCR, hfscr);
  208. }
  209. }
  210. if (f->os_support != OS_SUPPORT_NONE) {
  211. if (f->fscr_bit_nr != -1) {
  212. u64 fscr = mfspr(SPRN_FSCR);
  213. fscr |= 1UL << f->fscr_bit_nr;
  214. mtspr(SPRN_FSCR, fscr);
  215. }
  216. }
  217. if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
  218. uint32_t word = f->hwcap_bit_nr / 32;
  219. uint32_t bit = f->hwcap_bit_nr % 32;
  220. if (word == 0)
  221. cur_cpu_spec->cpu_user_features |= 1U << bit;
  222. else if (word == 1)
  223. cur_cpu_spec->cpu_user_features2 |= 1U << bit;
  224. else
  225. pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
  226. }
  227. return 1;
  228. }
/* Table hook for features the kernel deliberately refuses to enable. */
static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}
  233. static int __init feat_enable_hv(struct dt_cpu_feature *f)
  234. {
  235. u64 lpcr;
  236. if (!hv_mode) {
  237. pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
  238. return 0;
  239. }
  240. mtspr(SPRN_LPID, 0);
  241. lpcr = mfspr(SPRN_LPCR);
  242. lpcr &= ~LPCR_LPES0; /* HV external interrupts */
  243. mtspr(SPRN_LPCR, lpcr);
  244. cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
  245. return 1;
  246. }
/* Little-endian support is purely a userspace (hwcap) advertisement. */
static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}
  252. static int __init feat_enable_smt(struct dt_cpu_feature *f)
  253. {
  254. cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
  255. cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
  256. return 1;
  257. }
  258. static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
  259. {
  260. u64 lpcr;
  261. /* Set PECE wakeup modes for ISA 207 */
  262. lpcr = mfspr(SPRN_LPCR);
  263. lpcr |= LPCR_PECE0;
  264. lpcr |= LPCR_PECE1;
  265. lpcr |= LPCR_PECE2;
  266. mtspr(SPRN_LPCR, lpcr);
  267. return 1;
  268. }
/* This CPU does set DSISR on alignment interrupts: drop the base flag. */
static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;

	return 1;
}
  274. static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
  275. {
  276. u64 lpcr;
  277. /* Set PECE wakeup modes for ISAv3.0B */
  278. lpcr = mfspr(SPRN_LPCR);
  279. lpcr |= LPCR_PECE0;
  280. lpcr |= LPCR_PECE1;
  281. lpcr |= LPCR_PECE2;
  282. mtspr(SPRN_LPCR, lpcr);
  283. return 1;
  284. }
/* Configure LPCR for the pre-v3.0 hash MMU and advertise it. */
static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}
  299. static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
  300. {
  301. u64 lpcr;
  302. lpcr = mfspr(SPRN_LPCR);
  303. lpcr &= ~LPCR_ISL;
  304. mtspr(SPRN_LPCR, lpcr);
  305. cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
  306. cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
  307. return 1;
  308. }
  309. static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
  310. {
  311. #ifdef CONFIG_PPC_RADIX_MMU
  312. cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
  313. cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
  314. cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
  315. return 1;
  316. #endif
  317. return 0;
  318. }
  319. static int __init feat_enable_dscr(struct dt_cpu_feature *f)
  320. {
  321. u64 lpcr;
  322. feat_enable(f);
  323. lpcr = mfspr(SPRN_LPCR);
  324. lpcr &= ~LPCR_DPFD;
  325. lpcr |= (4UL << LPCR_DPFD_SH);
  326. mtspr(SPRN_LPCR, lpcr);
  327. return 1;
  328. }
  329. static void hfscr_pmu_enable(void)
  330. {
  331. u64 hfscr = mfspr(SPRN_HFSCR);
  332. hfscr |= PPC_BIT(60);
  333. mtspr(SPRN_HFSCR, hfscr);
  334. }
/*
 * Zero the POWER8 PMU control registers. MMCRC/MMCRH are written only
 * when running in HV mode.
 */
static void init_pmu_power8(void)
{
	if (hv_mode) {
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}
/* Hook up the POWER8 machine check handler and boot TLB flush routine. */
static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power8";
	cur_cpu_spec->flush_tlb = __flush_tlb_power8;
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;

	return 1;
}
  354. static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
  355. {
  356. hfscr_pmu_enable();
  357. init_pmu_power8();
  358. init_pmu_registers = init_pmu_power8;
  359. cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
  360. cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
  361. if (pvr_version_is(PVR_POWER8E))
  362. cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;
  363. cur_cpu_spec->num_pmcs = 6;
  364. cur_cpu_spec->pmc_type = PPC_PMC_IBM;
  365. cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";
  366. return 1;
  367. }
/*
 * Zero the POWER9 PMU control registers. MMCRC is written only when
 * running in HV mode.
 */
static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0);

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}
/* Hook up the POWER9 machine check handler and boot TLB flush routine. */
static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power9";
	cur_cpu_spec->flush_tlb = __flush_tlb_power9;
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;

	return 1;
}
  384. static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
  385. {
  386. hfscr_pmu_enable();
  387. init_pmu_power9();
  388. init_pmu_registers = init_pmu_power9;
  389. cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
  390. cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
  391. cur_cpu_spec->num_pmcs = 6;
  392. cur_cpu_spec->pmc_type = PPC_PMC_IBM;
  393. cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";
  394. return 1;
  395. }
  396. static int __init feat_enable_tm(struct dt_cpu_feature *f)
  397. {
  398. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  399. feat_enable(f);
  400. cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
  401. return 1;
  402. #endif
  403. return 0;
  404. }
  405. static int __init feat_enable_fp(struct dt_cpu_feature *f)
  406. {
  407. feat_enable(f);
  408. cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;
  409. return 1;
  410. }
  411. static int __init feat_enable_vector(struct dt_cpu_feature *f)
  412. {
  413. #ifdef CONFIG_ALTIVEC
  414. feat_enable(f);
  415. cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
  416. cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
  417. cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
  418. return 1;
  419. #endif
  420. return 0;
  421. }
  422. static int __init feat_enable_vsx(struct dt_cpu_feature *f)
  423. {
  424. #ifdef CONFIG_VSX
  425. feat_enable(f);
  426. cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
  427. cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;
  428. return 1;
  429. #endif
  430. return 0;
  431. }
/* PURR implies SPURR here: both utilization registers are enabled together. */
static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

	return 1;
}
static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;	/* suppress hwcap advertisement here */
	feat_enable(f);

	return 1;
}
  449. static int __init feat_enable_dbell(struct dt_cpu_feature *f)
  450. {
  451. u64 lpcr;
  452. /* P9 has an HFSCR for privileged state */
  453. feat_enable(f);
  454. cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;
  455. lpcr = mfspr(SPRN_LPCR);
  456. lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
  457. mtspr(SPRN_LPCR, lpcr);
  458. return 1;
  459. }
/* Route interrupts as hypervisor virtualization interrupts (POWER9 XIVE). */
static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts including in OPAL XICS compatibility
	 * are always delivered as hypervisor virtualization interrupts (HVI)
	 * rather than EE.
	 *
	 * However LPES0 is not set here, in the chance that an EE does get
	 * delivered to the host somehow, the EE handler would not expect it
	 * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
	 * happen if there is a bug in interrupt controller code, or IC is
	 * misconfigured in systemsim.
	 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
/* MMU supports large pages for cache-inhibited mappings. */
static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
	cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

	return 1;
}
/*
 * Maps a device tree feature name to its enable callback and the
 * CPU_FTR_* bits set when that callback succeeds.
 */
struct dt_cpu_feature_match {
	const char *name;
	int (*enable)(struct dt_cpu_feature *f);
	u64 cpu_ftr_bit_mask;
};

static struct dt_cpu_feature_match __initdata
		dt_cpu_feature_match_table[] = {
	{"hypervisor", feat_enable_hv, 0},
	{"big-endian", feat_enable, 0},
	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
	{"smt", feat_enable_smt, 0},
	{"interrupt-facilities", feat_enable, 0},
	{"timer-facilities", feat_enable, 0},
	{"timer-facilities-v3", feat_enable, 0},
	{"debug-facilities", feat_enable, 0},
	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
	{"branch-tracing", feat_enable, 0},
	{"floating-point", feat_enable_fp, 0},
	{"vector", feat_enable_vector, 0},
	{"vector-scalar", feat_enable_vsx, 0},
	{"vector-scalar-v3", feat_enable, 0},
	{"decimal-floating-point", feat_enable, 0},
	{"decimal-integer", feat_enable, 0},
	{"quadword-load-store", feat_enable, 0},
	{"vector-crypto", feat_enable, 0},
	{"mmu-hash", feat_enable_mmu_hash, 0},
	{"mmu-radix", feat_enable_mmu_radix, 0},
	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"idle-nap", feat_enable_idle_nap, 0},
	{"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
	{"idle-stop", feat_enable_idle_stop, 0},
	{"machine-check-power8", feat_enable_mce_power8, 0},
	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
	{"event-based-branch", feat_enable_ebb, 0},
	{"target-address-register", feat_enable, 0},
	{"branch-history-rolling-buffer", feat_enable, 0},
	{"control-register", feat_enable, CPU_FTR_CTRL},
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
	{"subcore", feat_enable, CPU_FTR_SUBCORE},
	{"no-execute", feat_enable, 0},
	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
	{"coprocessor-icswx", feat_enable, CPU_FTR_ICSWX},
	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
	{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
	{"wait", feat_enable, 0},
	{"atomic-memory-operations", feat_enable, 0},
	{"branch-v3", feat_enable, 0},
	{"copy-paste", feat_enable, 0},
	{"decimal-floating-point-v3", feat_enable, 0},
	{"decimal-integer-v3", feat_enable, 0},
	{"fixed-point-v3", feat_enable, 0},
	{"floating-point-v3", feat_enable, 0},
	{"group-start-register", feat_enable, 0},
	{"pc-relative-addressing", feat_enable, 0},
	{"machine-check-power9", feat_enable_mce_power9, 0},
	{"performance-monitor-power9", feat_enable_pmu_power9, 0},
	{"event-based-branch-v3", feat_enable, 0},
	{"random-number-generator", feat_enable, 0},
	{"system-call-vectored", feat_disable, 0},
	{"trace-interrupt-v3", feat_enable, 0},
	{"vector-v3", feat_enable, 0},
	{"vector-binary128", feat_enable, 0},
	{"vector-binary16", feat_enable, 0},
	{"wait-v3", feat_enable, 0},
};
/* XXX: how to configure this? Default + boot time? */
/* Whether unknown feature names get the best-effort generic enable. */
#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
#define CPU_FEATURE_ENABLE_UNKNOWN 1
#else
#define CPU_FEATURE_ENABLE_UNKNOWN 0
#endif
  564. static void __init cpufeatures_setup_start(u32 isa)
  565. {
  566. pr_info("setup for ISA %d\n", isa);
  567. if (isa >= 3000) {
  568. cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
  569. cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
  570. }
  571. }
  572. static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
  573. {
  574. const struct dt_cpu_feature_match *m;
  575. bool known = false;
  576. int i;
  577. for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
  578. m = &dt_cpu_feature_match_table[i];
  579. if (!strcmp(f->name, m->name)) {
  580. known = true;
  581. if (m->enable(f))
  582. break;
  583. pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
  584. f->name);
  585. return false;
  586. }
  587. }
  588. if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
  589. if (!feat_try_enable_unknown(f)) {
  590. pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
  591. f->name);
  592. return false;
  593. }
  594. }
  595. if (m->cpu_ftr_bit_mask)
  596. cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
  597. if (known)
  598. pr_debug("enabling: %s\n", f->name);
  599. else
  600. pr_debug("enabling: %s (unknown)\n", f->name);
  601. return true;
  602. }
  603. static __init void cpufeatures_cpu_quirks(void)
  604. {
  605. int version = mfspr(SPRN_PVR);
  606. /*
  607. * Not all quirks can be derived from the cpufeatures device tree.
  608. */
  609. if ((version & 0xffffff00) == 0x004e0100)
  610. cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
  611. }
/*
 * Final fixups after all feature nodes are processed: apply PVR quirks,
 * sanity-check HV state, snapshot SPRs for later restore, and flush.
 */
static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	/* Captured state is replayed by __restore_cpu_cpufeatures(). */
	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);

	cpufeatures_flush_tlb();

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}
  626. static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
  627. int depth, void *data)
  628. {
  629. if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
  630. && of_get_flat_dt_prop(node, "isa", NULL))
  631. return 1;
  632. return 0;
  633. }
static bool __initdata using_dt_cpu_ftrs = false;

/* Report whether CPU setup was driven by the cpufeatures DT binding. */
bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}
/*
 * Early entry point: if the FDT carries a usable cpufeatures node,
 * install the base cpu_spec and switch to DT-driven feature setup.
 */
bool __init dt_cpu_ftrs_init(void *fdt)
{
	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}
/* Scratch array of parsed features; allocated and freed during the scan. */
static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;
  652. static int __init process_cpufeatures_node(unsigned long node,
  653. const char *uname, int i)
  654. {
  655. const __be32 *prop;
  656. struct dt_cpu_feature *f;
  657. int len;
  658. f = &dt_cpu_features[i];
  659. memset(f, 0, sizeof(struct dt_cpu_feature));
  660. f->node = node;
  661. f->name = uname;
  662. prop = of_get_flat_dt_prop(node, "isa", &len);
  663. if (!prop) {
  664. pr_warn("%s: missing isa property\n", uname);
  665. return 0;
  666. }
  667. f->isa = be32_to_cpup(prop);
  668. prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
  669. if (!prop) {
  670. pr_warn("%s: missing usable-privilege property", uname);
  671. return 0;
  672. }
  673. f->usable_privilege = be32_to_cpup(prop);
  674. prop = of_get_flat_dt_prop(node, "hv-support", &len);
  675. if (prop)
  676. f->hv_support = be32_to_cpup(prop);
  677. else
  678. f->hv_support = HV_SUPPORT_NONE;
  679. prop = of_get_flat_dt_prop(node, "os-support", &len);
  680. if (prop)
  681. f->os_support = be32_to_cpup(prop);
  682. else
  683. f->os_support = OS_SUPPORT_NONE;
  684. prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
  685. if (prop)
  686. f->hfscr_bit_nr = be32_to_cpup(prop);
  687. else
  688. f->hfscr_bit_nr = -1;
  689. prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
  690. if (prop)
  691. f->fscr_bit_nr = be32_to_cpup(prop);
  692. else
  693. f->fscr_bit_nr = -1;
  694. prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
  695. if (prop)
  696. f->hwcap_bit_nr = be32_to_cpup(prop);
  697. else
  698. f->hwcap_bit_nr = -1;
  699. if (f->usable_privilege & USABLE_HV) {
  700. if (!(mfmsr() & MSR_HV)) {
  701. pr_warn("%s: HV feature passed to guest\n", uname);
  702. return 0;
  703. }
  704. if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
  705. pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
  706. return 0;
  707. }
  708. if (f->hv_support == HV_SUPPORT_HFSCR) {
  709. if (f->hfscr_bit_nr == -1) {
  710. pr_warn("%s: missing hfscr_bit_nr\n", uname);
  711. return 0;
  712. }
  713. }
  714. } else {
  715. if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
  716. pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
  717. return 0;
  718. }
  719. }
  720. if (f->usable_privilege & USABLE_OS) {
  721. if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
  722. pr_warn("%s: unwanted fscr_bit_nr\n", uname);
  723. return 0;
  724. }
  725. if (f->os_support == OS_SUPPORT_FSCR) {
  726. if (f->fscr_bit_nr == -1) {
  727. pr_warn("%s: missing fscr_bit_nr\n", uname);
  728. return 0;
  729. }
  730. }
  731. } else {
  732. if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
  733. pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
  734. return 0;
  735. }
  736. }
  737. if (!(f->usable_privilege & USABLE_PR)) {
  738. if (f->hwcap_bit_nr != -1) {
  739. pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
  740. return 0;
  741. }
  742. }
  743. /* Do all the independent features in the first pass */
  744. if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
  745. if (cpufeatures_process_feature(f))
  746. f->enabled = 1;
  747. else
  748. f->disabled = 1;
  749. }
  750. return 0;
  751. }
  752. static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
  753. {
  754. const __be32 *prop;
  755. int len;
  756. int nr_deps;
  757. int i;
  758. if (f->enabled || f->disabled)
  759. return;
  760. prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
  761. if (!prop) {
  762. pr_warn("%s: missing dependencies property", f->name);
  763. return;
  764. }
  765. nr_deps = len / sizeof(int);
  766. for (i = 0; i < nr_deps; i++) {
  767. unsigned long phandle = be32_to_cpu(prop[i]);
  768. int j;
  769. for (j = 0; j < nr_dt_cpu_features; j++) {
  770. struct dt_cpu_feature *d = &dt_cpu_features[j];
  771. if (of_get_flat_dt_phandle(d->node) == phandle) {
  772. cpufeatures_deps_enable(d);
  773. if (d->disabled) {
  774. f->disabled = 1;
  775. return;
  776. }
  777. }
  778. }
  779. }
  780. if (cpufeatures_process_feature(f))
  781. f->enabled = 1;
  782. else
  783. f->disabled = 1;
  784. }
  785. static int __init scan_cpufeatures_subnodes(unsigned long node,
  786. const char *uname,
  787. void *data)
  788. {
  789. int *count = data;
  790. process_cpufeatures_node(node, uname, *count);
  791. (*count)++;
  792. return 0;
  793. }
  794. static int __init count_cpufeatures_subnodes(unsigned long node,
  795. const char *uname,
  796. void *data)
  797. {
  798. int *count = data;
  799. (*count)++;
  800. return 0;
  801. }
/*
 * Process the "ibm,powerpc-cpu-features" node: allocate and parse all
 * feature subnodes, enable them (independents first, then in dependency
 * order), finalize the cpu_spec, and free the scratch array.
 */
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
						&nr_dt_cpu_features);

	/* memblock_alloc returns a physical address here, hence the __va() */
	dt_cpu_features = __va(
		memblock_alloc(sizeof(struct dt_cpu_feature)*
				nr_dt_cpu_features, PAGE_SIZE));

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps  */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursive enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	/* An optional display name overrides the generic cpu_name. */
	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	/* The scratch feature array is only needed during this scan. */
	memblock_free(__pa(dt_cpu_features),
			sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

	return 0;
}
/* Entry point: locate and process the cpufeatures node in the FDT. */
void __init dt_cpu_ftrs_scan(void)
{
	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}