amd.c

#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"
static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;
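
/*
 * Note on the two helpers below: some K8 MSRs (e.g. 0xc001100d, poked in
 * init_amd_k8()) are gated behind a passcode that must be present in %edi
 * during the RDMSR/WRMSR. gprs[7] maps to %edi in rdmsr_safe_regs() /
 * wrmsr_safe_regs(), and 0x9c5a203a is that K8 passcode per the BKDG.
 */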
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;

        err = rdmsr_safe_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return wrmsr_safe_regs(gprs);
}
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern __visible void vide(void);
__asm__(".globl vide\n"
        ".type vide, @function\n"
        ".align 4\n"
        "vide: ret\n");
static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR            (0xfffc)        /* Configuration Base Address (32-bit) */
#define CBAR_ENB        (0x80000000)
#define CBAR_KEY        (0x000000CB)
        if (c->x86_model == 9 || c->x86_model == 10) {
                if (inl(CBAR) & CBAR_ENB)
                        outl(0 | CBAR_KEY, CBAR);
        }
#endif
}
static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;
        int mbytes = get_num_physpages() >> (20 - PAGE_SHIFT);

        if (c->x86_model < 6) {
                /* Based on AMD doc 20734R - June 2000 */
                if (c->x86_model == 0) {
                        clear_cpu_cap(c, X86_FEATURE_APIC);
                        set_cpu_cap(c, X86_FEATURE_PGE);
                }
                return;
        }

        if (c->x86_model == 6 && c->x86_stepping == 1) {
                const int K6_BUG_LOOP = 1000000;
                int n;
                void (*f_vide)(void);
                u64 d, d2;

                pr_info("AMD K6 stepping B detected - ");

                /*
                 * It looks like AMD fixed the 2.6.2 bug and improved indirect
                 * calls at the same time.
                 */
                n = K6_BUG_LOOP;
                f_vide = vide;
                OPTIMIZER_HIDE_VAR(f_vide);
                d = rdtsc();
                while (n--)
                        f_vide();
                d2 = rdtsc();
                d = d2 - d;

                if (d > 20*K6_BUG_LOOP)
                        pr_cont("system stability may be impaired when more than 32 MB are used.\n");
                else
                        pr_cont("probably OK (after B9730xxxx).\n");
        }

        /* K6 with old style WHCR */
        if (c->x86_model < 8 ||
            (c->x86_model == 8 && c->x86_stepping < 8)) {
                /* We can only write allocate on the low 508Mb */
                if (mbytes > 508)
                        mbytes = 508;

                rdmsr(MSR_K6_WHCR, l, h);
                if ((l & 0x0000FFFF) == 0) {
                        unsigned long flags;
                        l = (1 << 0) | ((mbytes / 4) << 1);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling old style K6 write allocation for %d Mb\n",
                                mbytes);
                }
                return;
        }

        if ((c->x86_model == 8 && c->x86_stepping > 7) ||
            c->x86_model == 9 || c->x86_model == 13) {
                /* The more serious chips .. */
                if (mbytes > 4092)
                        mbytes = 4092;

                rdmsr(MSR_K6_WHCR, l, h);
                if ((l & 0xFFFF0000) == 0) {
                        unsigned long flags;
                        l = ((mbytes >> 2) << 22) | (1 << 16);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling new style K6 write allocation for %d Mb\n",
                                mbytes);
                }
                return;
        }

        if (c->x86_model == 10) {
                /* AMD Geode LX is model 10 */
                /* placeholder for any needed mods */
                return;
        }
#endif
}
static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;

        /*
         * Bit 15 of Athlon specific MSR 15 needs to be 0
         * to enable SSE on Palomino/Morgan/Barton CPUs.
         * If the BIOS didn't enable it already, enable it here.
         */
        if (c->x86_model >= 6 && c->x86_model <= 10) {
                if (!cpu_has(c, X86_FEATURE_XMM)) {
                        pr_info("Enabling disabled K7/SSE Support.\n");
                        msr_clear_bit(MSR_K7_HWCR, 15);
                        set_cpu_cap(c, X86_FEATURE_XMM);
                }
        }

        /*
         * It's been determined by AMD that Athlons since model 8 stepping 1
         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx.
         * As per AMD technical note 27212 0.2
         */
        if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
                rdmsr(MSR_K7_CLK_CTL, l, h);
                if ((l & 0xfff00000) != 0x20000000) {
                        pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
                                l, ((l & 0x000fffff) | 0x20000000));
                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff) | 0x20000000, h);
                }
        }

        set_cpu_cap(c, X86_FEATURE_K7);

        /* Is this being called from identify_secondary_cpu()? */
        if (!c->cpu_index)
                return;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        /* Athlon 660/661 is valid. */
        if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
            (c->x86_stepping == 1)))
                return;

        /* Duron 670 is valid */
        if ((c->x86_model == 7) && (c->x86_stepping == 0))
                return;

        /*
         * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
         * bit. It's worth noting that the A5 stepping (662) of some
         * Athlon XPs have the MP bit set.
         * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
         * more.
         */
        if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
            ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
             (c->x86_model > 7))
                if (cpu_has(c, X86_FEATURE_MP))
                        return;

        /* If we get here, not a certified SMP capable AMD system. */

        /*
         * Don't taint if we are running SMP kernel on a single non-MP
         * approved Athlon
         */
        WARN_ONCE(1, "WARNING: This combination of AMD"
                " processors is not suitable for SMP.\n");
        add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}
#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
        u32 cus_per_node;

        if (c->x86 >= 0x17)
                return;

        cus_per_node = c->x86_max_cores / nodes_per_socket;
        c->cpu_core_id %= cus_per_node;
}
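
/*
 * Layout of CPUID leaf 0x8000001e as consumed below (TOPOEXT-capable
 * parts; see the BKDG/PPR for the authoritative definition):
 *   EBX[7:0]   - compute unit ID (Fam15h) resp. core ID (Fam17h)
 *   EBX[15:8]  - threads per compute unit/core, minus 1
 *   ECX[7:0]   - node ID
 *   ECX[10:8]  - nodes per processor, minus 1 (read in bsp_init_amd())
 */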
/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
        u8 node_id;
        int cpu = smp_processor_id();

        /* get information required for multi-node processors */
        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                int err;
                u32 eax, ebx, ecx, edx;

                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

                node_id = ecx & 0xff;
                smp_num_siblings = ((ebx >> 8) & 0xff) + 1;

                if (c->x86 == 0x15)
                        c->cu_id = ebx & 0xff;

                if (c->x86 >= 0x17) {
                        c->cpu_core_id = ebx & 0xff;

                        if (smp_num_siblings > 1)
                                c->x86_max_cores /= smp_num_siblings;
                }

                /*
                 * In case leaf B is available, use it to derive
                 * topology information.
                 */
                err = detect_extended_topology(c);
                if (!err)
                        c->x86_coreid_bits = get_count_order(c->x86_max_cores);

                cacheinfo_amd_init_llc_id(c, cpu, node_id);

        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
                u64 value;

                rdmsrl(MSR_FAM10H_NODE_ID, value);
                node_id = value & 7;

                per_cpu(cpu_llc_id, cpu) = node_id;
        } else
                return;

        if (nodes_per_socket > 1) {
                set_cpu_cap(c, X86_FEATURE_AMD_DCM);
                legacy_fixup_core_id(c);
        }
}
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores. Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
        unsigned bits;
        int cpu = smp_processor_id();

        bits = c->x86_coreid_bits;
        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->initial_apicid & ((1 << bits) - 1);
        /* Convert the initial APIC ID into the socket ID */
        c->phys_proc_id = c->initial_apicid >> bits;
        /* use socket ID also for last level cache */
        per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}
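
/*
 * Illustrative example of the split above: with x86_coreid_bits == 2 and
 * initial_apicid == 0b1101, cpu_core_id = 0b1101 & 0b11 = 1 and
 * phys_proc_id = 0b1101 >> 2 = 3.
 */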
u16 amd_get_nb_id(int cpu)
{
        return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
        return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node;
        unsigned apicid = c->apicid;

        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
                node = per_cpu(cpu_llc_id, cpu);

        /*
         * On multi-fabric platform (e.g. Numascale NumaChip) a
         * platform-specific handler needs to be called to fixup some
         * IDs of the CPU.
         */
        if (x86_cpuinit.fixup_cpu_id)
                x86_cpuinit.fixup_cpu_id(c, node);

        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 *
                 * - The CPU is missing memory and no node was created. In
                 *   that case try picking one from a nearby CPU.
                 *
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in. Assume
                 *   they are all increased by a constant offset, but in
                 *   the same order as the HT nodeids. If that doesn't
                 *   result in a usable node fall back to the path for the
                 *   previous case.
                 *
                 * This workaround operates directly on the mapping between
                 * APIC ID and NUMA node, assuming certain relationship
                 * between APIC ID, HT node ID and NUMA topology. As going
                 * through CPU mapping may alter the outcome, directly
                 * access __apicid_to_node[].
                 */
                int ht_nodeid = c->initial_apicid;

                if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = __apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);
#endif
}
static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits, ecx;

        /* Multi core CPU? */
        if (c->extended_cpuid_level < 0x80000008)
                return;

        ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }
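
        /*
         * Illustrative example: the recompute above turns x86_max_cores == 6
         * into bits == 3, since 1 << 3 is the first power of two >= 6.
         */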
        c->x86_coreid_bits = bits;
#endif
}
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        if (c->x86 >= 0xf) {
                unsigned long long tseg;

                /*
                 * Split up direct mapping around the TSEG SMM area.
                 * Don't do it for gbpages because there seems very little
                 * benefit in doing so.
                 */
                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
                        unsigned long pfn = tseg >> PAGE_SHIFT;

                        pr_debug("tseg: %010llx\n", tseg);
                        if (pfn_range_is_mapped(pfn, pfn + 1))
                                set_memory_4k((unsigned long)__va(tseg), 1);
                }
        }
#endif

        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

                if (c->x86 > 0x10 ||
                    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
                        u64 val;

                        rdmsrl(MSR_K7_HWCR, val);
                        if (!(val & BIT(24)))
                                pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
                }
        }

        if (c->x86 == 0x15) {
                unsigned long upperbit;
                u32 cpuid, assoc;

                cpuid    = cpuid_edx(0x80000005);
                assoc    = cpuid >> 16 & 0xff;
                upperbit = ((cpuid >> 24) << 10) / assoc;

                va_align.mask  = (upperbit - 1) & PAGE_MASK;
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

                /* A random value per boot for bit slice [12:upper_bit) */
                va_align.bits = get_random_int() & va_align.mask;
        }
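
        /*
         * Worked example for the va_align setup above (illustrative):
         * CPUID 0x80000005 EDX describes the L1I cache, size in KB in
         * bits [31:24] and associativity in bits [23:16]. For the usual
         * Fam15h 64K/2-way L1I, upperbit = (64 << 10) / 2 = 0x8000 and
         * mask = 0x7fff & PAGE_MASK = 0x7000, i.e. bits [14:12] of the
         * virtual address slice get randomized once per boot.
         */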
        if (cpu_has(c, X86_FEATURE_MWAITX))
                use_mwaitx_delay();

        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                u32 ecx;

                ecx = cpuid_ecx(0x8000001e);
                nodes_per_socket = ((ecx >> 8) & 7) + 1;
        } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                u64 value;

                rdmsrl(MSR_FAM10H_NODE_ID, value);
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }

        if (c->x86 >= 0x15 && c->x86 <= 0x17) {
                unsigned int bit;

                switch (c->x86) {
                case 0x15: bit = 54; break;
                case 0x16: bit = 33; break;
                case 0x17: bit = 10; break;
                default: return;
                }
                /*
                 * Try to cache the base value so further operations can
                 * avoid RMW. If that faults, do not enable SSBD.
                 */
                if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
                        setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
                        setup_force_cpu_cap(X86_FEATURE_SSBD);
                        x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
                }
        }
}
static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
        u64 msr;

        /*
         * BIOS support is required for SME and SEV.
         *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
         *            the SME physical address space reduction value.
         *            If BIOS has not enabled SME then don't advertise the
         *            SME feature (set in scattered.c).
         *   For SEV: If BIOS has not enabled SEV then don't advertise the
         *            SEV feature (set in scattered.c).
         *
         *   In all cases, since support for SME and SEV requires long mode,
         *   don't advertise the feature under CONFIG_X86_32.
         */
        if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
                /* Check if memory encryption is enabled */
                rdmsrl(MSR_K8_SYSCFG, msr);
                if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                        goto clear_all;

                /*
                 * Always adjust physical address bits. Even though this
                 * will be a value above 32-bits this is still done for
                 * CONFIG_X86_32 so that accurate values are reported.
                 */
                c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

                if (IS_ENABLED(CONFIG_X86_32))
                        goto clear_all;

                rdmsrl(MSR_K7_HWCR, msr);
                if (!(msr & MSR_K7_HWCR_SMMLOCK))
                        goto clear_sev;

                return;

clear_all:
                clear_cpu_cap(c, X86_FEATURE_SME);
clear_sev:
                clear_cpu_cap(c, X86_FEATURE_SEV);
        }
}
static void early_init_amd(struct cpuinfo_x86 *c)
{
        u32 dummy;

        early_init_amd_mc(c);

        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }

        /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
        if (c->x86_power & BIT(12))
                set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
        /* Set MTRR capability flag if appropriate */
        if (c->x86 == 5)
                if (c->x86_model == 13 || c->x86_model == 9 ||
                    (c->x86_model == 8 && c->x86_stepping >= 8))
                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
        /*
         * ApicID can always be treated as an 8-bit value for AMD APIC versions
         * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
         * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
         * after 16h.
         */
        if (boot_cpu_has(X86_FEATURE_APIC)) {
                if (c->x86 > 0x16)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                else if (c->x86 >= 0xf) {
                        /* check CPU config space for extended APIC ID */
                        unsigned int val;

                        val = read_pci_config(0, 24, 0, 0x68);
                        if ((val >> 17 & 0x3) == 0x3)
                                set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                }
        }
#endif

        /*
         * This is only needed to tell the kernel whether to use VMCALL
         * and VMMCALL.  VMMCALL is never executed except under virt, so
         * we can set it unconditionally.
         */
        set_cpu_cap(c, X86_FEATURE_VMMCALL);

        /* F16h erratum 793, CVE-2013-6885 */
        if (c->x86 == 0x16 && c->x86_model <= 0xf)
                msr_set_bit(MSR_AMD64_LS_CFG, 15);

        /*
         * Check whether the machine is affected by erratum 400. This is
         * used to select the proper idle routine and to enable the check
         * whether the machine is affected in arch_post_acpi_init(), which
         * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
         */
        if (cpu_has_amd_erratum(c, amd_erratum_400))
                set_cpu_bug(c, X86_BUG_AMD_E400);

        early_detect_mem_encrypt(c);
}
static void init_amd_k8(struct cpuinfo_x86 *c)
{
        u32 level;
        u64 value;

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /*
         * Some BIOSes incorrectly force this feature, but only K8 revision D
         * (model = 0x14) and later actually support it.
         * (AMD Erratum #110, docId: 25759).
         */
        if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
                clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
                if (!rdmsrl_amd_safe(0xc001100d, &value)) {
                        value &= ~BIT_64(32);
                        wrmsrl_amd_safe(0xc001100d, value);
                }
        }

        if (!c->x86_model_id[0])
                strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8:
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        msr_set_bit(MSR_K7_HWCR, 6);
#endif
        set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}
static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
        /* do this for boot cpu */
        if (c == &boot_cpu_data)
                check_enable_amd_mmconf_dmi();

        fam10h_check_enable_mmcfg();
#endif

        /*
         * Disable GART TLB Walk Errors on Fam10h. We do this here because this
         * is always needed when GART is enabled, even in a kernel which has no
         * MCE support built in. BIOS should disable GartTlbWlk Errors already.
         * If it doesn't, we do it here as suggested by the BKDG.
         *
         * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
         */
        msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

        /*
         * On family 10h BIOS may not have properly enabled WC+ support, causing
         * it to be converted to CD memtype. This may result in performance
         * degradation for certain nested-paging guests. Prevent this conversion
         * by clearing bit 24 in MSR_AMD64_BU_CFG2.
         *
         * NOTE: we want to use the _safe accessors so as not to #GP kvm
         * guests on older kvm hosts.
         */
        msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

        if (cpu_has_amd_erratum(c, amd_erratum_383))
                set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
#define MSR_AMD64_DE_CFG        0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
        /*
         * Apply erratum 665 fix unconditionally so machines without a BIOS
         * fix work.
         */
        msr_set_bit(MSR_AMD64_DE_CFG, 31);
}
static void init_amd_bd(struct cpuinfo_x86 *c)
{
        u64 value;

        /* re-enable TopologyExtensions if switched off by BIOS */
        if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {

                if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
                        if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
                                pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }

        /*
         * The way access filter has a performance penalty on some workloads.
         * Disable it on the affected CPUs.
         */
        if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
                if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
                        value |= 0x1E;
                        wrmsrl_safe(MSR_F15H_IC_CFG, value);
                }
        }
}
static void init_amd_zn(struct cpuinfo_x86 *c)
{
        set_cpu_cap(c, X86_FEATURE_ZEN);
        /*
         * Fix erratum 1076: CPB feature bit not being set in CPUID. It
         * affects all parts up to and including stepping B1.
         */
        if (c->x86_model <= 1 && c->x86_stepping <= 1)
                set_cpu_cap(c, X86_FEATURE_CPB);
}
static void init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd(c);

        /*
         * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
         * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
         */
        clear_cpu_cap(c, 0*32+31);

        if (c->x86 >= 0x10)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* get apicid instead of initial apic id from cpuid */
        c->apicid = hard_smp_processor_id();

        /* K6s report MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
                clear_cpu_cap(c, X86_FEATURE_MCE);

        switch (c->x86) {
        case 4:    init_amd_k5(c); break;
        case 5:    init_amd_k6(c); break;
        case 6:    init_amd_k7(c); break;
        case 0xf:  init_amd_k8(c); break;
        case 0x10: init_amd_gh(c); break;
        case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
        case 0x17: init_amd_zn(c); break;
        }

        /*
         * Enable workaround for FXSAVE leak on CPUs
         * without the XSaveErPtr feature
         */
        if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

        cpu_detect_cache_sizes(c);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008) {
                amd_detect_cmp(c);
                amd_get_topology(c);
                srat_detect_node(c);
        }

#ifdef CONFIG_X86_32
        detect_ht(c);
#endif

        init_amd_cacheinfo(c);

        if (c->x86 >= 0xf)
                set_cpu_cap(c, X86_FEATURE_K8);

        if (cpu_has(c, X86_FEATURE_XMM2)) {
                unsigned long long val;
                int ret;

                /*
                 * A serializing LFENCE has less overhead than MFENCE, so
                 * use it for execution serialization.  On families which
                 * don't have that MSR, LFENCE is already serializing.
                 * msr_set_bit() uses the safe accessors, too, even if the MSR
                 * is not present.
                 */
                msr_set_bit(MSR_F10H_DECFG,
                            MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

                /*
                 * Verify that the MSR write was successful (could be running
                 * under a hypervisor) and only then assume that LFENCE is
                 * serializing.
                 */
                ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
                if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
                        /* A serializing LFENCE stops RDTSC speculation */
                        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
                } else {
                        /* MFENCE stops RDTSC speculation */
                        set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
                }
        }

        /*
         * Family 0x12 and above processors have APIC timer
         * running in deep C states.
         */
        if (c->x86 > 0x11)
                set_cpu_cap(c, X86_FEATURE_ARAT);

        /* 3DNow or LM implies PREFETCHW */
        if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
                if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
                        set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

        /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
        if (!cpu_has(c, X86_FEATURE_XENPV))
                set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}
#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* AMD errata T13 (order #21922) */
        if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
                /* Tbird rev A1/A2 */
                if (c->x86_model == 4 &&
                    (c->x86_stepping == 0 || c->x86_stepping == 1))
                        size = 256;
        }
        return size;
}
#endif
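
/*
 * CPUID 0x80000006 as read below (see the AMD APM vol. 3 for the full
 * definition): EBX describes the L2 TLB for 4K pages and EAX the one for
 * 2M/4M pages, with the ITLB entry count in bits [11:0] and the DTLB
 * entry count in bits [27:16] of each; hence the 12-bit 0xfff mask.
 */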
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
        u32 ebx, eax, ecx, edx;
        u16 mask = 0xfff;

        if (c->x86 < 0xf)
                return;

        if (c->extended_cpuid_level < 0x80000006)
                return;

        cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

        tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
        tlb_lli_4k[ENTRIES] = ebx & mask;

        /*
         * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
         * characteristics from the CPUID function 0x80000005 instead.
         */
        if (c->x86 == 0xf) {
                cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                mask = 0xff;
        }

        /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!((eax >> 16) & mask))
                tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
        else
                tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

        /* a 4M entry uses two 2M entries */
        tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

        /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!(eax & mask)) {
                /* Erratum 658 */
                if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
                        tlb_lli_2m[ENTRIES] = 1024;
                } else {
                        cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                        tlb_lli_2m[ENTRIES] = eax & 0xff;
                }
        } else
                tlb_lli_2m[ENTRIES] = eax & mask;

        tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
static const struct cpu_dev amd_cpu_dev = {
        .c_vendor       = "AMD",
        .c_ident        = { "AuthenticAMD" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                          [3]  = "486 DX/2",
                          [7]  = "486 DX/2-WB",
                          [8]  = "486 DX/4",
                          [9]  = "486 DX/4-WB",
                          [14] = "Am5x86-WT",
                          [15] = "Am5x86-WB"
                  }
                },
        },
        .legacy_cache_size = amd_size_cache,
#endif
        .c_early_init   = early_init_amd,
        .c_detect_tlb   = cpu_detect_tlb_amd,
        .c_bsp_init     = bsp_init_amd,
        .c_init         = init_amd,
        .c_x86_vendor   = X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *      AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *                         AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *                         AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)
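
/*
 * Decoding example (illustrative): AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf)
 * packs to 0x0f412fff, so AMD_MODEL_RANGE_FAMILY() yields 0x0f and the
 * (model << 4 | stepping) range runs from 0x412 to 0xfff, matching the
 * "ms" value computed in cpu_has_amd_erratum() below.
 */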
static const int amd_erratum_400[] =
        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;

        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;

                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
                if (osvw_id < osvw_len) {
                        u64 osvw_bits;

                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
                               osvw_bits);
                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
                }
        }

        /* OSVW unavailable or ID unknown, match family-model-stepping range */
        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
                    (ms <= AMD_MODEL_RANGE_END(range)))
                        return true;

        return false;
}
void set_dr_addr_mask(unsigned long mask, int dr)
{
        if (!boot_cpu_has(X86_FEATURE_BPEXT))
                return;

        switch (dr) {
        case 0:
                wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
                break;
        case 1:
        case 2:
        case 3:
                wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
                break;
        default:
                break;
        }
}