#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"
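
/*
 * The helpers below drive the MSR access through {rd,wr}msr_safe_regs()
 * with a full register image: gprs[1] (%ecx) carries the MSR number,
 * gprs[7] (%edi) the 0x9c5a203a key that certain K8 MSRs (such as
 * 0xc001100d, used further down) require, and the value travels in
 * gprs[0]/gprs[2] (%eax/%edx), matching the usual RDMSR/WRMSR split.
 */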
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 * and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *	(Publication # 21266  Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern __visible void vide(void);
__asm__(".globl vide\n\t.align 4\nvide: ret");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}
static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20 - PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
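
		/*
		 * Time K6_BUG_LOOP (one million) indirect calls through
		 * vide() using the TSC. Averaging more than 20 ticks per
		 * call is taken as the signature of the buggy stepping;
		 * fixed parts dispatch the calls noticeably faster.
		 */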
		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2 - d;

		if (d > 20 * K6_BUG_LOOP)
			printk(KERN_CONT
			       "system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l & 0x0000FFFF) == 0) {
			unsigned long flags;
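
			/*
			 * Old-style WHCR layout as programmed here: bit 0
			 * enables write allocation and the bits above it
			 * hold the limit in 4 MB units, hence
			 * (mbytes / 4) << 1.
			 */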
			l = (1 << 0) | ((mbytes / 4) << 1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
			       mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	    c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l & 0xFFFF0000) == 0) {
			unsigned long flags;
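
			/*
			 * New-style WHCR moved the fields into the high
			 * word: bit 16 enables write allocation and bits
			 * 31:22 hold the limit in 4 MB units, hence
			 * (mbytes >> 2) << 22.
			 */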
			l = ((mbytes >> 2) << 22) | (1 << 16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
			       mbytes);
		}
		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}
static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			       "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
			       l, ((l & 0x000fffff) | 0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff) | 0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	    (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		  " processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}
#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * Fixup core topology information for
 *  (1) AMD multi-node processors
 *      Assumption: Number of cores in each internal node is the same.
 *  (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has_topoext) {
		u32 eax, ebx, ecx, edx;
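
		/*
		 * CPUID leaf 0x8000001e as decoded here: ECX carries the
		 * node count (bits 10:8, minus one) and the node ID in its
		 * low bits; EBX carries the cores per compute unit (bits
		 * 9:8, minus one) and the compute unit ID (bits 7:0).
		 */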
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
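
	/*
	 * Worked example: with bits == 2 and initial APIC ID 0x1a
	 * (0b11010), the core id below is 0b10 = 2 and the socket id
	 * is 0b110 = 6.
	 */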
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits) - 1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}
static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;
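
		/*
		 * CPUID 0x80000005 EDX describes the L1 instruction cache:
		 * bits 31:24 hold its size in KB and bits 23:16 its
		 * associativity, so upperbit works out to the size of one
		 * cache way in bytes. Forcing every task to use the same
		 * (per-boot random) value for address bits [12:upperbit)
		 * is meant to avoid aliasing conflicts in the L1I shared
		 * by the two cores of a compute unit.
		 */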
		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;
		va_align.mask = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}
}
static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
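
		/*
		 * Read the northbridge (bus 0, device 0x18, function 0)
		 * config register 0x68; extended APIC IDs are usable only
		 * when both enable bits (17 and 18) are set.
		 */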
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);
}

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
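	/*
	 * CPUID(1) EAX encodes family/model/stepping as 0xFMS for family
	 * 0xf parts, so the ranges below (0x0f48..0x0f4f and 0x0f58 up)
	 * cover model 4 steppings 8-15 plus everything from model 5
	 * stepping 8 onwards, i.e. revision C and later.
	 */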
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * (bit 6 of MSR C001_0015).
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
}
static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(0xc0011021, value);
		}
	}
}
static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET */
	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}
#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
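
	/*
	 * CPUID 0x80000006 reports the L2 TLB: EBX[27:16] holds the 4K
	 * dTLB entry count and EBX[11:0] the 4K iTLB count, with EAX
	 * carrying the same split for 2M/4M pages. The fields are 12
	 * bits wide, hence the 0xfff mask; it is narrowed to 0xff below
	 * for the 8-bit L1 fields of leaf 0x80000005 used on K8.
	 */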
	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
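/*
 * Packed range layout: bits 31:24 hold the family, bits 23:12 the start
 * of the range as (model << 4 | stepping), and bits 11:0 the end in the
 * same model/stepping encoding, matching the "ms" value computed in
 * cpu_has_amd_erratum() below.
 */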
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;
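
	/*
	 * Prefer the OS Visible Workaround MSRs when available:
	 * MSR_AMD64_OSVW_ID_LENGTH says how many status bits are valid,
	 * and status bit osvw_id lives at bit (osvw_id % 64) of
	 * MSR_AMD64_OSVW_STATUS + (osvw_id / 64).
	 */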
	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}
void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!cpu_has_bpext)
		return;
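
	/*
	 * DR0's address mask has its own MSR; the masks for DR1-DR3 sit
	 * in consecutive MSRs starting at MSR_F16H_DR1_ADDR_MASK, hence
	 * the "- 1 + dr" offset below.
	 */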
	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}