intel.c

/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *		      2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin <hpa@zytor.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

MODULE_DESCRIPTION("Microcode Update Driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
MODULE_LICENSE("GPL");

static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];

static struct mc_saved_data {
	unsigned int mc_saved_count;
	struct microcode_intel **mc_saved;
} mc_saved_data;
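
/*
 * Scan the previously saved patches and, if one matches this CPU's
 * signature/platform flags with a newer revision than what is running,
 * point uci->mc at it.
 */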
static enum ucode_state
load_microcode_early(struct microcode_intel **saved,
		     unsigned int num_saved, struct ucode_cpu_info *uci)
{
	struct microcode_intel *ucode_ptr, *new_mc = NULL;
	struct microcode_header_intel *mc_hdr;
	int new_rev, ret, i;

	new_rev = uci->cpu_sig.rev;

	for (i = 0; i < num_saved; i++) {
		ucode_ptr = saved[i];
		mc_hdr = (struct microcode_header_intel *)ucode_ptr;

		ret = has_newer_microcode(ucode_ptr,
					  uci->cpu_sig.sig,
					  uci->cpu_sig.pf,
					  new_rev);
		if (!ret)
			continue;

		new_rev = mc_hdr->rev;
		new_mc = ucode_ptr;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	uci->mc = (struct microcode_intel *)new_mc;
	return UCODE_OK;
}
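
/*
 * Turn the initrd-relative offsets in @initrd into usable pointers by
 * adding the initrd load address @off.
 */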
static inline void
copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
		 unsigned long off, int num_saved)
{
	int i;

	for (i = 0; i < num_saved; i++)
		mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
}

#ifdef CONFIG_X86_32
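/*
 * On 32-bit the early loader runs before paging is enabled, so convert the
 * saved (virtual) microcode pointers in mc_saved_data into physical
 * addresses that can be dereferenced at this point.
 */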
static void
microcode_phys(struct microcode_intel **mc_saved_tmp,
	       struct mc_saved_data *mc_saved_data)
{
	int i;
	struct microcode_intel ***mc_saved;

	mc_saved = (struct microcode_intel ***)
		   __pa_nodebug(&mc_saved_data->mc_saved);
	for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
		struct microcode_intel *p;

		p = *(struct microcode_intel **)
			__pa_nodebug(mc_saved_data->mc_saved + i);
		mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
	}
}
#endif
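
/*
 * Load the best matching patch for @uci, either from the pointers already
 * copied into mc_saved_data or, before that copy exists, straight from the
 * initrd offsets.
 */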
static enum ucode_state
load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
	       unsigned long initrd_start, struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
	unsigned int count = mc_saved_data->mc_saved_count;

	if (!mc_saved_data->mc_saved) {
		copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);

		return load_microcode_early(mc_saved_tmp, count, uci);
	} else {
#ifdef CONFIG_X86_32
		microcode_phys(mc_saved_tmp, mc_saved_data);
		return load_microcode_early(mc_saved_tmp, count, uci);
#else
		return load_microcode_early(mc_saved_data->mc_saved,
					    count, uci);
#endif
	}
}

/*
 * Given a CPU signature and a microcode patch, this function checks whether
 * the patch has the same family and model as the CPU.
 */
static enum ucode_state
matching_model_microcode(struct microcode_header_intel *mc_header,
			 unsigned long sig)
{
	unsigned int fam, model;
	unsigned int fam_ucode, model_ucode;
	struct extended_sigtable *ext_header;
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	int ext_sigcount, i;
	struct extended_signature *ext_sig;

	fam   = __x86_family(sig);
	model = x86_model(sig);

	fam_ucode   = __x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return UCODE_OK;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return UCODE_NFOUND;

	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode   = __x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return UCODE_OK;

		ext_sig++;
	}
	return UCODE_NFOUND;
}
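
/*
 * Copy the patches in @mc_saved_src into newly allocated kernel memory and
 * let mc_saved_data point at the copies, so they remain available after the
 * initrd is gone.
 */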
static int
save_microcode(struct mc_saved_data *mc_saved_data,
	       struct microcode_intel **mc_saved_src,
	       unsigned int mc_saved_count)
{
	int i, j;
	struct microcode_intel **saved_ptr;
	int ret;

	if (!mc_saved_count)
		return -EINVAL;

	/*
	 * Copy new microcode data.
	 */
	saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
	if (!saved_ptr)
		return -ENOMEM;

	for (i = 0; i < mc_saved_count; i++) {
		struct microcode_header_intel *mc_hdr;
		struct microcode_intel *mc;
		unsigned long size;

		if (!mc_saved_src[i]) {
			ret = -EINVAL;
			goto err;
		}

		mc     = mc_saved_src[i];
		mc_hdr = &mc->hdr;
		size   = get_totalsize(mc_hdr);

		saved_ptr[i] = kmalloc(size, GFP_KERNEL);
		if (!saved_ptr[i]) {
			ret = -ENOMEM;
			goto err;
		}

		memcpy(saved_ptr[i], mc, size);
	}

	/*
	 * Point to newly saved microcode.
	 */
	mc_saved_data->mc_saved = saved_ptr;
	mc_saved_data->mc_saved_count = mc_saved_count;

	return 0;

err:
	for (j = 0; j <= i; j++)
		kfree(saved_ptr[j]);
	kfree(saved_ptr);

	return ret;
}

/*
 * A microcode patch in ucode_ptr is saved into mc_saved
 * - if it has a matching signature and a newer revision than an existing
 *   patch in mc_saved,
 * - or if it is a newly discovered microcode patch.
 *
 * The microcode patch must have the same model as the CPU.
 *
 * Returns: the updated number @num_saved of saved microcode patches.
 */
static unsigned int _save_mc(struct microcode_intel **mc_saved,
			     u8 *ucode_ptr, unsigned int num_saved)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	unsigned int sig, pf;
	int found = 0, i;

	mc_hdr = (struct microcode_header_intel *)ucode_ptr;

	for (i = 0; i < num_saved; i++) {
		mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
		sig = mc_saved_hdr->sig;
		pf  = mc_saved_hdr->pf;

		if (!find_matching_signature(ucode_ptr, sig, pf))
			continue;

		found = 1;

		if (mc_hdr->rev <= mc_saved_hdr->rev)
			continue;

		/*
		 * Found an older ucode saved earlier. Replace it with
		 * this newer one.
		 */
		mc_saved[i] = (struct microcode_intel *)ucode_ptr;
		break;
	}

	/* Newly detected microcode, save it to memory. */
	if (i >= num_saved && !found)
		mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;

	return num_saved;
}

/*
 * Get microcode matching the BSP's model. Only CPUs with the same model as
 * the BSP can be present in the platform.
 */
static enum ucode_state __init
get_matching_model_microcode(int cpu, unsigned long start,
			     void *data, size_t size,
			     struct mc_saved_data *mc_saved_data,
			     unsigned long *mc_saved_in_initrd,
			     struct ucode_cpu_info *uci)
{
	u8 *ucode_ptr = data;
	unsigned int leftover = size;
	enum ucode_state state = UCODE_OK;
	unsigned int mc_size;
	struct microcode_header_intel *mc_header;
	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
	unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
	int i;

	while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {

		if (leftover < sizeof(mc_header))
			break;

		mc_header = (struct microcode_header_intel *)ucode_ptr;

		mc_size = get_totalsize(mc_header);
		if (!mc_size || mc_size > leftover ||
		    microcode_sanity_check(ucode_ptr, 0) < 0)
			break;

		leftover -= mc_size;

		/*
		 * Since APs with same family and model as the BSP may boot in
		 * the platform, we need to find and save microcode patches
		 * with the same family and model as the BSP.
		 */
		if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
		    UCODE_OK) {
			ucode_ptr += mc_size;
			continue;
		}

		mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);

		ucode_ptr += mc_size;
	}

	if (leftover) {
		state = UCODE_ERROR;
		goto out;
	}

	if (mc_saved_count == 0) {
		state = UCODE_NFOUND;
		goto out;
	}

	for (i = 0; i < mc_saved_count; i++)
		mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;

	mc_saved_data->mc_saved_count = mc_saved_count;
out:
	return state;
}
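
/*
 * Collect this CPU's signature, platform flags and current microcode
 * revision using the native_* accessors, which are safe to use on the early
 * loading paths.
 */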
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig;
	unsigned int eax, ebx, ecx, edx;

	csig.sig = 0;
	csig.pf = 0;
	csig.rev = 0;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = __x86_family(csig.sig);
	model  = x86_model(csig.sig);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}
	native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);

	/* As documented in the SDM: Do a CPUID 1 here */
	sync_core();

	/* get the current revision from MSR 0x8B */
	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);

	csig.rev = val[1];

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}

#ifdef DEBUG
static void show_saved_mc(void)
{
	int i, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;

	if (mc_saved_data.mc_saved_count == 0) {
		pr_debug("no microcode data saved.\n");
		return;
	}
	pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf  = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		int ext_sigcount;
		struct extended_signature *ext_sig;

		mc_saved_header = (struct microcode_header_intel *)
				  mc_saved_data.mc_saved[i];
		sig = mc_saved_header->sig;
		pf  = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		total_size = get_totalsize(mc_saved_header);
		data_size  = get_datasize(mc_saved_header);
		date = mc_saved_header->date;

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf  = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
}
#else
static inline void show_saved_mc(void)
{
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
/*
 * Save this mc into mc_saved_data so it will be loaded early when a CPU is
 * hot added or resumes.
 *
 * Make sure this mc is a valid microcode patch before calling this
 * function.
 */
int save_mc_for_early(u8 *mc)
{
	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
	unsigned int mc_saved_count_init;
	unsigned int mc_saved_count;
	struct microcode_intel **mc_saved;
	int ret = 0;
	int i;

	/*
	 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
	 * hotplug.
	 */
	mutex_lock(&x86_cpu_microcode_mutex);

	mc_saved_count_init = mc_saved_data.mc_saved_count;
	mc_saved_count = mc_saved_data.mc_saved_count;
	mc_saved = mc_saved_data.mc_saved;

	if (mc_saved && mc_saved_count)
		memcpy(mc_saved_tmp, mc_saved,
		       mc_saved_count * sizeof(struct microcode_intel *));
	/*
	 * Save the microcode patch mc in mc_save_tmp structure if it's a newer
	 * version.
	 */
	mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);

	/*
	 * Save the mc_save_tmp in global mc_saved_data.
	 */
	ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
	if (ret) {
		pr_err("Cannot save microcode patch.\n");
		goto out;
	}

	show_saved_mc();

	/*
	 * Free old saved microcode data.
	 */
	if (mc_saved) {
		for (i = 0; i < mc_saved_count_init; i++)
			kfree(mc_saved[i]);
		kfree(mc_saved);
	}

out:
	mutex_unlock(&x86_cpu_microcode_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(save_mc_for_early);
#endif
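
/*
 * On 64-bit, look for a microcode blob built into the kernel image itself,
 * named after this CPU's family-model-stepping.
 */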
static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
{
#ifdef CONFIG_X86_64
	unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
	unsigned int family, model, stepping;
	char name[30];

	native_cpuid(&eax, &ebx, &ecx, &edx);

	family   = __x86_family(eax);
	model    = x86_model(eax);
	stepping = eax & 0xf;

	sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);

	return get_builtin_firmware(cp, name);
#else
	return false;
#endif
}

static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
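
/*
 * Locate the Intel microcode cpio member in the initrd (or fall back to the
 * built-in firmware) and save all patches matching the BSP's model.
 */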
static __init enum ucode_state
scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
	       unsigned long start, unsigned long size,
	       struct ucode_cpu_info *uci)
{
	struct cpio_data cd;
	long offset = 0;
#ifdef CONFIG_X86_32
	char *p = (char *)__pa_nodebug(ucode_name);
#else
	char *p = ucode_name;
#endif

	cd.data = NULL;
	cd.size = 0;

	cd = find_cpio_data(p, (void *)start, size, &offset);
	if (!cd.data) {
		if (!load_builtin_intel_microcode(&cd))
			return UCODE_ERROR;
	}

	return get_matching_model_microcode(0, start, cd.data, cd.size,
					    mc_saved_data, initrd, uci);
}

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	int cpu = smp_processor_id();

	pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		cpu,
		uci->cpu_sig.rev,
		date & 0xffff,
		date >> 24,
		(date >> 16) & 0xff);
}

#ifdef CONFIG_X86_32
static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we can not call printk() yet. Keep microcode patch number in
 * mc_saved_data.mc_saved and delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc_intel;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc_intel = uci->mc;
	if (mc_intel == NULL)
		return;

	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p  = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p  = mc_intel->hdr.date;
}
#else

/*
 * Flush global tlb. We only do this in x86_64 where paging has been enabled
 * already and PGE should be enabled as well.
 */
static inline void flush_tlb_early(void)
{
	__native_flush_tlb_global_irq_disabled();
}

static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc_intel;

	mc_intel = uci->mc;
	if (mc_intel == NULL)
		return;

	print_ucode_info(uci, mc_intel->hdr.date);
}
#endif
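
/*
 * Write the patch in uci->mc to the CPU via the microcode-update MSR and
 * verify that the new revision is reported back.
 */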
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc_intel;
	unsigned int val[2];

	mc_intel = uci->mc;
	if (mc_intel == NULL)
		return 0;

	/* write microcode via MSR 0x79 */
	native_wrmsr(MSR_IA32_UCODE_WRITE,
		     (unsigned long) mc_intel->bits,
		     (unsigned long) mc_intel->bits >> 16 >> 16);
	native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);

	/* As documented in the SDM: Do a CPUID 1 here */
	sync_core();

	/* get the current revision from MSR 0x8B */
	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
	if (val[1] != mc_intel->hdr.rev)
		return -1;

#ifdef CONFIG_X86_64
	/* Flush global tlb. This is precaution. */
	flush_tlb_early();
#endif
	uci->cpu_sig.rev = val[1];

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc_intel->hdr.date);

	return 0;
}

/*
 * This function converts microcode patch offsets previously stored in
 * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
 */
int __init save_microcode_in_initrd_intel(void)
{
	unsigned int count = mc_saved_data.mc_saved_count;
	struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
	int ret = 0;

	if (count == 0)
		return ret;

	copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
	ret = save_microcode(&mc_saved_data, mc_saved, count);
	if (ret)
		pr_err("Cannot save microcode patches from initrd.\n");

	show_saved_mc();

	return ret;
}
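
/*
 * Collect the BSP's signature, scan the initrd for matching patches and
 * apply the newest one.
 */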
static void __init
_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
		      unsigned long *initrd,
		      unsigned long start, unsigned long size)
{
	struct ucode_cpu_info uci;
	enum ucode_state ret;

	collect_cpu_info_early(&uci);

	ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
	if (ret != UCODE_OK)
		return;

	ret = load_microcode(mc_saved_data, initrd, start, &uci);
	if (ret != UCODE_OK)
		return;

	apply_microcode_early(&uci, true);
}
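
/*
 * Early entry point for the boot CPU. On 32-bit this runs before paging is
 * enabled, so all globals are accessed through their physical addresses.
 */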
void __init load_ucode_intel_bsp(void)
{
	u64 start, size;
#ifdef CONFIG_X86_32
	struct boot_params *p;

	p	= (struct boot_params *)__pa_nodebug(&boot_params);
	start	= p->hdr.ramdisk_image;
	size	= p->hdr.ramdisk_size;

	_load_ucode_intel_bsp(
			(struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
			(unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
			start, size);
#else
	start	= boot_params.hdr.ramdisk_image + PAGE_OFFSET;
	size	= boot_params.hdr.ramdisk_size;

	_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
#endif
}
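
/*
 * Early entry point for application processors: reuse the patches saved by
 * the BSP, if any.
 */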
void load_ucode_intel_ap(void)
{
	struct mc_saved_data *mc_saved_data_p;
	struct ucode_cpu_info uci;
	unsigned long *mc_saved_in_initrd_p;
	unsigned long initrd_start_addr;
	enum ucode_state ret;
#ifdef CONFIG_X86_32
	unsigned long *initrd_start_p;

	mc_saved_in_initrd_p =
		(unsigned long *)__pa_nodebug(mc_saved_in_initrd);
	mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
	initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
	initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
#else
	mc_saved_data_p = &mc_saved_data;
	mc_saved_in_initrd_p = mc_saved_in_initrd;
	initrd_start_addr = initrd_start;
#endif

	/*
	 * If there is no valid ucode previously saved in memory, no need to
	 * update ucode on this AP.
	 */
	if (mc_saved_data_p->mc_saved_count == 0)
		return;

	collect_cpu_info_early(&uci);
	ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
			     initrd_start_addr, &uci);

	if (ret != UCODE_OK)
		return;

	apply_microcode_early(&uci, true);
}
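
/*
 * Reapply the newest saved patch on this CPU, e.g. when microcode has to be
 * restored on the resume path.
 */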
void reload_ucode_intel(void)
{
	struct ucode_cpu_info uci;
	enum ucode_state ret;

	if (!mc_saved_data.mc_saved_count)
		return;

	collect_cpu_info_early(&uci);

	ret = load_microcode_early(mc_saved_data.mc_saved,
				   mc_saved_data.mc_saved_count, &uci);
	if (ret != UCODE_OK)
		return;

	apply_microcode_early(&uci, false);
}
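
/*
 * Driver callback: report this CPU's signature, platform flags and current
 * microcode revision via the regular (non-early) accessors.
 */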
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;
	pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
		cpu_num, csig->sig, csig->pf, csig->rev);

	return 0;
}

/*
 * return 0 - no update found
 * return 1 - found update
 */
static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
{
	struct cpu_signature cpu_sig;
	unsigned int csig, cpf, crev;

	collect_cpu_info(cpu, &cpu_sig);

	csig = cpu_sig.sig;
	cpf  = cpu_sig.pf;
	crev = cpu_sig.rev;

	return has_newer_microcode(mc_intel, csig, cpf, crev);
}
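
/*
 * Driver callback: apply the patch cached in ucode_cpu_info[cpu] if it is
 * newer than the revision the CPU is already running.
 */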
static int apply_microcode_intel(int cpu)
{
	struct microcode_intel *mc_intel;
	struct ucode_cpu_info *uci;
	unsigned int val[2];
	int cpu_num = raw_smp_processor_id();
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);

	uci = ucode_cpu_info + cpu;
	mc_intel = uci->mc;

	/* We should bind the task to the CPU */
	BUG_ON(cpu_num != cpu);

	if (mc_intel == NULL)
		return 0;

	/*
	 * Microcode on this CPU could be updated earlier. Only apply the
	 * microcode patch in mc_intel when it is newer than the one on this
	 * CPU.
	 */
	if (get_matching_mc(mc_intel, cpu) == 0)
		return 0;

	/* write microcode via MSR 0x79 */
	wrmsr(MSR_IA32_UCODE_WRITE,
	      (unsigned long) mc_intel->bits,
	      (unsigned long) mc_intel->bits >> 16 >> 16);
	wrmsr(MSR_IA32_UCODE_REV, 0, 0);

	/* As documented in the SDM: Do a CPUID 1 here */
	sync_core();

	/* get the current revision from MSR 0x8B */
	rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);

	if (val[1] != mc_intel->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu_num, mc_intel->hdr.rev);
		return -1;
	}

	pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
		cpu_num, val[1],
		mc_intel->hdr.date & 0xffff,
		mc_intel->hdr.date >> 24,
		(mc_intel->hdr.date >> 16) & 0xff);

	uci->cpu_sig.rev = val[1];
	c->microcode = val[1];

	return 0;
}
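
/*
 * Walk a buffer of concatenated patches, copying each one in through
 * @get_ucode_data, and keep the newest one that matches this CPU.
 */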
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
				int (*get_ucode_data)(void *, const void *, size_t))
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
	enum ucode_state state = UCODE_OK;
	unsigned int curr_mc_size = 0;
	unsigned int csig, cpf;

	while (leftover) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size;

		if (leftover < sizeof(mc_header)) {
			pr_err("error! Truncated header in microcode data file\n");
			break;
		}

		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
			break;

		mc_size = get_totalsize(&mc_header);
		if (!mc_size || mc_size > leftover) {
			pr_err("error! Bad data in microcode data file\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf  = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc  = mc;
			mc = NULL;	/* trigger new vmalloc */
		}

		ucode_ptr += mc_size;
		leftover  -= mc_size;
	}

	vfree(mc);

	if (leftover) {
		vfree(new_mc);
		state = UCODE_ERROR;
		goto out;
	}

	if (!new_mc) {
		state = UCODE_NFOUND;
		goto out;
	}

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);
out:
	return state;
}

static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}
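
/*
 * Driver callback: request intel-ucode/<family>-<model>-<stepping> through
 * the firmware loader and feed it to generic_load_microcode().
 */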
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	char name[30];
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	enum ucode_state ret;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_mask);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	ret = generic_load_microcode(cpu, (void *)firmware->data,
				     firmware->size, &get_ucode_fw);

	release_firmware(firmware);

	return ret;
}

static int get_ucode_user(void *to, const void *from, size_t n)
{
	return copy_from_user(to, from, n);
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}

static void microcode_fini_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	vfree(uci->mc);
	uci->mc = NULL;
}

static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user	= request_microcode_user,
	.request_microcode_fw	= request_microcode_fw,
	.collect_cpu_info	= collect_cpu_info,
	.apply_microcode	= apply_microcode_intel,
	.microcode_fini_cpu	= microcode_fini_cpu,
};
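
/*
 * Called by the microcode core: reject non-Intel, pre-family-6 and IA64
 * CPUs, otherwise hand back the Intel-specific ops.
 */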
struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	return &microcode_intel_ops;
}