/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *               2006      Shaohua Li <shaohua.li@intel.com>
 *               2013-2015 Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *      Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *                         H Peter Anvin <hpa@zytor.com>
 *                (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows updating the microcode on x86 processors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode_amd.h>
#include <asm/perf_event.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>

#define MICROCODE_VERSION       "2.01"

static struct microcode_ops *microcode_ops;

static bool dis_ucode_ldr;

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
static DEFINE_MUTEX(microcode_mutex);

struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

/*
 * Operations that are run on a target cpu:
 */
struct cpu_info_ctx {
        struct cpu_signature    *cpu_sig;
        int                     err;
};
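
/*
 * Check whether "dis_ucode_ldr" was passed on the kernel command line, on the
 * boot CPU. On 32-bit this runs before paging is enabled, so the command
 * line, the option string and the result flag are all accessed through their
 * physical addresses via __pa_nodebug().
 */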
static bool __init check_loader_disabled_bsp(void)
{
        static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
        const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
        const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
        bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
        const char *cmdline = boot_command_line;
        const char *option  = __dis_opt_str;
        bool *res = &dis_ucode_ldr;
#endif

        if (cmdline_find_option_bool(cmdline, option))
                *res = true;

        return *res;
}

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
#ifdef CONFIG_FW_LOADER
        struct builtin_fw *b_fw;

        for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
                if (!strcmp(name, b_fw->name)) {
                        cd->size = b_fw->size;
                        cd->data = b_fw->data;
                        return true;
                }
        }
#endif
        return false;
}
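
/*
 * Load microcode on the boot CPU as early as possible. Only Intel family 6
 * and later and AMD family 0x10 and later are handled; other vendors and
 * families are silently ignored.
 */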
void __init load_ucode_bsp(void)
{
        int vendor;
        unsigned int family;

        if (check_loader_disabled_bsp())
                return;

        if (!have_cpuid_p())
                return;

        vendor = x86_cpuid_vendor();
        family = x86_cpuid_family();

        switch (vendor) {
        case X86_VENDOR_INTEL:
                if (family >= 6)
                        load_ucode_intel_bsp();
                break;
        case X86_VENDOR_AMD:
                if (family >= 0x10)
                        load_ucode_amd_bsp(family);
                break;
        default:
                break;
        }
}
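
/*
 * APs read the flag that check_loader_disabled_bsp() set up; on 32-bit it is
 * again accessed through its physical address.
 */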
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
        return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
        return dis_ucode_ldr;
#endif
}
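
/* Early microcode load on an application processor, mirroring load_ucode_bsp(). */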
void load_ucode_ap(void)
{
        int vendor, family;

        if (check_loader_disabled_ap())
                return;

        if (!have_cpuid_p())
                return;

        vendor = x86_cpuid_vendor();
        family = x86_cpuid_family();

        switch (vendor) {
        case X86_VENDOR_INTEL:
                if (family >= 6)
                        load_ucode_intel_ap();
                break;
        case X86_VENDOR_AMD:
                if (family >= 0x10)
                        load_ucode_amd_ap();
                break;
        default:
                break;
        }
}
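
/*
 * Stash the microcode blobs found in the initrd so they remain available
 * after the initrd memory is freed. Registered as an fs_initcall() at the
 * bottom of this file.
 */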
static int __init save_microcode_in_initrd(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                if (c->x86 >= 6)
                        return save_microcode_in_initrd_intel();
                break;
        case X86_VENDOR_AMD:
                if (c->x86 >= 0x10)
                        return save_microcode_in_initrd_amd();
                break;
        default:
                break;
        }

        return -EINVAL;
}
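
/* Re-apply the early-loaded microcode on the current CPU, e.g. from the resume path. */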
void reload_early_microcode(void)
{
        int vendor, family;

        vendor = x86_cpuid_vendor();
        family = x86_cpuid_family();

        switch (vendor) {
        case X86_VENDOR_INTEL:
                if (family >= 6)
                        reload_ucode_intel();
                break;
        case X86_VENDOR_AMD:
                if (family >= 0x10)
                        reload_ucode_amd();
                break;
        default:
                break;
        }
}
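
/*
 * collect_cpu_info_local() is executed on the target CPU via
 * smp_call_function_single() and fills in that CPU's microcode signature
 * through the vendor-specific collect_cpu_info() callback.
 */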
static void collect_cpu_info_local(void *arg)
{
        struct cpu_info_ctx *ctx = arg;

        ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
                                                   ctx->cpu_sig);
}

static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
{
        struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
        int ret;

        ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
        if (!ret)
                ret = ctx.err;

        return ret;
}

static int collect_cpu_info(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        int ret;

        memset(uci, 0, sizeof(*uci));

        ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
        if (!ret)
                uci->valid = 1;

        return ret;
}
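
/*
 * Same pattern as above: apply_microcode_local() runs on the target CPU and
 * its result is passed back through struct apply_microcode_ctx.
 */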
struct apply_microcode_ctx {
        int err;
};

static void apply_microcode_local(void *arg)
{
        struct apply_microcode_ctx *ctx = arg;

        ctx->err = microcode_ops->apply_microcode(smp_processor_id());
}

static int apply_microcode_on_target(int cpu)
{
        struct apply_microcode_ctx ctx = { .err = 0 };
        int ret;

        ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
        if (!ret)
                ret = ctx.err;

        return ret;
}
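
/*
 * Legacy /dev/cpu/microcode interface: userspace writes a raw microcode image
 * into a misc character device. Only built when CONFIG_MICROCODE_OLD_INTERFACE
 * is enabled.
 */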
#ifdef CONFIG_MICROCODE_OLD_INTERFACE
static int do_microcode_update(const void __user *buf, size_t size)
{
        int error = 0;
        int cpu;

        for_each_online_cpu(cpu) {
                struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
                enum ucode_state ustate;

                if (!uci->valid)
                        continue;

                ustate = microcode_ops->request_microcode_user(cpu, buf, size);
                if (ustate == UCODE_ERROR) {
                        error = -1;
                        break;
                } else if (ustate == UCODE_OK)
                        apply_microcode_on_target(cpu);
        }

        return error;
}

static int microcode_open(struct inode *inode, struct file *file)
{
        return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}

static ssize_t microcode_write(struct file *file, const char __user *buf,
                               size_t len, loff_t *ppos)
{
        ssize_t ret = -EINVAL;

        if ((len >> PAGE_SHIFT) > totalram_pages) {
                pr_err("too much data (max %ld pages)\n", totalram_pages);
                return ret;
        }

        get_online_cpus();
        mutex_lock(&microcode_mutex);

        if (do_microcode_update(buf, len) == 0)
                ret = (ssize_t)len;

        if (ret > 0)
                perf_check_microcode();

        mutex_unlock(&microcode_mutex);
        put_online_cpus();

        return ret;
}

static const struct file_operations microcode_fops = {
        .owner          = THIS_MODULE,
        .write          = microcode_write,
        .open           = microcode_open,
        .llseek         = no_llseek,
};

static struct miscdevice microcode_dev = {
        .minor          = MICROCODE_MINOR,
        .name           = "microcode",
        .nodename       = "cpu/microcode",
        .fops           = &microcode_fops,
};

static int __init microcode_dev_init(void)
{
        int error;

        error = misc_register(&microcode_dev);
        if (error) {
                pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
                return error;
        }

        return 0;
}

static void __exit microcode_dev_exit(void)
{
        misc_deregister(&microcode_dev);
}
#else
#define microcode_dev_init()    0
#define microcode_dev_exit()    do { } while (0)
#endif

/* fake device for request_firmware */
static struct platform_device *microcode_pdev;

static int reload_for_cpu(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        enum ucode_state ustate;
        int err = 0;

        if (!uci->valid)
                return err;

        ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
        if (ustate == UCODE_OK)
                apply_microcode_on_target(cpu);
        else if (ustate == UCODE_ERROR)
                err = -EINVAL;

        return err;
}
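
/*
 * Writing "1" to /sys/devices/system/cpu/microcode/reload requests new
 * microcode from the firmware loader and applies it to all online CPUs.
 */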
static ssize_t reload_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t size)
{
        unsigned long val;
        int cpu;
        ssize_t ret = 0, tmp_ret;

        ret = kstrtoul(buf, 0, &val);
        if (ret)
                return ret;

        if (val != 1)
                return size;

        get_online_cpus();
        mutex_lock(&microcode_mutex);

        for_each_online_cpu(cpu) {
                tmp_ret = reload_for_cpu(cpu);
                if (tmp_ret != 0)
                        pr_warn("Error reloading microcode on CPU %d\n", cpu);

                /* save retval of the first encountered reload error */
                if (!ret)
                        ret = tmp_ret;
        }

        if (!ret)
                perf_check_microcode();

        mutex_unlock(&microcode_mutex);
        put_online_cpus();

        if (!ret)
                ret = size;

        return ret;
}
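
/*
 * Per-CPU sysfs attributes exposing the currently loaded microcode revision
 * and the processor flags of this CPU's signature.
 */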
static ssize_t version_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

        return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t pf_show(struct device *dev,
                       struct device_attribute *attr, char *buf)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

        return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR(reload, 0200, NULL, reload_store);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_processor_flags.attr,
        NULL
};

static struct attribute_group mc_attr_group = {
        .attrs  = mc_default_attrs,
        .name   = "microcode",
};

static void microcode_fini_cpu(int cpu)
{
        microcode_ops->microcode_fini_cpu(cpu);
}

static enum ucode_state microcode_resume_cpu(int cpu)
{
        pr_debug("CPU%d updated upon resume\n", cpu);

        if (apply_microcode_on_target(cpu))
                return UCODE_ERROR;

        return UCODE_OK;
}

static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
        enum ucode_state ustate;
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        if (uci->valid)
                return UCODE_OK;

        if (collect_cpu_info(cpu))
                return UCODE_ERROR;

        /* --dimm. Trigger a delayed update? */
        if (system_state != SYSTEM_RUNNING)
                return UCODE_NFOUND;

        ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
                                                     refresh_fw);
        if (ustate == UCODE_OK) {
                pr_debug("CPU%d updated upon init\n", cpu);
                apply_microcode_on_target(cpu);
        }

        return ustate;
}

static enum ucode_state microcode_update_cpu(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        if (uci->valid)
                return microcode_resume_cpu(cpu);

        return microcode_init_cpu(cpu, false);
}
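
/*
 * subsys_interface callbacks, invoked when a CPU device is added to or removed
 * from the cpu subsystem: create/remove the per-CPU "microcode" sysfs group
 * and trigger the initial microcode load.
 */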
static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
        int err, cpu = dev->id;

        if (!cpu_online(cpu))
                return 0;

        pr_debug("CPU%d added\n", cpu);

        err = sysfs_create_group(&dev->kobj, &mc_attr_group);
        if (err)
                return err;

        if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
                return -EINVAL;

        return err;
}

static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
        int cpu = dev->id;

        if (!cpu_online(cpu))
                return;

        pr_debug("CPU%d removed\n", cpu);
        microcode_fini_cpu(cpu);
        sysfs_remove_group(&dev->kobj, &mc_attr_group);
}

static struct subsys_interface mc_cpu_interface = {
        .name           = "microcode",
        .subsys         = &cpu_subsys,
        .add_dev        = mc_device_add,
        .remove_dev     = mc_device_remove,
};

/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
        int cpu = smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        if (uci->valid && uci->mc)
                microcode_ops->apply_microcode(cpu);
        else if (!uci->mc)
                reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
        .resume         = mc_bp_resume,
};
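
/*
 * CPU hotplug callbacks registered in microcode_init(): update the microcode
 * and (re)create the sysfs group when a CPU comes online, and tear down only
 * the sysfs interface when it goes down.
 */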
static int mc_cpu_online(unsigned int cpu)
{
        struct device *dev;

        dev = get_cpu_device(cpu);
        microcode_update_cpu(cpu);
        pr_debug("CPU%d added\n", cpu);

        if (sysfs_create_group(&dev->kobj, &mc_attr_group))
                pr_err("Failed to create group for CPU%d\n", cpu);

        return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
        struct device *dev;

        dev = get_cpu_device(cpu);
        /* Suspend is in progress, only remove the interface */
        sysfs_remove_group(&dev->kobj, &mc_attr_group);
        pr_debug("CPU%d removed\n", cpu);
        /*
         * When a CPU goes offline, don't free up or invalidate the copy of
         * the microcode in kernel memory, so that we can reuse it when the
         * CPU comes back online without unnecessarily requesting it from
         * userspace again.
         */
        return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
        &dev_attr_reload.attr,
        NULL
};

static struct attribute_group cpu_root_microcode_group = {
        .name  = "microcode",
        .attrs = cpu_root_microcode_attrs,
};
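
/*
 * Driver init: pick the vendor-specific ops, register the platform device used
 * by request_firmware(), then hook into the cpu subsystem, sysfs, syscore
 * (resume) and CPU hotplug.
 */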
int __init microcode_init(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;
        int error;

        if (dis_ucode_ldr)
                return -EINVAL;

        if (c->x86_vendor == X86_VENDOR_INTEL)
                microcode_ops = init_intel_microcode();
        else if (c->x86_vendor == X86_VENDOR_AMD)
                microcode_ops = init_amd_microcode();
        else
                pr_err("no support for this CPU vendor\n");

        if (!microcode_ops)
                return -ENODEV;

        microcode_pdev = platform_device_register_simple("microcode", -1,
                                                         NULL, 0);
        if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);

        get_online_cpus();
        mutex_lock(&microcode_mutex);

        error = subsys_interface_register(&mc_cpu_interface);
        if (!error)
                perf_check_microcode();
        mutex_unlock(&microcode_mutex);
        put_online_cpus();

        if (error)
                goto out_pdev;

        error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                   &cpu_root_microcode_group);
        if (error) {
                pr_err("Error creating microcode group!\n");
                goto out_driver;
        }

        error = microcode_dev_init();
        if (error)
                goto out_ucode_group;

        register_syscore_ops(&mc_syscore_ops);
        cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
                                  mc_cpu_online, mc_cpu_down_prep);

        pr_info("Microcode Update Driver: v%s.", MICROCODE_VERSION);

        return 0;

 out_ucode_group:
        sysfs_remove_group(&cpu_subsys.dev_root->kobj,
                           &cpu_root_microcode_group);

 out_driver:
        get_online_cpus();
        mutex_lock(&microcode_mutex);

        subsys_interface_unregister(&mc_cpu_interface);

        mutex_unlock(&microcode_mutex);
        put_online_cpus();

 out_pdev:
        platform_device_unregister(microcode_pdev);

        return error;
}
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);