core.c

/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *		 2006	   Shaohua Li <shaohua.li@intel.com>
 *		 2013-2015 Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *			   H Peter Anvin <hpa@zytor.com>
 *		  (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows upgrading the microcode on x86 processors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode_amd.h>
#include <asm/perf_event.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>

#define MICROCODE_VERSION	"2.01"

static struct microcode_ops	*microcode_ops;

static bool dis_ucode_ldr;

static int __init disable_loader(char *str)
{
	dis_ucode_ldr = true;
	return 1;
}
__setup("dis_ucode_ldr", disable_loader);
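
/*
 * Usage sketch (illustrative, derived from the __setup() handler above and
 * from the early command-line scan in check_loader_disabled_bsp() below):
 * booting with
 *
 *	linux ... dis_ucode_ldr
 *
 * sets dis_ucode_ldr and no microcode loading is attempted, neither early
 * in boot nor via the driver proper.
 */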

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
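/*
 * A minimal sketch of that call-site pattern (update_one_cpu() is a
 * hypothetical placeholder for whatever work is done under the locks):
 *
 *	get_online_cpus();
 *	mutex_lock(&microcode_mutex);
 *	update_one_cpu(cpu);
 *	mutex_unlock(&microcode_mutex);
 *	put_online_cpus();
 *
 * reload_store(), microcode_write() and microcode_init() below follow this
 * shape.
 */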
static DEFINE_MUTEX(microcode_mutex);

struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
EXPORT_SYMBOL_GPL(ucode_cpu_info);

/*
 * Operations that are run on a target cpu:
 */
struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

static bool __init check_loader_disabled_bsp(void)
{
#ifdef CONFIG_X86_32
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *opt	    = "dis_ucode_ldr";
	const char *option  = (const char *)__pa_nodebug(opt);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option  = "dis_ucode_ldr";
	bool *res = &dis_ucode_ldr;
#endif

	if (cmdline_find_option_bool(cmdline, option))
		*res = true;

	return *res;
}

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
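
/*
 * Look up a firmware blob that was linked into the kernel image (the table
 * between __start_builtin_fw and __end_builtin_fw) by name and return a
 * cpio_data view of it, without touching the filesystem.
 */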
bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
#ifdef CONFIG_FW_LOADER
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (!strcmp(name, b_fw->name)) {
			cd->size = b_fw->size;
			cd->data = b_fw->data;
			return true;
		}
	}
#endif
	return false;
}

void __init load_ucode_bsp(void)
{
	int vendor;
	unsigned int family;

	if (check_loader_disabled_bsp())
		return;

	if (!have_cpuid_p())
		return;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			load_ucode_intel_bsp();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			load_ucode_amd_bsp(family);
		break;
	default:
		break;
	}
}

static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}

void load_ucode_ap(void)
{
	int vendor, family;

	if (check_loader_disabled_ap())
		return;

	if (!have_cpuid_p())
		return;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			load_ucode_amd_ap();
		break;
	default:
		break;
	}
}

int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		if (c->x86 >= 6)
			save_microcode_in_initrd_intel();
		break;
	case X86_VENDOR_AMD:
		if (c->x86 >= 0x10)
			save_microcode_in_initrd_amd();
		break;
	default:
		break;
	}

	return 0;
}

void reload_early_microcode(void)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd();
		break;
	default:
		break;
	}
}

static void collect_cpu_info_local(void *arg)
{
	struct cpu_info_ctx *ctx = arg;

	ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
						   ctx->cpu_sig);
}

static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
{
	struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
	int ret;

	ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
	if (!ret)
		ret = ctx.err;

	return ret;
}

static int collect_cpu_info(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	int ret;

	memset(uci, 0, sizeof(*uci));

	ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
	if (!ret)
		uci->valid = 1;

	return ret;
}

struct apply_microcode_ctx {
	int err;
};

static void apply_microcode_local(void *arg)
{
	struct apply_microcode_ctx *ctx = arg;

	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
}

static int apply_microcode_on_target(int cpu)
{
	struct apply_microcode_ctx ctx = { .err = 0 };
	int ret;

	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
	if (!ret)
		ret = ctx.err;

	return ret;
}

#ifdef CONFIG_MICROCODE_OLD_INTERFACE
static int do_microcode_update(const void __user *buf, size_t size)
{
	int error = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
		enum ucode_state ustate;

		if (!uci->valid)
			continue;

		ustate = microcode_ops->request_microcode_user(cpu, buf, size);
		if (ustate == UCODE_ERROR) {
			error = -1;
			break;
		} else if (ustate == UCODE_OK)
			apply_microcode_on_target(cpu);
	}

	return error;
}

static int microcode_open(struct inode *inode, struct file *file)
{
	return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}

static ssize_t microcode_write(struct file *file, const char __user *buf,
			       size_t len, loff_t *ppos)
{
	ssize_t ret = -EINVAL;

	if ((len >> PAGE_SHIFT) > totalram_pages) {
		pr_err("too much data (max %ld pages)\n", totalram_pages);
		return ret;
	}

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	if (do_microcode_update(buf, len) == 0)
		ret = (ssize_t)len;

	if (ret > 0)
		perf_check_microcode();

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	return ret;
}

static const struct file_operations microcode_fops = {
	.owner		= THIS_MODULE,
	.write		= microcode_write,
	.open		= microcode_open,
	.llseek		= no_llseek,
};

static struct miscdevice microcode_dev = {
	.minor		= MICROCODE_MINOR,
	.name		= "microcode",
	.nodename	= "cpu/microcode",
	.fops		= &microcode_fops,
};

static int __init microcode_dev_init(void)
{
	int error;

	error = misc_register(&microcode_dev);
	if (error) {
		pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
		return error;
	}

	return 0;
}

static void __exit microcode_dev_exit(void)
{
	misc_deregister(&microcode_dev);
}
#else
#define microcode_dev_init()	0
#define microcode_dev_exit()	do { } while (0)
#endif
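
/*
 * Usage sketch for the legacy interface above (only compiled in when
 * CONFIG_MICROCODE_OLD_INTERFACE is set): userspace hands a raw microcode
 * image to the misc device node, typically in a single write(), e.g.
 *
 *	dd if=/lib/firmware/microcode.bin of=/dev/cpu/microcode bs=4M count=1
 *
 * (the image path and block size are illustrative). Each write() lands in
 * microcode_write() -> do_microcode_update() above.
 */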

/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

static int reload_for_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	enum ucode_state ustate;
	int err = 0;

	if (!uci->valid)
		return err;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
	if (ustate == UCODE_OK)
		apply_microcode_on_target(cpu);
	else
		if (ustate == UCODE_ERROR)
			err = -EINVAL;
	return err;
}

static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	int cpu;
	ssize_t ret = 0, tmp_ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return size;

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	for_each_online_cpu(cpu) {
		tmp_ret = reload_for_cpu(cpu);
		if (tmp_ret != 0)
			pr_warn("Error reloading microcode on CPU %d\n", cpu);

		/* save retval of the first encountered reload error */
		if (!ret)
			ret = tmp_ret;
	}

	if (!ret)
		perf_check_microcode();

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (!ret)
		ret = size;

	return ret;
}
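
/*
 * reload_store() is exposed below as the write-only "reload" attribute in
 * cpu_root_microcode_group, so (assuming the usual sysfs mount point) a
 * microcode reload on all online CPUs can be triggered with:
 *
 *	echo 1 > /sys/devices/system/cpu/microcode/reload
 *
 * Any value other than 1 is accepted but ignored.
 */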

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t pf_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR(reload, 0200, NULL, reload_store);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static struct attribute_group mc_attr_group = {
	.attrs	= mc_default_attrs,
	.name	= "microcode",
};
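
/*
 * mc_attr_group is instantiated per CPU via sysfs_create_group() in
 * mc_device_add() and mc_cpu_callback() below, so the per-CPU attributes
 * are readable as, e.g. (assuming the standard sysfs layout):
 *
 *	cat /sys/devices/system/cpu/cpu0/microcode/version
 *	cat /sys/devices/system/cpu/cpu0/microcode/processor_flags
 */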

static void microcode_fini_cpu(int cpu)
{
	microcode_ops->microcode_fini_cpu(cpu);
}

static enum ucode_state microcode_resume_cpu(int cpu)
{
	pr_debug("CPU%d updated upon resume\n", cpu);

	if (apply_microcode_on_target(cpu))
		return UCODE_ERROR;

	return UCODE_OK;
}

static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
	enum ucode_state ustate;
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci && uci->valid)
		return UCODE_OK;

	if (collect_cpu_info(cpu))
		return UCODE_ERROR;

	/* --dimm. Trigger a delayed update? */
	if (system_state != SYSTEM_RUNNING)
		return UCODE_NFOUND;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
						     refresh_fw);
	if (ustate == UCODE_OK) {
		pr_debug("CPU%d updated upon init\n", cpu);
		apply_microcode_on_target(cpu);
	}

	return ustate;
}

static enum ucode_state microcode_update_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid)
		return microcode_resume_cpu(cpu);

	return microcode_init_cpu(cpu, false);
}

static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
	int err, cpu = dev->id;

	if (!cpu_online(cpu))
		return 0;

	pr_debug("CPU%d added\n", cpu);

	err = sysfs_create_group(&dev->kobj, &mc_attr_group);
	if (err)
		return err;

	if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
		return -EINVAL;

	return err;
}

static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
	int cpu = dev->id;

	if (!cpu_online(cpu))
		return;

	pr_debug("CPU%d removed\n", cpu);
	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
}

static struct subsys_interface mc_cpu_interface = {
	.name		= "microcode",
	.subsys		= &cpu_subsys,
	.add_dev	= mc_device_add,
	.remove_dev	= mc_device_remove,
};

/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid && uci->mc)
		microcode_ops->apply_microcode(cpu);
	else if (!uci->mc)
		reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
	.resume		= mc_bp_resume,
};

static int
mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		microcode_update_cpu(cpu);
		pr_debug("CPU%d added\n", cpu);
		/*
		 * "break" is missing on purpose here because we want to fall
		 * through in order to create the sysfs group.
		 */

	case CPU_DOWN_FAILED:
		if (sysfs_create_group(&dev->kobj, &mc_attr_group))
			pr_err("Failed to create group for CPU%d\n", cpu);
		break;

	case CPU_DOWN_PREPARE:
		/* Suspend is in progress, only remove the interface */
		sysfs_remove_group(&dev->kobj, &mc_attr_group);
		pr_debug("CPU%d removed\n", cpu);
		break;

	/*
	 * case CPU_DEAD:
	 *
	 * When a CPU goes offline, don't free up or invalidate the copy of
	 * the microcode in kernel memory, so that we can reuse it when the
	 * CPU comes back online without unnecessarily requesting it from
	 * userspace again.
	 */
	}

	/* The CPU refused to come up during a system resume */
	if (action == CPU_UP_CANCELED_FROZEN)
		microcode_fini_cpu(cpu);

	return NOTIFY_OK;
}

static struct notifier_block mc_cpu_notifier = {
	.notifier_call	= mc_cpu_callback,
};

static struct attribute *cpu_root_microcode_attrs[] = {
	&dev_attr_reload.attr,
	NULL
};

static struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};

int __init microcode_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (paravirt_enabled() || dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	microcode_pdev = platform_device_register_simple("microcode", -1,
							 NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	error = subsys_interface_register(&mc_cpu_interface);
	if (!error)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (error)
		goto out_pdev;

	error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				   &cpu_root_microcode_group);
	if (error) {
		pr_err("Error creating microcode group!\n");
		goto out_driver;
	}

	error = microcode_dev_init();
	if (error)
		goto out_ucode_group;

	register_syscore_ops(&mc_syscore_ops);
	register_hotcpu_notifier(&mc_cpu_notifier);

	pr_info("Microcode Update Driver: v" MICROCODE_VERSION
		" <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");

	return 0;

 out_ucode_group:
	sysfs_remove_group(&cpu_subsys.dev_root->kobj,
			   &cpu_root_microcode_group);

 out_driver:
	get_online_cpus();
	mutex_lock(&microcode_mutex);

	subsys_interface_unregister(&mc_cpu_interface);

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

 out_pdev:
	platform_device_unregister(microcode_pdev);

	return error;
}
late_initcall(microcode_init);