core.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789
  1. /*
  2. * CPU Microcode Update Driver for Linux
  3. *
  4. * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
  5. * 2006 Shaohua Li <shaohua.li@intel.com>
  6. * 2013-2016 Borislav Petkov <bp@alien8.de>
  7. *
  8. * X86 CPU microcode early update for Linux:
  9. *
  10. * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
  11. * H Peter Anvin <hpa@zytor.com>
  12. * (C) 2015 Borislav Petkov <bp@alien8.de>
  13. *
  14. * This driver allows to upgrade microcode on x86 processors.
  15. *
  16. * This program is free software; you can redistribute it and/or
  17. * modify it under the terms of the GNU General Public License
  18. * as published by the Free Software Foundation; either version
  19. * 2 of the License, or (at your option) any later version.
  20. */
  21. #define pr_fmt(fmt) "microcode: " fmt
  22. #include <linux/platform_device.h>
  23. #include <linux/syscore_ops.h>
  24. #include <linux/miscdevice.h>
  25. #include <linux/capability.h>
  26. #include <linux/firmware.h>
  27. #include <linux/kernel.h>
  28. #include <linux/mutex.h>
  29. #include <linux/cpu.h>
  30. #include <linux/fs.h>
  31. #include <linux/mm.h>
  32. #include <asm/microcode_intel.h>
  33. #include <asm/cpu_device_id.h>
  34. #include <asm/microcode_amd.h>
  35. #include <asm/perf_event.h>
  36. #include <asm/microcode.h>
  37. #include <asm/processor.h>
  38. #include <asm/cmdline.h>
  39. #include <asm/setup.h>
#define DRIVER_VERSION	"2.2"

/* Vendor backend (Intel or AMD) callbacks; chosen in microcode_init(). */
static struct microcode_ops *microcode_ops;
/* Early loader stays disabled until check_loader_disabled_bsp() clears this. */
static bool dis_ucode_ldr = true;

/* Set once the initrd has been released; guards find_microcode_in_initrd(). */
bool initrd_gone;

/* Cache of microcode patches saved from the initrd by the vendor code. */
LIST_HEAD(microcode_cache);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
static DEFINE_MUTEX(microcode_mutex);

/* Per-CPU microcode state: signature, revision and a "valid" flag. */
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

/* Context passed to collect_cpu_info_local() via smp_call_function_single(). */
struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};
/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	/* Current AMD patch level is reported in MSR_AMD64_PATCH_LEVEL. */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	/*
	 * On 32-bit this can run before paging is enabled, so the table
	 * must be accessed through its physical address.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	/* Scan the zero-terminated list of final (non-updatable) levels. */
	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}
/*
 * Decide on the BSP whether the early microcode loader should stay
 * disabled. Returns the (possibly updated) value of dis_ucode_ldr.
 */
static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
	/*
	 * 32-bit runs this before paging is enabled: every pointer,
	 * including the one to the result flag, must be physical.
	 */
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *option = (const char *)__pa_nodebug(__dis_opt_str);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option = __dis_opt_str;
	bool *res = &dis_ucode_ldr;
#endif

	/* No CPUID means no way to identify the CPU: keep the loader off. */
	if (!have_cpuid_p())
		return *res;

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set but
	 * that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return *res;

	/* Some AMD patch levels are final: do not attempt an update then. */
	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return *res;
	}

	/* Enable the loader unless "dis_ucode_ldr" was passed on the cmdline. */
	if (cmdline_find_option_bool(cmdline, option) <= 0)
		*res = false;

	return *res;
}
  123. extern struct builtin_fw __start_builtin_fw[];
  124. extern struct builtin_fw __end_builtin_fw[];
  125. bool get_builtin_firmware(struct cpio_data *cd, const char *name)
  126. {
  127. #ifdef CONFIG_FW_LOADER
  128. struct builtin_fw *b_fw;
  129. for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
  130. if (!strcmp(name, b_fw->name)) {
  131. cd->size = b_fw->size;
  132. cd->data = b_fw->data;
  133. return true;
  134. }
  135. }
  136. #endif
  137. return false;
  138. }
  139. void __init load_ucode_bsp(void)
  140. {
  141. unsigned int cpuid_1_eax;
  142. if (check_loader_disabled_bsp())
  143. return;
  144. cpuid_1_eax = native_cpuid_eax(1);
  145. switch (x86_cpuid_vendor()) {
  146. case X86_VENDOR_INTEL:
  147. if (x86_family(cpuid_1_eax) >= 6)
  148. load_ucode_intel_bsp();
  149. break;
  150. case X86_VENDOR_AMD:
  151. if (x86_family(cpuid_1_eax) >= 0x10)
  152. load_ucode_amd_bsp(cpuid_1_eax);
  153. break;
  154. default:
  155. break;
  156. }
  157. }
/*
 * Read dis_ucode_ldr on an AP. On 32-bit this can run before paging is
 * enabled, so the flag must be read through its physical address.
 */
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}
  166. void load_ucode_ap(void)
  167. {
  168. unsigned int cpuid_1_eax;
  169. if (check_loader_disabled_ap())
  170. return;
  171. cpuid_1_eax = native_cpuid_eax(1);
  172. switch (x86_cpuid_vendor()) {
  173. case X86_VENDOR_INTEL:
  174. if (x86_family(cpuid_1_eax) >= 6)
  175. load_ucode_intel_ap();
  176. break;
  177. case X86_VENDOR_AMD:
  178. if (x86_family(cpuid_1_eax) >= 0x10)
  179. load_ucode_amd_ap(cpuid_1_eax);
  180. break;
  181. default:
  182. break;
  183. }
  184. }
/*
 * Stash microcode patches from the initrd into kernel memory so they
 * survive the initrd being freed. Registered as an fs_initcall.
 */
static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int ret = -EINVAL;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		if (c->x86 >= 6)
			ret = save_microcode_in_initrd_intel();
		break;
	case X86_VENDOR_AMD:
		if (c->x86 >= 0x10)
			/*
			 * NOTE(review): this returns directly, skipping the
			 * initrd_gone assignment below, unlike the Intel
			 * path — confirm that is intended.
			 */
			return save_microcode_in_initrd_amd(cpuid_eax(1));
		break;
	default:
		break;
	}

	/* From here on the initrd may be freed; see find_microcode_in_initrd(). */
	initrd_gone = true;

	return ret;
}
/*
 * find_microcode_in_initrd() - locate a microcode blob inside the initrd.
 * @path:   cpio path of the microcode file to look for
 * @use_pa: true when running before paging is up, i.e. all pointers -
 *          including those to globals - must be physical addresses
 *
 * Returns a cpio_data describing the blob, or { NULL, 0, "" } when there
 * is no initrd, it is already gone, or the file is not in it.
 */
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *params;

	if (use_pa)
		params = (struct boot_params *)__pa_nodebug(&boot_params);
	else
		params = &boot_params;

	size = params->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use initrd_start
	 * because it is not set that early yet.
	 */
	if (size)
		start = params->hdr.ramdisk_image;

# else /* CONFIG_X86_64 */
	/* 64-bit carries the upper halves in ext_ramdisk_{size,image}. */
	size = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;

		start += PAGE_OFFSET;
	}
# endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 *
	 * initrd_gone is for the hotplug case where we've thrown out initrd
	 * already.
	 */
	if (!use_pa) {
		if (initrd_gone)
			return (struct cpio_data){ NULL, 0, "" };
		if (initrd_start)
			start = initrd_start;
	} else {
		/*
		 * The picture with physical addresses is a bit different: we
		 * need to get the *physical* address to which the ramdisk was
		 * relocated, i.e., relocated_ramdisk (not initrd_start) and
		 * since we're running from physical addresses, we need to access
		 * relocated_ramdisk through its *physical* address too.
		 */
		u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
		if (*rr)
			start = *rr;
	}

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
  262. void reload_early_microcode(void)
  263. {
  264. int vendor, family;
  265. vendor = x86_cpuid_vendor();
  266. family = x86_cpuid_family();
  267. switch (vendor) {
  268. case X86_VENDOR_INTEL:
  269. if (family >= 6)
  270. reload_ucode_intel();
  271. break;
  272. case X86_VENDOR_AMD:
  273. if (family >= 0x10)
  274. reload_ucode_amd();
  275. break;
  276. default:
  277. break;
  278. }
  279. }
  280. static void collect_cpu_info_local(void *arg)
  281. {
  282. struct cpu_info_ctx *ctx = arg;
  283. ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
  284. ctx->cpu_sig);
  285. }
  286. static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
  287. {
  288. struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
  289. int ret;
  290. ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
  291. if (!ret)
  292. ret = ctx.err;
  293. return ret;
  294. }
  295. static int collect_cpu_info(int cpu)
  296. {
  297. struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  298. int ret;
  299. memset(uci, 0, sizeof(*uci));
  300. ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
  301. if (!ret)
  302. uci->valid = 1;
  303. return ret;
  304. }
  305. struct apply_microcode_ctx {
  306. int err;
  307. };
  308. static void apply_microcode_local(void *arg)
  309. {
  310. struct apply_microcode_ctx *ctx = arg;
  311. ctx->err = microcode_ops->apply_microcode(smp_processor_id());
  312. }
  313. static int apply_microcode_on_target(int cpu)
  314. {
  315. struct apply_microcode_ctx ctx = { .err = 0 };
  316. int ret;
  317. ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
  318. if (!ret)
  319. ret = ctx.err;
  320. return ret;
  321. }
  322. #ifdef CONFIG_MICROCODE_OLD_INTERFACE
  323. static int do_microcode_update(const void __user *buf, size_t size)
  324. {
  325. int error = 0;
  326. int cpu;
  327. for_each_online_cpu(cpu) {
  328. struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  329. enum ucode_state ustate;
  330. if (!uci->valid)
  331. continue;
  332. ustate = microcode_ops->request_microcode_user(cpu, buf, size);
  333. if (ustate == UCODE_ERROR) {
  334. error = -1;
  335. break;
  336. } else if (ustate == UCODE_OK)
  337. apply_microcode_on_target(cpu);
  338. }
  339. return error;
  340. }
  341. static int microcode_open(struct inode *inode, struct file *file)
  342. {
  343. return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
  344. }
  345. static ssize_t microcode_write(struct file *file, const char __user *buf,
  346. size_t len, loff_t *ppos)
  347. {
  348. ssize_t ret = -EINVAL;
  349. if ((len >> PAGE_SHIFT) > totalram_pages) {
  350. pr_err("too much data (max %ld pages)\n", totalram_pages);
  351. return ret;
  352. }
  353. get_online_cpus();
  354. mutex_lock(&microcode_mutex);
  355. if (do_microcode_update(buf, len) == 0)
  356. ret = (ssize_t)len;
  357. if (ret > 0)
  358. perf_check_microcode();
  359. mutex_unlock(&microcode_mutex);
  360. put_online_cpus();
  361. return ret;
  362. }
/* File operations for the legacy /dev/cpu/microcode character device. */
static const struct file_operations microcode_fops = {
	.owner		= THIS_MODULE,
	.write		= microcode_write,
	.open		= microcode_open,
	.llseek		= no_llseek,
};

/* Misc device node /dev/cpu/microcode with fixed minor MICROCODE_MINOR. */
static struct miscdevice microcode_dev = {
	.minor		= MICROCODE_MINOR,
	.name		= "microcode",
	.nodename	= "cpu/microcode",
	.fops		= &microcode_fops,
};
/* Register the legacy misc device; returns 0 or the misc_register() error. */
static int __init microcode_dev_init(void)
{
	int error;

	error = misc_register(&microcode_dev);
	if (error) {
		pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
		return error;
	}

	return 0;
}

/* Unregister the legacy misc device on module exit. */
static void __exit microcode_dev_exit(void)
{
	misc_deregister(&microcode_dev);
}
#else
/* Old interface not configured: stub out the device init/exit. */
#define microcode_dev_init()	0
#define microcode_dev_exit()	do { } while (0)
#endif
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;
  395. static int reload_for_cpu(int cpu)
  396. {
  397. struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  398. enum ucode_state ustate;
  399. int err = 0;
  400. if (!uci->valid)
  401. return err;
  402. ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
  403. if (ustate == UCODE_OK)
  404. apply_microcode_on_target(cpu);
  405. else
  406. if (ustate == UCODE_ERROR)
  407. err = -EINVAL;
  408. return err;
  409. }
  410. static ssize_t reload_store(struct device *dev,
  411. struct device_attribute *attr,
  412. const char *buf, size_t size)
  413. {
  414. unsigned long val;
  415. int cpu;
  416. ssize_t ret = 0, tmp_ret;
  417. ret = kstrtoul(buf, 0, &val);
  418. if (ret)
  419. return ret;
  420. if (val != 1)
  421. return size;
  422. get_online_cpus();
  423. mutex_lock(&microcode_mutex);
  424. for_each_online_cpu(cpu) {
  425. tmp_ret = reload_for_cpu(cpu);
  426. if (tmp_ret != 0)
  427. pr_warn("Error reloading microcode on CPU %d\n", cpu);
  428. /* save retval of the first encountered reload error */
  429. if (!ret)
  430. ret = tmp_ret;
  431. }
  432. if (!ret)
  433. perf_check_microcode();
  434. mutex_unlock(&microcode_mutex);
  435. put_online_cpus();
  436. if (!ret)
  437. ret = size;
  438. return ret;
  439. }
  440. static ssize_t version_show(struct device *dev,
  441. struct device_attribute *attr, char *buf)
  442. {
  443. struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
  444. return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
  445. }
  446. static ssize_t pf_show(struct device *dev,
  447. struct device_attribute *attr, char *buf)
  448. {
  449. struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
  450. return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
  451. }
/* Per-CPU sysfs attributes, exposed under .../cpuN/microcode/. */
static DEVICE_ATTR(reload, 0200, NULL, reload_store);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static struct attribute_group mc_attr_group = {
	.attrs		= mc_default_attrs,
	.name		= "microcode",
};
  464. static void microcode_fini_cpu(int cpu)
  465. {
  466. if (microcode_ops->microcode_fini_cpu)
  467. microcode_ops->microcode_fini_cpu(cpu);
  468. }
  469. static enum ucode_state microcode_resume_cpu(int cpu)
  470. {
  471. if (apply_microcode_on_target(cpu))
  472. return UCODE_ERROR;
  473. pr_debug("CPU%d updated upon resume\n", cpu);
  474. return UCODE_OK;
  475. }
/*
 * Collect @cpu's signature and, unless its state is already valid,
 * request a microcode image via the firmware loader and apply it.
 *
 * @refresh_fw is passed through to the vendor's request_microcode_fw()
 * hook (presumably asking it to re-read the image from the filesystem;
 * confirm against the vendor implementations).
 */
static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
	enum ucode_state ustate;
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid)
		return UCODE_OK;

	if (collect_cpu_info(cpu))
		return UCODE_ERROR;

	/* Don't invoke the firmware loader unless the system is running. */
	if (system_state != SYSTEM_RUNNING)
		return UCODE_NFOUND;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
						     refresh_fw);
	if (ustate == UCODE_OK) {
		pr_debug("CPU%d updated upon init\n", cpu);
		apply_microcode_on_target(cpu);
	}

	return ustate;
}
  495. static enum ucode_state microcode_update_cpu(int cpu)
  496. {
  497. struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  498. /* Refresh CPU microcode revision after resume. */
  499. collect_cpu_info(cpu);
  500. if (uci->valid)
  501. return microcode_resume_cpu(cpu);
  502. return microcode_init_cpu(cpu, false);
  503. }
  504. static int mc_device_add(struct device *dev, struct subsys_interface *sif)
  505. {
  506. int err, cpu = dev->id;
  507. if (!cpu_online(cpu))
  508. return 0;
  509. pr_debug("CPU%d added\n", cpu);
  510. err = sysfs_create_group(&dev->kobj, &mc_attr_group);
  511. if (err)
  512. return err;
  513. if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
  514. return -EINVAL;
  515. return err;
  516. }
/*
 * subsys_interface remove hook: tear down vendor state and the sysfs
 * group of a departing CPU device.
 */
static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
	int cpu = dev->id;

	if (!cpu_online(cpu))
		return;

	pr_debug("CPU%d removed\n", cpu);
	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
}
/* Hooks CPU device add/remove into the cpu subsystem. */
static struct subsys_interface mc_cpu_interface = {
	.name		= "microcode",
	.subsys		= &cpu_subsys,
	.add_dev	= mc_device_add,
	.remove_dev	= mc_device_remove,
};
/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Re-apply the cached late-loaded patch, if there is one ... */
	if (uci->valid && uci->mc)
		microcode_ops->apply_microcode(cpu);
	/* ... otherwise fall back to the early loader's saved copy. */
	else if (!uci->mc)
		reload_early_microcode();
}

/* Microcode must be re-applied after S3/S4, hence the resume hook. */
static struct syscore_ops mc_syscore_ops = {
	.resume		= mc_bp_resume,
};
  547. static int mc_cpu_online(unsigned int cpu)
  548. {
  549. struct device *dev;
  550. dev = get_cpu_device(cpu);
  551. microcode_update_cpu(cpu);
  552. pr_debug("CPU%d added\n", cpu);
  553. if (sysfs_create_group(&dev->kobj, &mc_attr_group))
  554. pr_err("Failed to create group for CPU%d\n", cpu);
  555. return 0;
  556. }
  557. static int mc_cpu_down_prep(unsigned int cpu)
  558. {
  559. struct device *dev;
  560. dev = get_cpu_device(cpu);
  561. /* Suspend is in progress, only remove the interface */
  562. sysfs_remove_group(&dev->kobj, &mc_attr_group);
  563. pr_debug("CPU%d removed\n", cpu);
  564. return 0;
  565. }
/* Global "reload" control under /sys/devices/system/cpu/microcode/. */
static struct attribute *cpu_root_microcode_attrs[] = {
	&dev_attr_reload.attr,
	NULL
};

static struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};
  574. int __init microcode_init(void)
  575. {
  576. struct cpuinfo_x86 *c = &boot_cpu_data;
  577. int error;
  578. if (dis_ucode_ldr)
  579. return -EINVAL;
  580. if (c->x86_vendor == X86_VENDOR_INTEL)
  581. microcode_ops = init_intel_microcode();
  582. else if (c->x86_vendor == X86_VENDOR_AMD)
  583. microcode_ops = init_amd_microcode();
  584. else
  585. pr_err("no support for this CPU vendor\n");
  586. if (!microcode_ops)
  587. return -ENODEV;
  588. microcode_pdev = platform_device_register_simple("microcode", -1,
  589. NULL, 0);
  590. if (IS_ERR(microcode_pdev))
  591. return PTR_ERR(microcode_pdev);
  592. get_online_cpus();
  593. mutex_lock(&microcode_mutex);
  594. error = subsys_interface_register(&mc_cpu_interface);
  595. if (!error)
  596. perf_check_microcode();
  597. mutex_unlock(&microcode_mutex);
  598. put_online_cpus();
  599. if (error)
  600. goto out_pdev;
  601. error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
  602. &cpu_root_microcode_group);
  603. if (error) {
  604. pr_err("Error creating microcode group!\n");
  605. goto out_driver;
  606. }
  607. error = microcode_dev_init();
  608. if (error)
  609. goto out_ucode_group;
  610. register_syscore_ops(&mc_syscore_ops);
  611. cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
  612. mc_cpu_online, mc_cpu_down_prep);
  613. pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
  614. return 0;
  615. out_ucode_group:
  616. sysfs_remove_group(&cpu_subsys.dev_root->kobj,
  617. &cpu_root_microcode_group);
  618. out_driver:
  619. get_online_cpus();
  620. mutex_lock(&microcode_mutex);
  621. subsys_interface_unregister(&mc_cpu_interface);
  622. mutex_unlock(&microcode_mutex);
  623. put_online_cpus();
  624. out_pdev:
  625. platform_device_unregister(microcode_pdev);
  626. return error;
  627. }
/* Save initrd microcode before the initrd is freed ... */
fs_initcall(save_microcode_in_initrd);
/* ... and bring up the full driver late in boot. */
late_initcall(microcode_init);