core.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798
  1. /*
  2. * CPU Microcode Update Driver for Linux
  3. *
  4. * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
  5. * 2006 Shaohua Li <shaohua.li@intel.com>
  6. * 2013-2016 Borislav Petkov <bp@alien8.de>
  7. *
  8. * X86 CPU microcode early update for Linux:
  9. *
  10. * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 * H Peter Anvin <hpa@zytor.com>
  12. * (C) 2015 Borislav Petkov <bp@alien8.de>
  13. *
  14. * This driver allows to upgrade microcode on x86 processors.
  15. *
  16. * This program is free software; you can redistribute it and/or
  17. * modify it under the terms of the GNU General Public License
  18. * as published by the Free Software Foundation; either version
  19. * 2 of the License, or (at your option) any later version.
  20. */
  21. #define pr_fmt(fmt) "microcode: " fmt
  22. #include <linux/platform_device.h>
  23. #include <linux/syscore_ops.h>
  24. #include <linux/miscdevice.h>
  25. #include <linux/capability.h>
  26. #include <linux/firmware.h>
  27. #include <linux/kernel.h>
  28. #include <linux/mutex.h>
  29. #include <linux/cpu.h>
  30. #include <linux/fs.h>
  31. #include <linux/mm.h>
  32. #include <asm/microcode_intel.h>
  33. #include <asm/cpu_device_id.h>
  34. #include <asm/microcode_amd.h>
  35. #include <asm/perf_event.h>
  36. #include <asm/microcode.h>
  37. #include <asm/processor.h>
  38. #include <asm/cmdline.h>
  39. #include <asm/setup.h>
  40. #define DRIVER_VERSION "2.2"
  41. static struct microcode_ops *microcode_ops;
  42. static bool dis_ucode_ldr = true;
  43. bool initrd_gone;
  44. LIST_HEAD(microcode_cache);
  45. /*
  46. * Synchronization.
  47. *
  48. * All non cpu-hotplug-callback call sites use:
  49. *
  50. * - microcode_mutex to synchronize with each other;
  51. * - get/put_online_cpus() to synchronize with
  52. * the cpu-hotplug-callback call sites.
  53. *
  54. * We guarantee that only a single cpu is being
  55. * updated at any particular moment of time.
  56. */
  57. static DEFINE_MUTEX(microcode_mutex);
  58. struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
/*
 * Context handed to collect_cpu_info_local() through
 * smp_call_function_single(): output buffer for the CPU signature plus the
 * error code returned by the vendor driver's ->collect_cpu_info().
 */
struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;	/* out: collected CPU signature */
	int			err;		/* out: ->collect_cpu_info() result */
};
/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 * Compared against the value read from MSR_AMD64_PATCH_LEVEL in
 * amd_check_current_patch_level(); the list is zero-terminated.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};
/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	/* Current microcode patch level of this CPU. */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	/*
	 * On 32-bit this can run before the kernel page tables are usable, so
	 * the table has to be reached through its physical address.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	/* Stop the update if the CPU is on one of the final patch levels. */
	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}
/*
 * Decide on the BSP whether the early microcode loader is disabled.
 *
 * dis_ucode_ldr defaults to true; it is cleared only when the
 * "dis_ucode_ldr" option is NOT found on the kernel command line, i.e.
 * the loader is enabled unless explicitly disabled, and stays disabled
 * under hypervisors and on AMD CPUs already at a final patch level.
 */
static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
	/* 32-bit runs here with physical addresses - translate everything. */
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option  = __dis_opt_str;
	bool *res = &dis_ucode_ldr;
#endif

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set but
	 * that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return *res;

	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return *res;
	}

	/* Option absent: enable the loader. */
	if (cmdline_find_option_bool(cmdline, option) <= 0)
		*res = false;

	return *res;
}
  121. extern struct builtin_fw __start_builtin_fw[];
  122. extern struct builtin_fw __end_builtin_fw[];
  123. bool get_builtin_firmware(struct cpio_data *cd, const char *name)
  124. {
  125. #ifdef CONFIG_FW_LOADER
  126. struct builtin_fw *b_fw;
  127. for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
  128. if (!strcmp(name, b_fw->name)) {
  129. cd->size = b_fw->size;
  130. cd->data = b_fw->data;
  131. return true;
  132. }
  133. }
  134. #endif
  135. return false;
  136. }
  137. void __init load_ucode_bsp(void)
  138. {
  139. unsigned int cpuid_1_eax;
  140. bool intel = true;
  141. if (!have_cpuid_p())
  142. return;
  143. cpuid_1_eax = native_cpuid_eax(1);
  144. switch (x86_cpuid_vendor()) {
  145. case X86_VENDOR_INTEL:
  146. if (x86_family(cpuid_1_eax) < 6)
  147. return;
  148. break;
  149. case X86_VENDOR_AMD:
  150. if (x86_family(cpuid_1_eax) < 0x10)
  151. return;
  152. intel = false;
  153. break;
  154. default:
  155. return;
  156. }
  157. if (check_loader_disabled_bsp())
  158. return;
  159. if (intel)
  160. load_ucode_intel_bsp();
  161. else
  162. load_ucode_amd_bsp(cpuid_1_eax);
  163. }
/*
 * AP-side query of the loader-disabled flag computed on the BSP.
 * The 32-bit path reads the flag through its physical address
 * (__pa_nodebug) - APs come here before switching to the kernel
 * page tables.
 */
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}
  172. void load_ucode_ap(void)
  173. {
  174. unsigned int cpuid_1_eax;
  175. if (check_loader_disabled_ap())
  176. return;
  177. cpuid_1_eax = native_cpuid_eax(1);
  178. switch (x86_cpuid_vendor()) {
  179. case X86_VENDOR_INTEL:
  180. if (x86_family(cpuid_1_eax) >= 6)
  181. load_ucode_intel_ap();
  182. break;
  183. case X86_VENDOR_AMD:
  184. if (x86_family(cpuid_1_eax) >= 0x10)
  185. load_ucode_amd_ap(cpuid_1_eax);
  186. break;
  187. default:
  188. break;
  189. }
  190. }
  191. static int __init save_microcode_in_initrd(void)
  192. {
  193. struct cpuinfo_x86 *c = &boot_cpu_data;
  194. int ret = -EINVAL;
  195. switch (c->x86_vendor) {
  196. case X86_VENDOR_INTEL:
  197. if (c->x86 >= 6)
  198. ret = save_microcode_in_initrd_intel();
  199. break;
  200. case X86_VENDOR_AMD:
  201. if (c->x86 >= 0x10)
  202. return save_microcode_in_initrd_amd(cpuid_eax(1));
  203. break;
  204. default:
  205. break;
  206. }
  207. initrd_gone = true;
  208. return ret;
  209. }
/*
 * Locate a microcode file at @path inside the initrd image.
 *
 * @use_pa: true when the caller runs with physical addresses (32-bit early
 *          boot), in which case all globals must be reached via __pa_nodebug.
 *
 * Returns a cpio_data describing the file, or { NULL, 0, "" } when there is
 * no initrd, it is already gone, or CONFIG_BLK_DEV_INITRD is off.
 */
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *params;

	if (use_pa)
		params = (struct boot_params *)__pa_nodebug(&boot_params);
	else
		params = &boot_params;

	size = params->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use initrd_start
	 * because it is not set that early yet.
	 */
	if (size)
		start = params->hdr.ramdisk_image;

# else /* CONFIG_X86_64 */
	/* 64-bit: ramdisk address/size are split into ext_* high words. */
	size = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;

		start += PAGE_OFFSET;
	}
# endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 *
	 * initrd_gone is for the hotplug case where we've thrown out initrd
	 * already.
	 */
	if (!use_pa) {
		if (initrd_gone)
			return (struct cpio_data){ NULL, 0, "" };
		if (initrd_start)
			start = initrd_start;
	} else {
		/*
		 * The picture with physical addresses is a bit different: we
		 * need to get the *physical* address to which the ramdisk was
		 * relocated, i.e., relocated_ramdisk (not initrd_start) and
		 * since we're running from physical addresses, we need to access
		 * relocated_ramdisk through its *physical* address too.
		 */
		u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
		if (*rr)
			start = *rr;
	}

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
  268. void reload_early_microcode(void)
  269. {
  270. int vendor, family;
  271. vendor = x86_cpuid_vendor();
  272. family = x86_cpuid_family();
  273. switch (vendor) {
  274. case X86_VENDOR_INTEL:
  275. if (family >= 6)
  276. reload_ucode_intel();
  277. break;
  278. case X86_VENDOR_AMD:
  279. if (family >= 0x10)
  280. reload_ucode_amd();
  281. break;
  282. default:
  283. break;
  284. }
  285. }
/*
 * SMP callback: collect this CPU's signature into the cpu_info_ctx passed
 * as @arg. Runs on the target CPU via smp_call_function_single().
 */
static void collect_cpu_info_local(void *arg)
{
	struct cpu_info_ctx *ctx = arg;

	ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
						   ctx->cpu_sig);
}
  292. static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
  293. {
  294. struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
  295. int ret;
  296. ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
  297. if (!ret)
  298. ret = ctx.err;
  299. return ret;
  300. }
  301. static int collect_cpu_info(int cpu)
  302. {
  303. struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  304. int ret;
  305. memset(uci, 0, sizeof(*uci));
  306. ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
  307. if (!ret)
  308. uci->valid = 1;
  309. return ret;
  310. }
/* Result slot for apply_microcode_local() run via smp_call_function_single(). */
struct apply_microcode_ctx {
	int err;	/* out: ->apply_microcode() result */
};
/* SMP callback: apply the staged microcode on the CPU this runs on. */
static void apply_microcode_local(void *arg)
{
	struct apply_microcode_ctx *ctx = arg;

	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
}
  319. static int apply_microcode_on_target(int cpu)
  320. {
  321. struct apply_microcode_ctx ctx = { .err = 0 };
  322. int ret;
  323. ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
  324. if (!ret)
  325. ret = ctx.err;
  326. return ret;
  327. }
  328. #ifdef CONFIG_MICROCODE_OLD_INTERFACE
/*
 * Old /dev interface: feed the user-supplied image to every online CPU
 * whose signature has been collected, applying it where the vendor driver
 * accepts it. Returns 0 on success, -1 on the first UCODE_ERROR.
 */
static int do_microcode_update(const void __user *buf, size_t size)
{
	int error = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
		enum ucode_state ustate;

		/* Skip CPUs whose info was never collected. */
		if (!uci->valid)
			continue;

		ustate = microcode_ops->request_microcode_user(cpu, buf, size);
		if (ustate == UCODE_ERROR) {
			error = -1;
			break;
		} else if (ustate == UCODE_OK)
			apply_microcode_on_target(cpu);
	}

	return error;
}
/* Only CAP_SYS_RAWIO holders may open the update device; it is unseekable. */
static int microcode_open(struct inode *inode, struct file *file)
{
	return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}
  351. static ssize_t microcode_write(struct file *file, const char __user *buf,
  352. size_t len, loff_t *ppos)
  353. {
  354. ssize_t ret = -EINVAL;
  355. if ((len >> PAGE_SHIFT) > totalram_pages) {
  356. pr_err("too much data (max %ld pages)\n", totalram_pages);
  357. return ret;
  358. }
  359. get_online_cpus();
  360. mutex_lock(&microcode_mutex);
  361. if (do_microcode_update(buf, len) == 0)
  362. ret = (ssize_t)len;
  363. if (ret > 0)
  364. perf_check_microcode();
  365. mutex_unlock(&microcode_mutex);
  366. put_online_cpus();
  367. return ret;
  368. }
/* File operations of the old /dev/cpu/microcode update interface. */
static const struct file_operations microcode_fops = {
	.owner		= THIS_MODULE,
	.write		= microcode_write,
	.open		= microcode_open,
	.llseek		= no_llseek,
};
/* Misc character device backing /dev/cpu/microcode. */
static struct miscdevice microcode_dev = {
	.minor		= MICROCODE_MINOR,
	.name		= "microcode",
	.nodename	= "cpu/microcode",
	.fops		= &microcode_fops,
};
  381. static int __init microcode_dev_init(void)
  382. {
  383. int error;
  384. error = misc_register(&microcode_dev);
  385. if (error) {
  386. pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
  387. return error;
  388. }
  389. return 0;
  390. }
/* Unregister the old /dev/cpu/microcode interface. */
static void __exit microcode_dev_exit(void)
{
	misc_deregister(&microcode_dev);
}
  395. #else
  396. #define microcode_dev_init() 0
  397. #define microcode_dev_exit() do { } while (0)
  398. #endif
  399. /* fake device for request_firmware */
  400. static struct platform_device *microcode_pdev;
  401. static int reload_for_cpu(int cpu)
  402. {
  403. struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  404. enum ucode_state ustate;
  405. int err = 0;
  406. if (!uci->valid)
  407. return err;
  408. ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
  409. if (ustate == UCODE_OK)
  410. apply_microcode_on_target(cpu);
  411. else
  412. if (ustate == UCODE_ERROR)
  413. err = -EINVAL;
  414. return err;
  415. }
/*
 * sysfs "reload" store handler: writing "1" triggers a microcode reload on
 * every online CPU; any other value is accepted and ignored. Returns the
 * consumed size on success or the first reload error.
 */
static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	int cpu;
	ssize_t ret = 0, tmp_ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return size;

	/* Serialize against hotplug and other update paths - see top of file. */
	get_online_cpus();
	mutex_lock(&microcode_mutex);

	for_each_online_cpu(cpu) {
		tmp_ret = reload_for_cpu(cpu);
		if (tmp_ret != 0)
			pr_warn("Error reloading microcode on CPU %d\n", cpu);

		/* save retval of the first encountered reload error */
		if (!ret)
			ret = tmp_ret;
	}

	/* Let perf re-read microcode-dependent state only on full success. */
	if (!ret)
		perf_check_microcode();

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (!ret)
		ret = size;

	return ret;
}
  446. static ssize_t version_show(struct device *dev,
  447. struct device_attribute *attr, char *buf)
  448. {
  449. struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
  450. return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
  451. }
  452. static ssize_t pf_show(struct device *dev,
  453. struct device_attribute *attr, char *buf)
  454. {
  455. struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
  456. return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
  457. }
/* Per-CPU sysfs files: "reload" is root-write-only, the rest root-read-only. */
static DEVICE_ATTR(reload, 0200, NULL, reload_store);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);

/* Attributes exposed per CPU under .../cpuN/microcode/. */
static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs		= mc_default_attrs,
	.name		= "microcode",
};
/* Run the vendor driver's per-CPU teardown hook, when it provides one. */
static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}
  475. static enum ucode_state microcode_resume_cpu(int cpu)
  476. {
  477. if (apply_microcode_on_target(cpu))
  478. return UCODE_ERROR;
  479. pr_debug("CPU%d updated upon resume\n", cpu);
  480. return UCODE_OK;
  481. }
/*
 * First-time microcode setup for @cpu: collect its signature and, once the
 * system is running (userspace/firmware loader available), request and
 * apply an update.
 *
 * @refresh_fw is forwarded to the vendor driver's ->request_microcode_fw().
 */
static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
	enum ucode_state ustate;
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Already initialized - nothing to do. */
	if (uci->valid)
		return UCODE_OK;

	if (collect_cpu_info(cpu))
		return UCODE_ERROR;

	/* --dimm. Trigger a delayed update? */
	if (system_state != SYSTEM_RUNNING)
		return UCODE_NFOUND;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
						     refresh_fw);

	if (ustate == UCODE_OK) {
		pr_debug("CPU%d updated upon init\n", cpu);
		apply_microcode_on_target(cpu);
	}

	return ustate;
}
/*
 * Bring @cpu's microcode up to date on online/resume: refresh the cached
 * revision, then either re-apply the staged patch or do a full init.
 */
static enum ucode_state microcode_update_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Refresh CPU microcode revision after resume. */
	collect_cpu_info(cpu);

	if (uci->valid)
		return microcode_resume_cpu(cpu);

	return microcode_init_cpu(cpu, false);
}
  510. static int mc_device_add(struct device *dev, struct subsys_interface *sif)
  511. {
  512. int err, cpu = dev->id;
  513. if (!cpu_online(cpu))
  514. return 0;
  515. pr_debug("CPU%d added\n", cpu);
  516. err = sysfs_create_group(&dev->kobj, &mc_attr_group);
  517. if (err)
  518. return err;
  519. if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
  520. return -EINVAL;
  521. return err;
  522. }
/*
 * subsys_interface remove callback: vendor-specific teardown, then drop the
 * per-CPU sysfs group.
 */
static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
	int cpu = dev->id;

	if (!cpu_online(cpu))
		return;

	pr_debug("CPU%d removed\n", cpu);
	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
}
/* Hook into the CPU subsystem so CPU devices get add/remove callbacks. */
static struct subsys_interface mc_cpu_interface = {
	.name			= "microcode",
	.subsys			= &cpu_subsys,
	.add_dev		= mc_device_add,
	.remove_dev		= mc_device_remove,
};
/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Re-apply the cached patch if there is one, else redo early loading. */
	if (uci->valid && uci->mc)
		microcode_ops->apply_microcode(cpu);
	else if (!uci->mc)
		reload_early_microcode();
}
/* Resume hook: restore the boot CPU's microcode after suspend. */
static struct syscore_ops mc_syscore_ops = {
	.resume			= mc_bp_resume,
};
  553. static int mc_cpu_online(unsigned int cpu)
  554. {
  555. struct device *dev;
  556. dev = get_cpu_device(cpu);
  557. microcode_update_cpu(cpu);
  558. pr_debug("CPU%d added\n", cpu);
  559. if (sysfs_create_group(&dev->kobj, &mc_attr_group))
  560. pr_err("Failed to create group for CPU%d\n", cpu);
  561. return 0;
  562. }
  563. static int mc_cpu_down_prep(unsigned int cpu)
  564. {
  565. struct device *dev;
  566. dev = get_cpu_device(cpu);
  567. /* Suspend is in progress, only remove the interface */
  568. sysfs_remove_group(&dev->kobj, &mc_attr_group);
  569. pr_debug("CPU%d removed\n", cpu);
  570. return 0;
  571. }
/* System-wide files under /sys/devices/system/cpu/microcode/. */
static struct attribute *cpu_root_microcode_attrs[] = {
	&dev_attr_reload.attr,
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name		= "microcode",
	.attrs		= cpu_root_microcode_attrs,
};
  580. int __init microcode_init(void)
  581. {
  582. struct cpuinfo_x86 *c = &boot_cpu_data;
  583. int error;
  584. if (dis_ucode_ldr)
  585. return -EINVAL;
  586. if (c->x86_vendor == X86_VENDOR_INTEL)
  587. microcode_ops = init_intel_microcode();
  588. else if (c->x86_vendor == X86_VENDOR_AMD)
  589. microcode_ops = init_amd_microcode();
  590. else
  591. pr_err("no support for this CPU vendor\n");
  592. if (!microcode_ops)
  593. return -ENODEV;
  594. microcode_pdev = platform_device_register_simple("microcode", -1,
  595. NULL, 0);
  596. if (IS_ERR(microcode_pdev))
  597. return PTR_ERR(microcode_pdev);
  598. get_online_cpus();
  599. mutex_lock(&microcode_mutex);
  600. error = subsys_interface_register(&mc_cpu_interface);
  601. if (!error)
  602. perf_check_microcode();
  603. mutex_unlock(&microcode_mutex);
  604. put_online_cpus();
  605. if (error)
  606. goto out_pdev;
  607. error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
  608. &cpu_root_microcode_group);
  609. if (error) {
  610. pr_err("Error creating microcode group!\n");
  611. goto out_driver;
  612. }
  613. error = microcode_dev_init();
  614. if (error)
  615. goto out_ucode_group;
  616. register_syscore_ops(&mc_syscore_ops);
  617. cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
  618. mc_cpu_online, mc_cpu_down_prep);
  619. pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
  620. return 0;
  621. out_ucode_group:
  622. sysfs_remove_group(&cpu_subsys.dev_root->kobj,
  623. &cpu_root_microcode_group);
  624. out_driver:
  625. get_online_cpus();
  626. mutex_lock(&microcode_mutex);
  627. subsys_interface_unregister(&mc_cpu_interface);
  628. mutex_unlock(&microcode_mutex);
  629. put_online_cpus();
  630. out_pdev:
  631. platform_device_unregister(microcode_pdev);
  632. return error;
  633. }
  634. fs_initcall(save_microcode_in_initrd);
  635. late_initcall(microcode_init);