/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *               2006      Shaohua Li <shaohua.li@intel.com>
 *               2013-2016 Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *      Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *                         H Peter Anvin <hpa@zytor.com>
 *                (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows updating the microcode on x86 processors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode_amd.h>
#include <asm/perf_event.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#define DRIVER_VERSION  "2.2"

static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr = true;

bool initrd_gone;

LIST_HEAD(microcode_cache);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
static DEFINE_MUTEX(microcode_mutex);

struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

struct cpu_info_ctx {
        struct cpu_signature    *cpu_sig;
        int                     err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
        0x01000098,
        0x0100009f,
        0x010000af,
        0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
        u32 lvl, dummy, i;
        u32 *levels;

        native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

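        /*
         * On 32-bit, the early loader can run before paging is enabled,
         * so the table must be accessed through its physical address.
         */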
        if (IS_ENABLED(CONFIG_X86_32))
                levels = (u32 *)__pa_nodebug(&final_levels);
        else
                levels = final_levels;

        for (i = 0; levels[i]; i++) {
                if (lvl == levels[i])
                        return true;
        }
        return false;
}

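/*
 * Decide on the BSP whether the early loader stays disabled: it does if
 * CPUID is unavailable, if we run as a hypervisor guest, if an AMD CPU
 * already sits on a final patch level, or if "dis_ucode_ldr" was given on
 * the kernel command line. Otherwise dis_ucode_ldr gets cleared.
 */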
static bool __init check_loader_disabled_bsp(void)
{
        static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
        const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
        const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
        bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
        const char *cmdline = boot_command_line;
        const char *option  = __dis_opt_str;
        bool *res = &dis_ucode_ldr;
#endif

        if (!have_cpuid_p())
                return *res;

        /*
         * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
         * completely accurate as xen pv guests don't see that CPUID bit set but
         * that's good enough as they don't land on the BSP path anyway.
         */
        if (native_cpuid_ecx(1) & BIT(31))
                return *res;

        if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
                if (amd_check_current_patch_level())
                        return *res;
        }

        if (cmdline_find_option_bool(cmdline, option) <= 0)
                *res = false;

        return *res;
}

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

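/* Look up a firmware blob which was linked into the kernel image itself. */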
bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
#ifdef CONFIG_FW_LOADER
        struct builtin_fw *b_fw;

        for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
                if (!strcmp(name, b_fw->name)) {
                        cd->size = b_fw->size;
                        cd->data = b_fw->data;
                        return true;
                }
        }
#endif
        return false;
}

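/* Load microcode on the boot CPU, as early as possible during boot. */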
void __init load_ucode_bsp(void)
{
        unsigned int cpuid_1_eax;

        if (check_loader_disabled_bsp())
                return;

        cpuid_1_eax = native_cpuid_eax(1);

        switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
                if (x86_family(cpuid_1_eax) >= 6)
                        load_ucode_intel_bsp();
                break;
        case X86_VENDOR_AMD:
                if (x86_family(cpuid_1_eax) >= 0x10)
                        load_ucode_amd_bsp(cpuid_1_eax);
                break;
        default:
                break;
        }
}

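/*
 * The APs reuse the decision made on the BSP; on 32-bit the flag must be
 * read through its physical address since the AP may still run unpaged.
 */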
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
        return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
        return dis_ucode_ldr;
#endif
}

void load_ucode_ap(void)
{
        unsigned int cpuid_1_eax;

        if (check_loader_disabled_ap())
                return;

        cpuid_1_eax = native_cpuid_eax(1);

        switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
                if (x86_family(cpuid_1_eax) >= 6)
                        load_ucode_intel_ap();
                break;
        case X86_VENDOR_AMD:
                if (x86_family(cpuid_1_eax) >= 0x10)
                        load_ucode_amd_ap(cpuid_1_eax);
                break;
        default:
                break;
        }
}

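/*
 * Copy the microcode out of the initrd into a kernel-owned cache so that
 * it remains usable after the initrd memory has been freed.
 */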
static int __init save_microcode_in_initrd(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;
        int ret = -EINVAL;

        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                if (c->x86 >= 6)
                        ret = save_microcode_in_initrd_intel();
                break;
        case X86_VENDOR_AMD:
                if (c->x86 >= 0x10)
                        return save_microcode_in_initrd_amd(cpuid_eax(1));
                break;
        default:
                break;
        }

        initrd_gone = true;

        return ret;
}

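/*
 * Scan the initrd for a microcode blob at @path. With @use_pa set, the
 * caller runs before paging is enabled and everything must be accessed
 * through physical addresses.
 */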
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
{
#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long start = 0;
        size_t size;

#ifdef CONFIG_X86_32
        struct boot_params *params;

        if (use_pa)
                params = (struct boot_params *)__pa_nodebug(&boot_params);
        else
                params = &boot_params;

        size = params->hdr.ramdisk_size;

        /*
         * Set start only if we have an initrd image. We cannot use initrd_start
         * because it is not set that early yet.
         */
        if (size)
                start = params->hdr.ramdisk_image;

#else /* CONFIG_X86_64 */
        size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
        size |= boot_params.hdr.ramdisk_size;

        if (size) {
                start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
                start |= boot_params.hdr.ramdisk_image;

                start += PAGE_OFFSET;
        }
#endif

        /*
         * Fixup the start address: after reserve_initrd() runs, initrd_start
         * has the virtual address of the beginning of the initrd. It also
         * possibly relocates the ramdisk. In either case, initrd_start contains
         * the updated address so use that instead.
         *
         * initrd_gone is for the hotplug case where we've thrown out initrd
         * already.
         */
        if (!use_pa) {
                if (initrd_gone)
                        return (struct cpio_data){ NULL, 0, "" };
                if (initrd_start)
                        start = initrd_start;
        }

        return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
        return (struct cpio_data){ NULL, 0, "" };
#endif
}

void reload_early_microcode(void)
{
        int vendor, family;

        vendor = x86_cpuid_vendor();
        family = x86_cpuid_family();

        switch (vendor) {
        case X86_VENDOR_INTEL:
                if (family >= 6)
                        reload_ucode_intel();
                break;
        case X86_VENDOR_AMD:
                if (family >= 0x10)
                        reload_ucode_amd();
                break;
        default:
                break;
        }
}

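/* Runs on the target CPU, called via smp_call_function_single(). */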
static void collect_cpu_info_local(void *arg)
{
        struct cpu_info_ctx *ctx = arg;

        ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
                                                   ctx->cpu_sig);
}

static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
{
        struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
        int ret;

        ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
        if (!ret)
                ret = ctx.err;

        return ret;
}

static int collect_cpu_info(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        int ret;

        memset(uci, 0, sizeof(*uci));

        ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
        if (!ret)
                uci->valid = 1;

        return ret;
}

struct apply_microcode_ctx {
        int err;
};

static void apply_microcode_local(void *arg)
{
        struct apply_microcode_ctx *ctx = arg;

        ctx->err = microcode_ops->apply_microcode(smp_processor_id());
}

static int apply_microcode_on_target(int cpu)
{
        struct apply_microcode_ctx ctx = { .err = 0 };
        int ret;

        ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
        if (!ret)
                ret = ctx.err;

        return ret;
}

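/*
 * The legacy /dev/cpu/microcode interface: a raw microcode image written
 * to the character device is parsed and applied to all online CPUs.
 */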
#ifdef CONFIG_MICROCODE_OLD_INTERFACE
static int do_microcode_update(const void __user *buf, size_t size)
{
        int error = 0;
        int cpu;

        for_each_online_cpu(cpu) {
                struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
                enum ucode_state ustate;

                if (!uci->valid)
                        continue;

                ustate = microcode_ops->request_microcode_user(cpu, buf, size);
                if (ustate == UCODE_ERROR) {
                        error = -1;
                        break;
                } else if (ustate == UCODE_OK)
                        apply_microcode_on_target(cpu);
        }

        return error;
}

static int microcode_open(struct inode *inode, struct file *file)
{
        return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}

static ssize_t microcode_write(struct file *file, const char __user *buf,
                               size_t len, loff_t *ppos)
{
        ssize_t ret = -EINVAL;

        if ((len >> PAGE_SHIFT) > totalram_pages) {
                pr_err("too much data (max %ld pages)\n", totalram_pages);
                return ret;
        }

        get_online_cpus();
        mutex_lock(&microcode_mutex);

        if (do_microcode_update(buf, len) == 0)
                ret = (ssize_t)len;

        if (ret > 0)
                perf_check_microcode();

        mutex_unlock(&microcode_mutex);
        put_online_cpus();

        return ret;
}

static const struct file_operations microcode_fops = {
        .owner          = THIS_MODULE,
        .write          = microcode_write,
        .open           = microcode_open,
        .llseek         = no_llseek,
};

static struct miscdevice microcode_dev = {
        .minor          = MICROCODE_MINOR,
        .name           = "microcode",
        .nodename       = "cpu/microcode",
        .fops           = &microcode_fops,
};

static int __init microcode_dev_init(void)
{
        int error;

        error = misc_register(&microcode_dev);
        if (error) {
                pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
                return error;
        }

        return 0;
}

static void __exit microcode_dev_exit(void)
{
        misc_deregister(&microcode_dev);
}
#else
#define microcode_dev_init()    0
#define microcode_dev_exit()    do { } while (0)
#endif

/* fake device for request_firmware */
static struct platform_device *microcode_pdev;

static int reload_for_cpu(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        enum ucode_state ustate;
        int err = 0;

        if (!uci->valid)
                return err;

        ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
        if (ustate == UCODE_OK)
                apply_microcode_on_target(cpu);
        else if (ustate == UCODE_ERROR)
                err = -EINVAL;

        return err;
}

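/*
 * Writing "1" to /sys/devices/system/cpu/microcode/reload re-requests the
 * firmware image and reloads the microcode on all online CPUs.
 */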
static ssize_t reload_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t size)
{
        unsigned long val;
        int cpu;
        ssize_t ret = 0, tmp_ret;

        ret = kstrtoul(buf, 0, &val);
        if (ret)
                return ret;

        if (val != 1)
                return size;

        get_online_cpus();
        mutex_lock(&microcode_mutex);

        for_each_online_cpu(cpu) {
                tmp_ret = reload_for_cpu(cpu);
                if (tmp_ret != 0)
                        pr_warn("Error reloading microcode on CPU %d\n", cpu);

                /* save retval of the first encountered reload error */
                if (!ret)
                        ret = tmp_ret;
        }

        if (!ret)
                perf_check_microcode();

        mutex_unlock(&microcode_mutex);
        put_online_cpus();

        if (!ret)
                ret = size;

        return ret;
}

static ssize_t version_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

        return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t pf_show(struct device *dev,
                       struct device_attribute *attr, char *buf)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

        return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR(reload, 0200, NULL, reload_store);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_processor_flags.attr,
        NULL
};

static struct attribute_group mc_attr_group = {
        .attrs  = mc_default_attrs,
        .name   = "microcode",
};

static void microcode_fini_cpu(int cpu)
{
        if (microcode_ops->microcode_fini_cpu)
                microcode_ops->microcode_fini_cpu(cpu);
}

static enum ucode_state microcode_resume_cpu(int cpu)
{
        if (apply_microcode_on_target(cpu))
                return UCODE_ERROR;

        pr_debug("CPU%d updated upon resume\n", cpu);

        return UCODE_OK;
}

static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
        enum ucode_state ustate;
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        if (uci->valid)
                return UCODE_OK;

        if (collect_cpu_info(cpu))
                return UCODE_ERROR;

        /* --dimm. Trigger a delayed update? */
        if (system_state != SYSTEM_RUNNING)
                return UCODE_NFOUND;

        ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
                                                     refresh_fw);
        if (ustate == UCODE_OK) {
                pr_debug("CPU%d updated upon init\n", cpu);
                apply_microcode_on_target(cpu);
        }

        return ustate;
}

static enum ucode_state microcode_update_cpu(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        /* Refresh CPU microcode revision after resume. */
        collect_cpu_info(cpu);

        if (uci->valid)
                return microcode_resume_cpu(cpu);

        return microcode_init_cpu(cpu, false);
}

static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
        int err, cpu = dev->id;

        if (!cpu_online(cpu))
                return 0;

        pr_debug("CPU%d added\n", cpu);

        err = sysfs_create_group(&dev->kobj, &mc_attr_group);
        if (err)
                return err;

        if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
                return -EINVAL;

        return err;
}

static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
        int cpu = dev->id;

        if (!cpu_online(cpu))
                return;

        pr_debug("CPU%d removed\n", cpu);
        microcode_fini_cpu(cpu);
        sysfs_remove_group(&dev->kobj, &mc_attr_group);
}

static struct subsys_interface mc_cpu_interface = {
        .name           = "microcode",
        .subsys         = &cpu_subsys,
        .add_dev        = mc_device_add,
        .remove_dev     = mc_device_remove,
};

/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
        int cpu = smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        if (uci->valid && uci->mc)
                microcode_ops->apply_microcode(cpu);
        else if (!uci->mc)
                reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
        .resume         = mc_bp_resume,
};

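/* CPU hotplug callbacks: (re)apply microcode and manage the sysfs group. */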
static int mc_cpu_online(unsigned int cpu)
{
        struct device *dev;

        dev = get_cpu_device(cpu);
        microcode_update_cpu(cpu);
        pr_debug("CPU%d added\n", cpu);

        if (sysfs_create_group(&dev->kobj, &mc_attr_group))
                pr_err("Failed to create group for CPU%d\n", cpu);

        return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
        struct device *dev;

        dev = get_cpu_device(cpu);
        /* Suspend is in progress, only remove the interface */
        sysfs_remove_group(&dev->kobj, &mc_attr_group);
        pr_debug("CPU%d removed\n", cpu);

        return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
        &dev_attr_reload.attr,
        NULL
};

static struct attribute_group cpu_root_microcode_group = {
        .name  = "microcode",
        .attrs = cpu_root_microcode_attrs,
};

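/*
 * Late initialization: pick the vendor-specific ops, register the platform
 * device used by request_firmware(), and hook up the sysfs, syscore and
 * CPU hotplug interfaces.
 */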
int __init microcode_init(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;
        int error;

        if (dis_ucode_ldr)
                return -EINVAL;

        if (c->x86_vendor == X86_VENDOR_INTEL)
                microcode_ops = init_intel_microcode();
        else if (c->x86_vendor == X86_VENDOR_AMD)
                microcode_ops = init_amd_microcode();
        else
                pr_err("no support for this CPU vendor\n");

        if (!microcode_ops)
                return -ENODEV;

        microcode_pdev = platform_device_register_simple("microcode", -1,
                                                         NULL, 0);
        if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);

        get_online_cpus();
        mutex_lock(&microcode_mutex);

        error = subsys_interface_register(&mc_cpu_interface);
        if (!error)
                perf_check_microcode();
        mutex_unlock(&microcode_mutex);
        put_online_cpus();

        if (error)
                goto out_pdev;

        error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                   &cpu_root_microcode_group);
        if (error) {
                pr_err("Error creating microcode group!\n");
                goto out_driver;
        }

        error = microcode_dev_init();
        if (error)
                goto out_ucode_group;

        register_syscore_ops(&mc_syscore_ops);
        cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
                                  mc_cpu_online, mc_cpu_down_prep);

        pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

        return 0;

 out_ucode_group:
        sysfs_remove_group(&cpu_subsys.dev_root->kobj,
                           &cpu_root_microcode_group);

 out_driver:
        get_online_cpus();
        mutex_lock(&microcode_mutex);

        subsys_interface_unregister(&mc_cpu_interface);

        mutex_unlock(&microcode_mutex);
        put_online_cpus();

 out_pdev:
        platform_device_unregister(microcode_pdev);
        return error;
}

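/*
 * save_microcode_in_initrd() must run before the initrd gets freed later
 * during boot; the driver proper is set up at late_initcall time.
 */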
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);