core.c
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *               2006      Shaohua Li <shaohua.li@intel.com>
 *               2013-2016 Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *                    H Peter Anvin <hpa@zytor.com>
 *               (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows updating microcode on x86 processors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode_amd.h>
#include <asm/perf_event.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#define DRIVER_VERSION  "2.2"
static struct microcode_ops *microcode_ops;
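/*
 * The loader starts out disabled; unless "dis_ucode_ldr" is passed on the
 * kernel command line, check_loader_disabled_bsp() flips this to false.
 * Under a hypervisor it stays disabled.
 */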
static bool dis_ucode_ldr = true;

bool initrd_gone;

LIST_HEAD(microcode_cache);
/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
static DEFINE_MUTEX(microcode_mutex);
/*
 * Serialize late loading so that CPUs get updated one-by-one.
 */
static DEFINE_RAW_SPINLOCK(update_lock);

struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

struct cpu_info_ctx {
        struct cpu_signature    *cpu_sig;
        int                     err;
};
/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
        0x01000098,
        0x0100009f,
        0x010000af,
        0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
        u32 lvl, dummy, i;
        u32 *levels;

        native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

        if (IS_ENABLED(CONFIG_X86_32))
                levels = (u32 *)__pa_nodebug(&final_levels);
        else
                levels = final_levels;

        for (i = 0; levels[i]; i++) {
                if (lvl == levels[i])
                        return true;
        }
        return false;
}
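/*
 * Note: on 32-bit this runs before paging is enabled, which is why the
 * command line, the option string and dis_ucode_ldr are all accessed
 * through their physical addresses via __pa_nodebug().
 */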
static bool __init check_loader_disabled_bsp(void)
{
        static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
        const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
        const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
        bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
        const char *cmdline = boot_command_line;
        const char *option  = __dis_opt_str;
        bool *res = &dis_ucode_ldr;
#endif

        /*
         * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
         * completely accurate as xen pv guests don't see that CPUID bit set
         * but that's good enough as they don't land on the BSP path anyway.
         */
        if (native_cpuid_ecx(1) & BIT(31))
                return *res;

        if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
                if (amd_check_current_patch_level())
                        return *res;
        }

        if (cmdline_find_option_bool(cmdline, option) <= 0)
                *res = false;

        return *res;
}
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
#ifdef CONFIG_FW_LOADER
        struct builtin_fw *b_fw;

        for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
                if (!strcmp(name, b_fw->name)) {
                        cd->size = b_fw->size;
                        cd->data = b_fw->data;
                        return true;
                }
        }
#endif
        return false;
}
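/*
 * Early loading on the boot CPU. Loading is supported on Intel starting
 * with family 6 and on AMD starting with family 0x10; anything older,
 * or an unknown vendor, is skipped.
 */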
void __init load_ucode_bsp(void)
{
        unsigned int cpuid_1_eax;
        bool intel = true;

        if (!have_cpuid_p())
                return;

        cpuid_1_eax = native_cpuid_eax(1);

        switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
                if (x86_family(cpuid_1_eax) < 6)
                        return;
                break;

        case X86_VENDOR_AMD:
                if (x86_family(cpuid_1_eax) < 0x10)
                        return;
                intel = false;
                break;

        default:
                return;
        }

        if (check_loader_disabled_bsp())
                return;

        if (intel)
                load_ucode_intel_bsp();
        else
                load_ucode_amd_bsp(cpuid_1_eax);
}
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
        return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
        return dis_ucode_ldr;
#endif
}
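/*
 * Early loading on the application processors: the same vendor/family
 * gating as on the BSP, using native_cpuid_eax() since this runs very
 * early during AP bringup.
 */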
void load_ucode_ap(void)
{
        unsigned int cpuid_1_eax;

        if (check_loader_disabled_ap())
                return;

        cpuid_1_eax = native_cpuid_eax(1);

        switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
                if (x86_family(cpuid_1_eax) >= 6)
                        load_ucode_intel_ap();
                break;
        case X86_VENDOR_AMD:
                if (x86_family(cpuid_1_eax) >= 0x10)
                        load_ucode_amd_ap(cpuid_1_eax);
                break;
        default:
                break;
        }
}
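/*
 * Registered as an fs_initcall (see the bottom of this file): save the
 * relevant patches before the initrd memory is freed. Afterwards,
 * initrd_gone tells find_microcode_in_initrd() that the initrd addresses
 * are stale and must not be dereferenced anymore.
 */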
static int __init save_microcode_in_initrd(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;
        int ret = -EINVAL;

        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                if (c->x86 >= 6)
                        ret = save_microcode_in_initrd_intel();
                break;
        case X86_VENDOR_AMD:
                if (c->x86 >= 0x10)
                        ret = save_microcode_in_initrd_amd(cpuid_eax(1));
                break;
        default:
                break;
        }

        initrd_gone = true;

        return ret;
}
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
{
#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long start = 0;
        size_t size;

#ifdef CONFIG_X86_32
        struct boot_params *params;

        if (use_pa)
                params = (struct boot_params *)__pa_nodebug(&boot_params);
        else
                params = &boot_params;

        size = params->hdr.ramdisk_size;

        /*
         * Set start only if we have an initrd image. We cannot use
         * initrd_start because it is not set that early yet.
         */
        if (size)
                start = params->hdr.ramdisk_image;

#else /* CONFIG_X86_64 */
        size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
        size |= boot_params.hdr.ramdisk_size;

        if (size) {
                start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
                start |= boot_params.hdr.ramdisk_image;

                start += PAGE_OFFSET;
        }
#endif

        /*
         * Fixup the start address: after reserve_initrd() runs, initrd_start
         * has the virtual address of the beginning of the initrd. It also
         * possibly relocates the ramdisk. In either case, initrd_start
         * contains the updated address so use that instead.
         *
         * initrd_gone is for the hotplug case where we've thrown out initrd
         * already.
         */
        if (!use_pa) {
                if (initrd_gone)
                        return (struct cpio_data){ NULL, 0, "" };
                if (initrd_start)
                        start = initrd_start;
        } else {
                /*
                 * The picture with physical addresses is a bit different: we
                 * need to get the *physical* address to which the ramdisk was
                 * relocated, i.e., relocated_ramdisk (not initrd_start) and
                 * since we're running from physical addresses, we need to
                 * access relocated_ramdisk through its *physical* address too.
                 */
                u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
                if (*rr)
                        start = *rr;
        }

        return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
        return (struct cpio_data){ NULL, 0, "" };
#endif
}
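/*
 * Re-apply the early patch on paths like resume from suspend, where the
 * CPU comes back with its power-on microcode revision.
 */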
void reload_early_microcode(void)
{
        int vendor, family;

        vendor = x86_cpuid_vendor();
        family = x86_cpuid_family();

        switch (vendor) {
        case X86_VENDOR_INTEL:
                if (family >= 6)
                        reload_ucode_intel();
                break;
        case X86_VENDOR_AMD:
                if (family >= 0x10)
                        reload_ucode_amd();
                break;
        default:
                break;
        }
}
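/*
 * The *_local() helpers below execute on the target CPU itself;
 * smp_call_function_single() with wait=1 is what gets them there.
 */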
static void collect_cpu_info_local(void *arg)
{
        struct cpu_info_ctx *ctx = arg;

        ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
                                                   ctx->cpu_sig);
}

static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
{
        struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
        int ret;

        ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
        if (!ret)
                ret = ctx.err;

        return ret;
}

static int collect_cpu_info(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        int ret;

        memset(uci, 0, sizeof(*uci));

        ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
        if (!ret)
                uci->valid = 1;

        return ret;
}

static void apply_microcode_local(void *arg)
{
        enum ucode_state *err = arg;

        *err = microcode_ops->apply_microcode(smp_processor_id());
}

static int apply_microcode_on_target(int cpu)
{
        enum ucode_state err;
        int ret;

        ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
        if (!ret) {
                if (err == UCODE_ERROR)
                        ret = 1;
        }
        return ret;
}
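/*
 * What follows is the legacy /dev/cpu/microcode write() interface, built
 * only with CONFIG_MICROCODE_OLD_INTERFACE. The sysfs "reload" file
 * further down is the newer mechanism.
 */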
#ifdef CONFIG_MICROCODE_OLD_INTERFACE
static int do_microcode_update(const void __user *buf, size_t size)
{
        int error = 0;
        int cpu;

        for_each_online_cpu(cpu) {
                struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
                enum ucode_state ustate;

                if (!uci->valid)
                        continue;

                ustate = microcode_ops->request_microcode_user(cpu, buf, size);
                if (ustate == UCODE_ERROR) {
                        error = -1;
                        break;
                } else if (ustate == UCODE_OK)
                        apply_microcode_on_target(cpu);
        }

        return error;
}

static int microcode_open(struct inode *inode, struct file *file)
{
        return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}

static ssize_t microcode_write(struct file *file, const char __user *buf,
                               size_t len, loff_t *ppos)
{
        ssize_t ret = -EINVAL;

        if ((len >> PAGE_SHIFT) > totalram_pages) {
                pr_err("too much data (max %ld pages)\n", totalram_pages);
                return ret;
        }

        get_online_cpus();
        mutex_lock(&microcode_mutex);

        if (do_microcode_update(buf, len) == 0)
                ret = (ssize_t)len;

        if (ret > 0)
                perf_check_microcode();

        mutex_unlock(&microcode_mutex);
        put_online_cpus();

        return ret;
}

static const struct file_operations microcode_fops = {
        .owner          = THIS_MODULE,
        .write          = microcode_write,
        .open           = microcode_open,
        .llseek         = no_llseek,
};

static struct miscdevice microcode_dev = {
        .minor          = MICROCODE_MINOR,
        .name           = "microcode",
        .nodename       = "cpu/microcode",
        .fops           = &microcode_fops,
};

static int __init microcode_dev_init(void)
{
        int error;

        error = misc_register(&microcode_dev);
        if (error) {
                pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
                return error;
        }

        return 0;
}

static void __exit microcode_dev_exit(void)
{
        misc_deregister(&microcode_dev);
}
#else
#define microcode_dev_init()    0
#define microcode_dev_exit()    do { } while (0)
#endif
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;

/*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other
 *   sibling is loading microcode in order to avoid any negative
 *   interactions caused by the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
#define SPINUNIT 100 /* 100 nsec */

static int check_online_cpus(void)
{
        unsigned int cpu;

        /*
         * Make sure all CPUs are online. It's fine for SMT to be disabled if
         * all the primary threads are still online.
         */
        for_each_present_cpu(cpu) {
                if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
                        pr_err("Not all CPUs online, aborting microcode update.\n");
                        return -EINVAL;
                }
        }

        return 0;
}
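/*
 * Two rendezvous points for the late load: every CPU first checks in at
 * late_cpus_in before anything is applied, and checks out at
 * late_cpus_out once the serialized update is done.
 */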
static atomic_t late_cpus_in;
static atomic_t late_cpus_out;

static int __wait_for_cpus(atomic_t *t, long long timeout)
{
        int all_cpus = num_online_cpus();

        atomic_inc(t);

        while (atomic_read(t) < all_cpus) {
                if (timeout < SPINUNIT) {
                        pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
                               all_cpus - atomic_read(t));
                        return 1;
                }

                ndelay(SPINUNIT);
                timeout -= SPINUNIT;

                touch_nmi_watchdog();
        }
        return 0;
}
/*
 * Returns:
 * < 0 - on error
 *   0 - no update done
 *   1 - microcode was updated
 */
static int __reload_late(void *info)
{
        int cpu = smp_processor_id();
        enum ucode_state err;
        int ret = 0;

        /*
         * Wait for all CPUs to arrive. A load will not be attempted unless
         * all CPUs show up.
         */
        if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
                return -1;

        raw_spin_lock(&update_lock);
        apply_microcode_local(&err);
        raw_spin_unlock(&update_lock);

        /* siblings return UCODE_OK because their engine got updated already */
        if (err > UCODE_NFOUND) {
                pr_warn("Error reloading microcode on CPU %d\n", cpu);
                ret = -1;
        } else if (err == UCODE_UPDATED || err == UCODE_OK) {
                ret = 1;
        }

        /*
         * Increase the wait timeout to a safe value here since we're
         * serializing the microcode update and that could take a while on a
         * large number of CPUs. And that is fine as the *actual* timeout will
         * be determined by the last CPU finished updating and thus cut short.
         */
        if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
                panic("Timeout during microcode update!\n");

        return ret;
}
/*
 * Reload microcode late on all CPUs. Wait for a sec until they
 * all gather together.
 */
static int microcode_reload_late(void)
{
        int ret;

        atomic_set(&late_cpus_in,  0);
        atomic_set(&late_cpus_out, 0);

        ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
        if (ret > 0)
                microcode_check();

        return ret;
}
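/*
 * Writing "1" to the sysfs file below triggers a late reload, e.g.:
 *
 *   echo 1 > /sys/devices/system/cpu/microcode/reload
 *
 * after placing an updated blob where request_firmware() can find it
 * (typically under /lib/firmware). Any other value is silently accepted
 * and ignored.
 */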
static ssize_t reload_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t size)
{
        enum ucode_state tmp_ret = UCODE_OK;
        int bsp = boot_cpu_data.cpu_index;
        unsigned long val;
        ssize_t ret = 0;

        ret = kstrtoul(buf, 0, &val);
        if (ret)
                return ret;

        if (val != 1)
                return size;

        tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
        if (tmp_ret != UCODE_NEW)
                return size;

        get_online_cpus();

        ret = check_online_cpus();
        if (ret)
                goto put;

        mutex_lock(&microcode_mutex);
        ret = microcode_reload_late();
        mutex_unlock(&microcode_mutex);

put:
        put_online_cpus();

        if (ret >= 0)
                ret = size;

        return ret;
}
static ssize_t version_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

        return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t pf_show(struct device *dev,
                       struct device_attribute *attr, char *buf)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

        return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}
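/*
 * Per-CPU attributes; they surface as
 * /sys/devices/system/cpu/cpuN/microcode/{version,processor_flags}
 * via the "microcode" attribute group below.
 */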
static DEVICE_ATTR_WO(reload);
static DEVICE_ATTR(version, 0444, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_processor_flags.attr,
        NULL
};

static const struct attribute_group mc_attr_group = {
        .attrs  = mc_default_attrs,
        .name   = "microcode",
};

static void microcode_fini_cpu(int cpu)
{
        if (microcode_ops->microcode_fini_cpu)
                microcode_ops->microcode_fini_cpu(cpu);
}
static enum ucode_state microcode_resume_cpu(int cpu)
{
        if (apply_microcode_on_target(cpu))
                return UCODE_ERROR;

        pr_debug("CPU%d updated upon resume\n", cpu);

        return UCODE_OK;
}

static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
        enum ucode_state ustate;
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        if (uci->valid)
                return UCODE_OK;

        if (collect_cpu_info(cpu))
                return UCODE_ERROR;

        /* --dimm. Trigger a delayed update? */
        if (system_state != SYSTEM_RUNNING)
                return UCODE_NFOUND;

        ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
        if (ustate == UCODE_NEW) {
                pr_debug("CPU%d updated upon init\n", cpu);
                apply_microcode_on_target(cpu);
        }

        return ustate;
}

static enum ucode_state microcode_update_cpu(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        /* Refresh CPU microcode revision after resume. */
        collect_cpu_info(cpu);

        if (uci->valid)
                return microcode_resume_cpu(cpu);

        return microcode_init_cpu(cpu, false);
}
static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
        int err, cpu = dev->id;

        if (!cpu_online(cpu))
                return 0;

        pr_debug("CPU%d added\n", cpu);

        err = sysfs_create_group(&dev->kobj, &mc_attr_group);
        if (err)
                return err;

        if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
                return -EINVAL;

        return err;
}

static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
        int cpu = dev->id;

        if (!cpu_online(cpu))
                return;

        pr_debug("CPU%d removed\n", cpu);
        microcode_fini_cpu(cpu);
        sysfs_remove_group(&dev->kobj, &mc_attr_group);
}

static struct subsys_interface mc_cpu_interface = {
        .name           = "microcode",
        .subsys         = &cpu_subsys,
        .add_dev        = mc_device_add,
        .remove_dev     = mc_device_remove,
};
/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
        int cpu = smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        if (uci->valid && uci->mc)
                microcode_ops->apply_microcode(cpu);
        else if (!uci->mc)
                reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
        .resume         = mc_bp_resume,
};
static int mc_cpu_online(unsigned int cpu)
{
        struct device *dev;

        dev = get_cpu_device(cpu);
        microcode_update_cpu(cpu);

        pr_debug("CPU%d added\n", cpu);

        if (sysfs_create_group(&dev->kobj, &mc_attr_group))
                pr_err("Failed to create group for CPU%d\n", cpu);

        return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
        struct device *dev;

        dev = get_cpu_device(cpu);
        /* Suspend is in progress, only remove the interface */
        sysfs_remove_group(&dev->kobj, &mc_attr_group);
        pr_debug("CPU%d removed\n", cpu);

        return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
        &dev_attr_reload.attr,
        NULL
};

static const struct attribute_group cpu_root_microcode_group = {
        .name  = "microcode",
        .attrs = cpu_root_microcode_attrs,
};
int __init microcode_init(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;
        int error;

        if (dis_ucode_ldr)
                return -EINVAL;

        if (c->x86_vendor == X86_VENDOR_INTEL)
                microcode_ops = init_intel_microcode();
        else if (c->x86_vendor == X86_VENDOR_AMD)
                microcode_ops = init_amd_microcode();
        else
                pr_err("no support for this CPU vendor\n");

        if (!microcode_ops)
                return -ENODEV;

        microcode_pdev = platform_device_register_simple("microcode", -1,
                                                         NULL, 0);
        if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);

        get_online_cpus();
        mutex_lock(&microcode_mutex);

        error = subsys_interface_register(&mc_cpu_interface);
        if (!error)
                perf_check_microcode();
        mutex_unlock(&microcode_mutex);
        put_online_cpus();

        if (error)
                goto out_pdev;

        error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                   &cpu_root_microcode_group);
        if (error) {
                pr_err("Error creating microcode group!\n");
                goto out_driver;
        }

        error = microcode_dev_init();
        if (error)
                goto out_ucode_group;

        register_syscore_ops(&mc_syscore_ops);
        cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
                                  mc_cpu_online, mc_cpu_down_prep);

        pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

        return 0;

 out_ucode_group:
        sysfs_remove_group(&cpu_subsys.dev_root->kobj,
                           &cpu_root_microcode_group);

 out_driver:
        get_online_cpus();
        mutex_lock(&microcode_mutex);

        subsys_interface_unregister(&mc_cpu_interface);

        mutex_unlock(&microcode_mutex);
        put_online_cpus();

 out_pdev:
        platform_device_unregister(microcode_pdev);

        return error;
}
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);