// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
static u64 __ro_after_init x86_spec_ctrl_base;

void __init check_bugs(void)
{
        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for a i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slow downs.
         *
         * Right now we don't do that with gbpages because there seems
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}
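
/*
 * Note (editor annotation, not part of the original file): the ordering
 * above matters. spectre_v2_select_mitigation() and ssb_select_mitigation()
 * force CPU capability bits with setup_force_cpu_cap(), and those bits are
 * only reflected in the kernel text once alternative_instructions() patches
 * the ALTERNATIVE sites. A capability forced after patching would not be
 * visible in already-patched code.
 */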
/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                  = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]     = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]     = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]         = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

void x86_spec_ctrl_set(u64 val)
{
        if (val & ~SPEC_CTRL_IBRS)
                WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
        else
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);

u64 x86_spec_ctrl_get_default(void)
{
        return x86_spec_ctrl_base;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
        if (!boot_cpu_has(X86_FEATURE_IBRS))
                return;

        if (x86_spec_ctrl_base != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);

void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
{
        if (!boot_cpu_has(X86_FEATURE_IBRS))
                return;

        if (x86_spec_ctrl_base != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
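
/*
 * Usage sketch (editor annotation, hypothetical caller): a hypervisor is
 * expected to bracket guest entry/exit with the two helpers above, along
 * the lines of:
 *
 *      x86_spec_ctrl_set_guest(guest_spec_ctrl);      // before VM-entry
 *      ... run the guest ...
 *      x86_spec_ctrl_restore_host(guest_spec_ctrl);   // after VM-exit
 *
 * Both calls are no-ops unless the CPU advertises IBRS, and the MSR is
 * only written when the guest value differs from the boot-time host base
 * value, keeping the expensive wrmsrl off the common path.
 */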
#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}
static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};
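
/*
 * Example (editor annotation): booting with "spectre_v2=retpoline,generic"
 * resolves to SPECTRE_V2_CMD_RETPOLINE_GENERIC via the table above, while
 * "nospectre_v2" bypasses the table entirely (see
 * spectre_v2_parse_cmdline() below). The "secure" flag only selects which
 * of the two print helpers announces the choice; "on" is the sole option
 * marked secure.
 */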
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret, i;
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;
        else {
                ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
                if (ret < 0)
                        return SPECTRE_V2_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                        if (!match_option(arg, ret, mitigation_options[i].option))
                                continue;
                        cmd = mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n",
                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        if (mitigation_options[i].secure)
                spec2_print_if_secure(mitigation_options[i].option);
        else
                spec2_print_if_insecure(mitigation_options[i].option);

        return cmd;
}
/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6) {
                switch (boot_cpu_data.x86_model) {
                case INTEL_FAM6_SKYLAKE_MOBILE:
                case INTEL_FAM6_SKYLAKE_DESKTOP:
                case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_KABYLAKE_MOBILE:
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        return true;
                }
        }
        return false;
}
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;

        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;

        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;

        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If neither SMEP nor PTI are available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this fill
         * the entire RSB, even when using IBRS.
         *
         * Skylake era CPUs have a separate issue with *underflow* of the
         * RSB, when they will predict 'ret' targets from the generic BTB.
         * The proper mitigation for this is IBRS. If IBRS is not supported
         * or deactivated in favour of retpolines the RSB fill on context
         * switch is required.
         */
        if ((!boot_cpu_has(X86_FEATURE_PTI) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
        }

        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. But firmware isn't, so use IBRS to protect that.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
}
#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
};

static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled"
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
        { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
        { "on",   SPEC_STORE_BYPASS_CMD_ON },   /* Disable Speculative Store Bypass */
        { "off",  SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
};
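
/*
 * Example (editor annotation): "spec_store_bypass_disable=on" forces the
 * mitigation by turning on Reduced Data Speculation (X86_FEATURE_RDS, the
 * early name for SSBD), while "nospec_store_bypass_disable" leaves the CPU
 * in its default, vulnerable configuration.
 */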
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;
                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_RDS))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        if (mode != SPEC_STORE_BYPASS_NONE)
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);

        return mode;
}
static void __init ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}
#undef pr_fmt

#ifdef CONFIG_SYSFS
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");
                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
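
/*
 * Example (editor annotation): the handlers above back the files under
 * /sys/devices/system/cpu/vulnerabilities/. On a retpoline-built kernel
 * with IBPB and firmware IBRS in use, reading "spectre_v2" would produce
 * something like:
 *
 *      Mitigation: Full generic retpoline, IBPB, IBRS_FW
 */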
#endif