// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>


unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum count_cache_flush_type {
        COUNT_CACHE_FLUSH_NONE  = 0x1,
        COUNT_CACHE_FLUSH_SW    = 0x2,
        COUNT_CACHE_FLUSH_HW    = 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;

#ifdef CONFIG_PPC_FSL_BOOK3E
static bool no_spectrev2;
#endif
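
/*
 * Patch the barrier_nospec fixup sites in or out, and record the resulting
 * state in barrier_nospec_enabled.
 */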
static void enable_barrier_nospec(bool enable)
{
        barrier_nospec_enabled = enable;
        do_barrier_nospec_fixups(enable);
}

void setup_barrier_nospec(void)
{
        bool enable;

        /*
         * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
         * But there's a good reason not to. The two flags we check below are
         * both enabled by default in the kernel, so if the hcall is not
         * functional they will be enabled anyway.
         *
         * On a system where the host firmware has been updated (so the ori
         * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
         * not been updated, we would like to enable the barrier. Dropping the
         * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside
         * is that we potentially enable the barrier on systems where the host
         * firmware is not updated, but that's harmless as it's a no-op.
         */
        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

        if (!no_nospec && !cpu_mitigations_off())
                enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
        no_nospec = true;

        return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);
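
/*
 * Debugfs knob for flipping the barrier at runtime. With debugfs mounted
 * in the usual place it appears as /sys/kernel/debug/powerpc/barrier_nospec:
 *
 *   echo 0 > /sys/kernel/debug/powerpc/barrier_nospec    # patch barrier out
 *   echo 1 > /sys/kernel/debug/powerpc/barrier_nospec    # patch barrier in
 */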
#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
        switch (val) {
        case 0:
        case 1:
                break;
        default:
                return -EINVAL;
        }

        if (!!val == !!barrier_nospec_enabled)
                return 0;

        enable_barrier_nospec(!!val);

        return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
        *val = barrier_nospec_enabled ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
                        barrier_nospec_get, barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
        debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
                            &fops_barrier_nospec);
        return 0;
}
device_initcall(barrier_nospec_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
  89. #ifdef CONFIG_PPC_FSL_BOOK3E
  90. static int __init handle_nospectre_v2(char *p)
  91. {
  92. no_spectrev2 = true;
  93. return 0;
  94. }
  95. early_param("nospectre_v2", handle_nospectre_v2);
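
/*
 * If the Spectre v2 mitigation was disabled (nospectre_v2 or
 * mitigations=off), patch out the branch target buffer flush sites;
 * otherwise leave them in place and record that BTB flushing is active.
 */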
void setup_spectre_v2(void)
{
        if (no_spectrev2 || cpu_mitigations_off())
                do_btb_flush_fixups();
        else
                btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
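/*
 * Backs /sys/devices/system/cpu/vulnerabilities/meltdown. Reports the RFI
 * flush and/or thread-private L1D mitigations when they are in effect.
 */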
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        bool thread_priv;

        thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

        if (rfi_flush || thread_priv) {
                struct seq_buf s;
                seq_buf_init(&s, buf, PAGE_SIZE - 1);

                seq_buf_printf(&s, "Mitigation: ");

                if (rfi_flush)
                        seq_buf_printf(&s, "RFI Flush");

                if (rfi_flush && thread_priv)
                        seq_buf_printf(&s, ", ");

                if (thread_priv)
                        seq_buf_printf(&s, "L1D private per thread");

                seq_buf_printf(&s, "\n");

                return s.len;
        }

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}
#endif
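
/*
 * These back the spectre_v1 and spectre_v2 files under
 * /sys/devices/system/cpu/vulnerabilities/.
 */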
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
                if (barrier_nospec_enabled)
                        seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
                else
                        seq_buf_printf(&s, "Vulnerable");

                if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
                        seq_buf_printf(&s, ", ori31 speculation barrier enabled");

                seq_buf_printf(&s, "\n");
        } else
                seq_buf_printf(&s, "Not affected\n");

        return s.len;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;
        bool bcs, ccd;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
        ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

        if (bcs || ccd) {
                seq_buf_printf(&s, "Mitigation: ");

                if (bcs)
                        seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

                if (bcs && ccd)
                        seq_buf_printf(&s, ", ");

                if (ccd)
                        seq_buf_printf(&s, "Indirect branch cache disabled");
        } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
                seq_buf_printf(&s, "Mitigation: Software count cache flush");

                if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
                        seq_buf_printf(&s, " (hardware accelerated)");
        } else if (btb_flush_enabled) {
                seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
        } else {
                seq_buf_printf(&s, "Vulnerable");
        }

        seq_buf_printf(&s, "\n");

        return s.len;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */
static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
        pr_info("stf-barrier: disabled on command line.\n");
        no_stf_barrier = true;
        return 0;
}
early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
        if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
                /* Until firmware tells us, we have the barrier with auto */
                return 0;
        } else if (strncmp(p, "off", 3) == 0) {
                handle_no_stf_barrier(NULL);
                return 0;
        } else
                return 1;

        return 0;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
        handle_no_stf_barrier(NULL);
        return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);
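
/*
 * Patch the store-forwarding barrier sequences in (using the flush type
 * chosen in setup_stf_barrier()) or out, and record the resulting state.
 */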
static void stf_barrier_enable(bool enable)
{
        if (enable)
                do_stf_barrier_fixups(stf_enabled_flush_types);
        else
                do_stf_barrier_fixups(STF_BARRIER_NONE);

        stf_barrier = enable;
}
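
/*
 * Select the barrier type by ISA level: eieio on v3.0 (CPU_FTR_ARCH_300),
 * a hwsync/ori sequence on v2.07, and a generic fallback on v2.06. The
 * barrier is then enabled when firmware asks us to favour security and an
 * L1D flush is wanted, unless it was disabled on the command line.
 */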
void setup_stf_barrier(void)
{
        enum stf_barrier_type type;
        bool enable, hv;

        hv = cpu_has_feature(CPU_FTR_HVMODE);

        /* Default to fallback in case fw-features are not available */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                type = STF_BARRIER_EIEIO;
        else if (cpu_has_feature(CPU_FTR_ARCH_207S))
                type = STF_BARRIER_SYNC_ORI;
        else if (cpu_has_feature(CPU_FTR_ARCH_206))
                type = STF_BARRIER_FALLBACK;
        else
                type = STF_BARRIER_NONE;

        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
                  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

        if (type == STF_BARRIER_FALLBACK) {
                pr_info("stf-barrier: fallback barrier available\n");
        } else if (type == STF_BARRIER_SYNC_ORI) {
                pr_info("stf-barrier: hwsync barrier available\n");
        } else if (type == STF_BARRIER_EIEIO) {
                pr_info("stf-barrier: eieio barrier available\n");
        }

        stf_enabled_flush_types = type;

        if (!no_stf_barrier && !cpu_mitigations_off())
                stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
                const char *type;

                switch (stf_enabled_flush_types) {
                case STF_BARRIER_EIEIO:
                        type = "eieio";
                        break;
                case STF_BARRIER_SYNC_ORI:
                        type = "hwsync";
                        break;
                case STF_BARRIER_FALLBACK:
                        type = "fallback";
                        break;
                default:
                        type = "unknown";
                }

                return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
        }

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}
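
/*
 * Runtime control analogous to the barrier_nospec knob; with debugfs
 * mounted in the usual place this is /sys/kernel/debug/powerpc/stf_barrier.
 */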
#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != stf_barrier)
                stf_barrier_enable(enable);

        return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
        *val = stf_barrier ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
        debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
        return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
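
/*
 * Three outcomes, all via code patching:
 *  - disabled: NOP out the call to flush_count_cache
 *  - software flush: patch a branch-and-link to flush_count_cache
 *  - hardware assisted: additionally patch a blr into the flush sequence
 *    so it returns early, relying on the hardware-assisted flush
 */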
static void toggle_count_cache_flush(bool enable)
{
        if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
                patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
                count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
                pr_info("count-cache-flush: software flush disabled.\n");
                return;
        }

        patch_branch_site(&patch__call_flush_count_cache,
                          (u64)&flush_count_cache, BRANCH_SET_LINK);

        if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
                count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
                pr_info("count-cache-flush: full software flush sequence enabled.\n");
                return;
        }

        patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
        count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
        pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
}

void setup_count_cache_flush(void)
{
        toggle_count_cache_flush(true);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        toggle_count_cache_flush(enable);

        return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
        if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
                *val = 0;
        else
                *val = 1;

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
                        count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
        debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
                            NULL, &fops_count_cache_flush);
        return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#endif /* CONFIG_PPC_BOOK3S_64 */