/* security.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. //
  3. // Security related flags and so on.
  4. //
  5. // Copyright 2018, Michael Ellerman, IBM Corporation.
  6. #include <linux/kernel.h>
  7. #include <linux/device.h>
  8. #include <linux/seq_buf.h>
  9. #include <asm/debugfs.h>
  10. #include <asm/security_features.h>
  11. #include <asm/setup.h>
/* Bitmask of SEC_FTR_* security-feature flags; starts at the default set. */
unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

/* True while the speculation barrier is patched in (set in enable_barrier_nospec()). */
bool barrier_nospec_enabled;

/* Set by the "nospectre_v1" command line parameter; vetoes enabling the barrier. */
static bool no_nospec;
/*
 * Switch the barrier-nospec mitigation on or off: record the new state and
 * apply the corresponding code fixups (do_barrier_nospec_fixups() presumably
 * patches the barrier instruction sites — declared in asm/setup.h).  The flag
 * and the patched code are updated together; keep the two statements paired.
 */
static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}
  20. void setup_barrier_nospec(void)
  21. {
  22. bool enable;
  23. /*
  24. * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
  25. * But there's a good reason not to. The two flags we check below are
  26. * both are enabled by default in the kernel, so if the hcall is not
  27. * functional they will be enabled.
  28. * On a system where the host firmware has been updated (so the ori
  29. * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
  30. * not been updated, we would like to enable the barrier. Dropping the
  31. * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
  32. * we potentially enable the barrier on systems where the host firmware
  33. * is not updated, but that's harmless as it's a no-op.
  34. */
  35. enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
  36. security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
  37. if (!no_nospec)
  38. enable_barrier_nospec(enable);
  39. }
/*
 * "nospectre_v1" on the kernel command line: remember that the user wants
 * the mitigation kept off; honoured later by setup_barrier_nospec().
 */
static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);
  46. #ifdef CONFIG_DEBUG_FS
  47. static int barrier_nospec_set(void *data, u64 val)
  48. {
  49. switch (val) {
  50. case 0:
  51. case 1:
  52. break;
  53. default:
  54. return -EINVAL;
  55. }
  56. if (!!val == !!barrier_nospec_enabled)
  57. return 0;
  58. enable_barrier_nospec(!!val);
  59. return 0;
  60. }
  61. static int barrier_nospec_get(void *data, u64 *val)
  62. {
  63. *val = barrier_nospec_enabled ? 1 : 0;
  64. return 0;
  65. }
  66. DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
  67. barrier_nospec_get, barrier_nospec_set, "%llu\n");
/* Expose "barrier_nospec" under powerpc_debugfs_root for runtime control. */
static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
			    &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);
  75. #endif /* CONFIG_DEBUG_FS */
  76. ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
  77. {
  78. bool thread_priv;
  79. thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
  80. if (rfi_flush || thread_priv) {
  81. struct seq_buf s;
  82. seq_buf_init(&s, buf, PAGE_SIZE - 1);
  83. seq_buf_printf(&s, "Mitigation: ");
  84. if (rfi_flush)
  85. seq_buf_printf(&s, "RFI Flush");
  86. if (rfi_flush && thread_priv)
  87. seq_buf_printf(&s, ", ");
  88. if (thread_priv)
  89. seq_buf_printf(&s, "L1D private per thread");
  90. seq_buf_printf(&s, "\n");
  91. return s.len;
  92. }
  93. if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
  94. !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
  95. return sprintf(buf, "Not affected\n");
  96. return sprintf(buf, "Vulnerable\n");
  97. }
  98. ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
  99. {
  100. struct seq_buf s;
  101. seq_buf_init(&s, buf, PAGE_SIZE - 1);
  102. if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
  103. if (barrier_nospec_enabled)
  104. seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
  105. else
  106. seq_buf_printf(&s, "Vulnerable");
  107. if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
  108. seq_buf_printf(&s, ", ori31 speculation barrier enabled");
  109. seq_buf_printf(&s, "\n");
  110. } else
  111. seq_buf_printf(&s, "Not affected\n");
  112. return s.len;
  113. }
  114. ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
  115. {
  116. struct seq_buf s;
  117. bool bcs, ccd;
  118. seq_buf_init(&s, buf, PAGE_SIZE - 1);
  119. bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
  120. ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
  121. if (bcs || ccd) {
  122. seq_buf_printf(&s, "Mitigation: ");
  123. if (bcs)
  124. seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
  125. if (bcs && ccd)
  126. seq_buf_printf(&s, ", ");
  127. if (ccd)
  128. seq_buf_printf(&s, "Indirect branch cache disabled");
  129. } else
  130. seq_buf_printf(&s, "Vulnerable");
  131. seq_buf_printf(&s, "\n");
  132. return s.len;
  133. }
  134. #ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

/* Barrier flavour chosen in setup_stf_barrier() from the CPU feature bits. */
static enum stf_barrier_type stf_enabled_flush_types;
/* Set via "no_stf_barrier", "spec_store_bypass_disable=off" or
 * "nospec_store_bypass_disable" on the command line. */
static bool no_stf_barrier;
/* Current on/off state of the barrier; updated in stf_barrier_enable(). */
bool stf_barrier;
  141. static int __init handle_no_stf_barrier(char *p)
  142. {
  143. pr_info("stf-barrier: disabled on command line.");
  144. no_stf_barrier = true;
  145. return 0;
  146. }
  147. early_param("no_stf_barrier", handle_no_stf_barrier);
  148. /* This is the generic flag used by other architectures */
  149. static int __init handle_ssbd(char *p)
  150. {
  151. if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
  152. /* Until firmware tells us, we have the barrier with auto */
  153. return 0;
  154. } else if (strncmp(p, "off", 3) == 0) {
  155. handle_no_stf_barrier(NULL);
  156. return 0;
  157. } else
  158. return 1;
  159. return 0;
  160. }
  161. early_param("spec_store_bypass_disable", handle_ssbd);
/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	/* "nospec_store_bypass_disable": same effect as "no_stf_barrier". */
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);
  169. static void stf_barrier_enable(bool enable)
  170. {
  171. if (enable)
  172. do_stf_barrier_fixups(stf_enabled_flush_types);
  173. else
  174. do_stf_barrier_fixups(STF_BARRIER_NONE);
  175. stf_barrier = enable;
  176. }
  177. void setup_stf_barrier(void)
  178. {
  179. enum stf_barrier_type type;
  180. bool enable, hv;
  181. hv = cpu_has_feature(CPU_FTR_HVMODE);
  182. /* Default to fallback in case fw-features are not available */
  183. if (cpu_has_feature(CPU_FTR_ARCH_300))
  184. type = STF_BARRIER_EIEIO;
  185. else if (cpu_has_feature(CPU_FTR_ARCH_207S))
  186. type = STF_BARRIER_SYNC_ORI;
  187. else if (cpu_has_feature(CPU_FTR_ARCH_206))
  188. type = STF_BARRIER_FALLBACK;
  189. else
  190. type = STF_BARRIER_NONE;
  191. enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
  192. (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
  193. (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
  194. if (type == STF_BARRIER_FALLBACK) {
  195. pr_info("stf-barrier: fallback barrier available\n");
  196. } else if (type == STF_BARRIER_SYNC_ORI) {
  197. pr_info("stf-barrier: hwsync barrier available\n");
  198. } else if (type == STF_BARRIER_EIEIO) {
  199. pr_info("stf-barrier: eieio barrier available\n");
  200. }
  201. stf_enabled_flush_types = type;
  202. if (!no_stf_barrier)
  203. stf_barrier_enable(enable);
  204. }
  205. ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
  206. {
  207. if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
  208. const char *type;
  209. switch (stf_enabled_flush_types) {
  210. case STF_BARRIER_EIEIO:
  211. type = "eieio";
  212. break;
  213. case STF_BARRIER_SYNC_ORI:
  214. type = "hwsync";
  215. break;
  216. case STF_BARRIER_FALLBACK:
  217. type = "fallback";
  218. break;
  219. default:
  220. type = "unknown";
  221. }
  222. return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
  223. }
  224. if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
  225. !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
  226. return sprintf(buf, "Not affected\n");
  227. return sprintf(buf, "Vulnerable\n");
  228. }
  229. #ifdef CONFIG_DEBUG_FS
  230. static int stf_barrier_set(void *data, u64 val)
  231. {
  232. bool enable;
  233. if (val == 1)
  234. enable = true;
  235. else if (val == 0)
  236. enable = false;
  237. else
  238. return -EINVAL;
  239. /* Only do anything if we're changing state */
  240. if (enable != stf_barrier)
  241. stf_barrier_enable(enable);
  242. return 0;
  243. }
  244. static int stf_barrier_get(void *data, u64 *val)
  245. {
  246. *val = stf_barrier ? 1 : 0;
  247. return 0;
  248. }
  249. DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
/* Expose "stf_barrier" under powerpc_debugfs_root for runtime control. */
static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
  256. #endif /* CONFIG_DEBUG_FS */
  257. #endif /* CONFIG_PPC_BOOK3S_64 */