security.c

// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>
#include <linux/debugfs.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/security_features.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/inst.h>

#include "setup.h"

u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum branch_cache_flush_type {
        BRANCH_CACHE_FLUSH_NONE = 0x1,
        BRANCH_CACHE_FLUSH_SW = 0x2,
        BRANCH_CACHE_FLUSH_HW = 0x4,
};
static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;

#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif

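/* Patch the barrier_nospec fixup sites in or out and record the new state. */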
static void enable_barrier_nospec(bool enable)
{
        barrier_nospec_enabled = enable;
        do_barrier_nospec_fixups(enable);
}

void __init setup_barrier_nospec(void)
{
        bool enable;

        /*
         * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
         * But there's a good reason not to. The two flags we check below are
         * both enabled by default in the kernel, so if the hcall is not
         * functional they will be enabled.
         *
         * On a system where the host firmware has been updated (so the ori
         * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
         * not been updated, we would like to enable the barrier. Dropping the
         * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
         * we potentially enable the barrier on systems where the host firmware
         * is not updated, but that's harmless as it's a no-op.
         */
        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

        if (!no_nospec && !cpu_mitigations_off())
                enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
        no_nospec = true;
        return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
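/* Runtime toggle for the speculation barrier, exposed via debugfs. */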
static int barrier_nospec_set(void *data, u64 val)
{
        switch (val) {
        case 0:
        case 1:
                break;
        default:
                return -EINVAL;
        }

        if (!!val == !!barrier_nospec_enabled)
                return 0;

        enable_barrier_nospec(!!val);

        return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
        *val = barrier_nospec_enabled ? 1 : 0;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
                         barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
        debugfs_create_file_unsafe("barrier_nospec", 0600,
                                   arch_debugfs_dir, NULL,
                                   &fops_barrier_nospec);
        return 0;
}
device_initcall(barrier_nospec_debugfs_init);

static __init int security_feature_debugfs_init(void)
{
        debugfs_create_x64("security_features", 0400, arch_debugfs_dir,
                           &powerpc_security_features);
        return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
        no_spectrev2 = true;
        return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_E500
void __init setup_spectre_v2(void)
{
        if (no_spectrev2 || cpu_mitigations_off())
                do_btb_flush_fixups();
        else
                btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_E500 */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        bool thread_priv;

        thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

        if (rfi_flush) {
                struct seq_buf s;

                seq_buf_init(&s, buf, PAGE_SIZE - 1);

                seq_buf_printf(&s, "Mitigation: RFI Flush");
                if (thread_priv)
                        seq_buf_printf(&s, ", L1D private per thread");

                seq_buf_printf(&s, "\n");

                return s.len;
        }

        if (thread_priv)
                return sprintf(buf, "Vulnerable: L1D private per thread\n");

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_meltdown(dev, attr, buf);
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
                if (barrier_nospec_enabled)
                        seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
                else
                        seq_buf_printf(&s, "Vulnerable");

                if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
                        seq_buf_printf(&s, ", ori31 speculation barrier enabled");

                seq_buf_printf(&s, "\n");
        } else
                seq_buf_printf(&s, "Not affected\n");

        return s.len;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;
        bool bcs, ccd;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
        ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

        if (bcs || ccd) {
                seq_buf_printf(&s, "Mitigation: ");

                if (bcs)
                        seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

                if (bcs && ccd)
                        seq_buf_printf(&s, ", ");

                if (ccd)
                        seq_buf_printf(&s, "Indirect branch cache disabled");

        } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
                seq_buf_printf(&s, "Mitigation: Software count cache flush");

                if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
                        seq_buf_printf(&s, " (hardware accelerated)");

        } else if (btb_flush_enabled) {
                seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
        } else {
                seq_buf_printf(&s, "Vulnerable");
        }

        if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
                if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
                        seq_buf_printf(&s, ", Software link stack flush");
                if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
                        seq_buf_printf(&s, " (hardware accelerated)");
        }

        seq_buf_printf(&s, "\n");

        return s.len;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */
static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
static bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
        pr_info("stf-barrier: disabled on command line.");
        no_stf_barrier = true;
        return 0;
}
early_param("no_stf_barrier", handle_no_stf_barrier);

enum stf_barrier_type stf_barrier_type_get(void)
{
        return stf_enabled_flush_types;
}

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
        if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
                /* Until firmware tells us, we have the barrier with auto */
                return 0;
        } else if (strncmp(p, "off", 3) == 0) {
                handle_no_stf_barrier(NULL);
                return 0;
        } else
                return 1;

        return 0;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
        handle_no_stf_barrier(NULL);
        return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

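/* Patch the STF barrier fixup sites with the chosen barrier type, or remove them. */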
static void stf_barrier_enable(bool enable)
{
        if (enable)
                do_stf_barrier_fixups(stf_enabled_flush_types);
        else
                do_stf_barrier_fixups(STF_BARRIER_NONE);

        stf_barrier = enable;
}

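/*
 * Select the barrier type suited to this CPU generation, then enable it
 * unless it was disabled on the command line or by cpu_mitigations_off().
 */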
void setup_stf_barrier(void)
{
        enum stf_barrier_type type;
        bool enable;

        /* Default to fallback in case fw-features are not available */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                type = STF_BARRIER_EIEIO;
        else if (cpu_has_feature(CPU_FTR_ARCH_207S))
                type = STF_BARRIER_SYNC_ORI;
        else if (cpu_has_feature(CPU_FTR_ARCH_206))
                type = STF_BARRIER_FALLBACK;
        else
                type = STF_BARRIER_NONE;

        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_STF_BARRIER);

        if (type == STF_BARRIER_FALLBACK) {
                pr_info("stf-barrier: fallback barrier available\n");
        } else if (type == STF_BARRIER_SYNC_ORI) {
                pr_info("stf-barrier: hwsync barrier available\n");
        } else if (type == STF_BARRIER_EIEIO) {
                pr_info("stf-barrier: eieio barrier available\n");
        }

        stf_enabled_flush_types = type;

        if (!no_stf_barrier && !cpu_mitigations_off())
                stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
                const char *type;

                switch (stf_enabled_flush_types) {
                case STF_BARRIER_EIEIO:
                        type = "eieio";
                        break;
                case STF_BARRIER_SYNC_ORI:
                        type = "hwsync";
                        break;
                case STF_BARRIER_FALLBACK:
                        type = "fallback";
                        break;
                default:
                        type = "unknown";
                }

                return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
        }

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}

static int ssb_prctl_get(struct task_struct *task)
{
        /*
         * The STF_BARRIER feature is on by default, so if it's off that means
         * firmware has explicitly said the CPU is not vulnerable via either
         * the hypercall or device tree.
         */
        if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
                return PR_SPEC_NOT_AFFECTED;

        /*
         * If the system's CPU has no known barrier (see setup_stf_barrier())
         * then assume that the CPU is not vulnerable.
         */
        if (stf_enabled_flush_types == STF_BARRIER_NONE)
                return PR_SPEC_NOT_AFFECTED;

        /*
         * Otherwise the CPU is vulnerable. The barrier is not a global or
         * per-process mitigation, so the only value that can be reported here
         * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
         */
        return PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != stf_barrier)
                stf_barrier_enable(enable);

        return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
        *val = stf_barrier ? 1 : 0;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
                         "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
        debugfs_create_file_unsafe("stf_barrier", 0600, arch_debugfs_dir,
                                   NULL, &fops_stf_barrier);
        return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

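/*
 * Re-patch the branch cache flush call sites to match the currently
 * selected count cache and link stack flush types.
 */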
static void update_branch_cache_flush(void)
{
        u32 *site, __maybe_unused *site2;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        site = &patch__call_kvm_flush_link_stack;
        site2 = &patch__call_kvm_flush_link_stack_p9;

        // This controls the branch from guest_exit_cont to kvm_flush_link_stack
        if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
                patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
        } else {
                // Could use HW flush, but that could also flush count cache
                patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
                patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
        }
#endif

        // Patch out the bcctr first, then nop the rest
        site = &patch__call_flush_branch_caches3;
        patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
        site = &patch__call_flush_branch_caches2;
        patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
        site = &patch__call_flush_branch_caches1;
        patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));

        // This controls the branch from _switch to flush_branch_caches
        if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
            link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                // Nothing to be done

        } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
                   link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
                // Patch in the bcctr last
                site = &patch__call_flush_branch_caches1;
                patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
                site = &patch__call_flush_branch_caches2;
                patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
                site = &patch__call_flush_branch_caches3;
                patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));

        } else {
                patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);

                // If we just need to flush the link stack, early return
                if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                        patch_instruction_site(&patch__flush_link_stack_return,
                                               ppc_inst(PPC_RAW_BLR()));

                // If we have flush instruction, early return
                } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
                        patch_instruction_site(&patch__flush_count_cache_return,
                                               ppc_inst(PPC_RAW_BLR()));
                }
        }
}

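/*
 * Work out which flush types firmware allows (hardware assisted, software,
 * or none), log the choice, then apply it via update_branch_cache_flush().
 */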
static void toggle_branch_cache_flush(bool enable)
{
        if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
                if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
                        count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;

                pr_info("count-cache-flush: flush disabled.\n");
        } else {
                if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
                        count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
                        pr_info("count-cache-flush: hardware flush enabled.\n");
                } else {
                        count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
                        pr_info("count-cache-flush: software flush enabled.\n");
                }
        }

        if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
                if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
                        link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

                pr_info("link-stack-flush: flush disabled.\n");
        } else {
                if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
                        link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
                        pr_info("link-stack-flush: hardware flush enabled.\n");
                } else {
                        link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
                        pr_info("link-stack-flush: software flush enabled.\n");
                }
        }

        update_branch_cache_flush();
}

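/*
 * Decide at boot whether the branch cache flushes should be enabled,
 * honouring nospectre_v2 and mitigations=off.
 */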
void setup_count_cache_flush(void)
{
        bool enable = true;

        if (no_spectrev2 || cpu_mitigations_off()) {
                if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
                    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
                        pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

                enable = false;
        }

        /*
         * There's no firmware feature flag/hypervisor bit to tell us we need to
         * flush the link stack on context switch. So we set it here if we see
         * either of the Spectre v2 mitigations that aim to protect userspace.
         */
        if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
            security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
                security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

        toggle_branch_cache_flush(enable);
}

static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
static bool entry_flush;
static bool uaccess_flush;

DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);

static int __init handle_no_rfi_flush(char *p)
{
        pr_info("rfi-flush: disabled on command line.");
        no_rfi_flush = true;
        return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

static int __init handle_no_entry_flush(char *p)
{
        pr_info("entry-flush: disabled on command line.");
        no_entry_flush = true;
        return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);

static int __init handle_no_uaccess_flush(char *p)
{
        pr_info("uaccess-flush: disabled on command line.");
        no_uaccess_flush = true;
        return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
        pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
        handle_no_rfi_flush(NULL);
        return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
        /*
         * We don't need to do the flush explicitly, just enter+exit kernel is
         * sufficient, the RFI exit handlers will do the right thing.
         */
}

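/*
 * Patch the RFI exit flush in or out; the IPI sends every CPU through a
 * kernel entry/exit so the new flush sequence takes effect immediately.
 */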
void rfi_flush_enable(bool enable)
{
        if (enable) {
                do_rfi_flush_fixups(enabled_flush_types);
                on_each_cpu(do_nothing, NULL, 1);
        } else
                do_rfi_flush_fixups(L1D_FLUSH_NONE);

        rfi_flush = enable;
}

static void entry_flush_enable(bool enable)
{
        if (enable) {
                do_entry_flush_fixups(enabled_flush_types);
                on_each_cpu(do_nothing, NULL, 1);
        } else {
                do_entry_flush_fixups(L1D_FLUSH_NONE);
        }

        entry_flush = enable;
}

static void uaccess_flush_enable(bool enable)
{
        if (enable) {
                do_uaccess_flush_fixups(enabled_flush_types);
                static_branch_enable(&uaccess_flush_key);
                on_each_cpu(do_nothing, NULL, 1);
        } else {
                static_branch_disable(&uaccess_flush_key);
                do_uaccess_flush_fixups(L1D_FLUSH_NONE);
        }

        uaccess_flush = enable;
}

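/*
 * Allocate the displacement flush area used by the fallback L1D flush and
 * point every CPU's paca at it.
 */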
static void __ref init_fallback_flush(void)
{
        u64 l1d_size, limit;
        int cpu;

        /* Only allocate the fallback flush area once (at boot time). */
        if (l1d_flush_fallback_area)
                return;

        l1d_size = ppc64_caches.l1d.size;

        /*
         * If there is no d-cache-size property in the device tree, l1d_size
         * could be zero. That leads to the loop in the asm wrapping around to
         * 2^64-1, and then walking off the end of the fallback area and
         * eventually causing a page fault which is fatal. Just default to
         * something vaguely sane.
         */
        if (!l1d_size)
                l1d_size = (64 * 1024);

        limit = min(ppc64_bolted_size(), ppc64_rma_size);

        /*
         * Align to L1d size, and size it at 2x L1d size, to catch possible
         * hardware prefetch runoff. We don't have a recipe for load patterns to
         * reliably avoid the prefetcher.
         */
        l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
                                                         l1d_size, MEMBLOCK_LOW_LIMIT,
                                                         limit, NUMA_NO_NODE);
        if (!l1d_flush_fallback_area)
                panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
                      __func__, l1d_size * 2, l1d_size, &limit);

        for_each_possible_cpu(cpu) {
                struct paca_struct *paca = paca_ptrs[cpu];
                paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
                paca->l1d_flush_size = l1d_size;
        }
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
        if (types & L1D_FLUSH_FALLBACK) {
                pr_info("rfi-flush: fallback displacement flush available\n");
                init_fallback_flush();
        }

        if (types & L1D_FLUSH_ORI)
                pr_info("rfi-flush: ori type flush available\n");

        if (types & L1D_FLUSH_MTTRIG)
                pr_info("rfi-flush: mttrig type flush available\n");

        enabled_flush_types = types;

        if (!cpu_mitigations_off() && !no_rfi_flush)
                rfi_flush_enable(enable);
}

void setup_entry_flush(bool enable)
{
        if (cpu_mitigations_off())
                return;

        if (!no_entry_flush)
                entry_flush_enable(enable);
}

void setup_uaccess_flush(bool enable)
{
        if (cpu_mitigations_off())
                return;

        if (!no_uaccess_flush)
                uaccess_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        toggle_branch_cache_flush(enable);

        return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
        if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
                *val = 0;
        else
                *val = 1;

        return 0;
}

static int link_stack_flush_get(void *data, u64 *val)
{
        if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE)
                *val = 0;
        else
                *val = 1;

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
                         count_cache_flush_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_link_stack_flush, link_stack_flush_get,
                         count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
        debugfs_create_file_unsafe("count_cache_flush", 0600,
                                   arch_debugfs_dir, NULL,
                                   &fops_count_cache_flush);
        debugfs_create_file_unsafe("link_stack_flush", 0600,
                                   arch_debugfs_dir, NULL,
                                   &fops_link_stack_flush);
        return 0;
}
device_initcall(count_cache_flush_debugfs_init);

static int rfi_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != rfi_flush)
                rfi_flush_enable(enable);

        return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
        *val = rfi_flush ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static int entry_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != entry_flush)
                entry_flush_enable(enable);

        return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
        *val = entry_flush ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");

static int uaccess_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != uaccess_flush)
                uaccess_flush_enable(enable);

        return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
        *val = uaccess_flush ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
        debugfs_create_file("rfi_flush", 0600, arch_debugfs_dir, NULL, &fops_rfi_flush);
        debugfs_create_file("entry_flush", 0600, arch_debugfs_dir, NULL, &fops_entry_flush);
        debugfs_create_file("uaccess_flush", 0600, arch_debugfs_dir, NULL, &fops_uaccess_flush);
        return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */