proton-pack.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */
#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
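/*
 * The helper below implements that: the cmpxchg() loop only lets the state
 * advance to a strictly greater enum value, and once the system-wide
 * capabilities have been finalized it refuses (and WARNs) rather than update,
 * so onlining a late CPU can never improve a previously reported state.
 */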
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
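
/*
 * cpu_show_spectre_v1() below (and the cpu_show_spectre_v2() and
 * cpu_show_spec_store_bypass() handlers later in this file) back the generic
 * vulnerabilities sysfs interface: reads of
 * /sys/devices/system/cpu/vulnerabilities/spectre_v1 and friends return the
 * strings formatted here.
 */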
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
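
/*
 * Command-line control, as wired up above and in the helper below: booting
 * with "nospectre_v2" sets __nospectre_v2, and the generic "mitigations=off"
 * option is reported by cpu_mitigations_off(); either one disables the
 * Spectre-v2 mitigation here.
 */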

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

/* Called during entry so must be noinstr */
static noinstr void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Called during entry so must be noinstr */
static noinstr void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
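
/*
 * The Qualcomm Falkor workaround below is a CPU-specific software sequence
 * rather than a firmware call: it stashes the link register, issues sixteen
 * "bl . + 4" branch-and-link instructions (each of which simply links to the
 * next instruction) so that the return-address prediction structures are
 * refilled with harmless entries, and then restores x30.
 */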
/* Called during entry so must be noinstr */
static noinstr void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();

	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};

static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);
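
/*
 * From the table above, the recognised forms of the parameter are
 * "ssbd=force-on" (mitigation always on), "ssbd=force-off" (mitigation
 * always off) and "ssbd=kernel" (toggled dynamically on kernel entry/exit).
 * Anything else is rejected with -EINVAL, leaving the default dynamic
 * policy in place.
 */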

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};
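
/*
 * The hook above matches the immediate form of "MSR SSBS, #imm". If that
 * instruction UNDEFs in kernel context (as it may on a CPU without the SSBS
 * extension in a mismatched system), the handler mirrors the requested value
 * into the saved PSTATE and skips the instruction; user-mode UNDEFs are not
 * handled here (return 1) and take the normal undefined-instruction path.
 */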

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}
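
/*
 * A reminder on the polarity used below: a *set* SSBS bit in the saved
 * PSTATE means speculative store bypass is permitted for that context
 * (mitigation off), while a *clear* bit means the CPU must behave as if
 * the bypass were disabled (mitigation on). That is why "mitigations off"
 * translates into setting the bit.
 */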
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
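
/*
 * For reference, the userspace side of this is the generic speculation
 * control prctl(); a call along the lines of:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * ends up in ssbd_prctl_set() below via arch_prctl_spec_ctrl_set(), and the
 * matching PR_GET_SPECULATION_CTRL query is answered by ssbd_prctl_get().
 */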
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
	BHB_INSN,
};
static unsigned long system_bhb_mitigations;
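
/*
 * Each bit above records that at least one CPU in the system relies on the
 * corresponding mitigation; the alternative-patching callbacks at the end of
 * this file key off these bits, so the patched vectors end up containing
 * whatever the union of all CPUs requires.
 */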

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k11_list[] = {
			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
			k = 11;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

static bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str)
{
	__nospectre_bhb = true;
	return 0;
}
early_param("nospectre_bhb", parse_spectre_bhb_param);
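
/*
 * As with Spectre-v2, the BHB mitigation can be disabled: at build time by
 * leaving CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY unset, or at boot with
 * "nospectre_bhb" (or the generic "mitigations=off"), as checked at the top
 * of spectre_bhb_enable_mitigation() below.
 */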

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off() || __nospectre_bhb) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}

/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}

/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"

void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif