/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Interface for managing mitigations for Spectre vulnerabilities.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <[email protected]>
 */

#ifndef __ASM_SPECTRE_H
#define __ASM_SPECTRE_H

#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)
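
/*
 * Sizing note (an assumption, not from the original header): each AArch64
 * exception vector table spans 2KB (16 entries of 128 bytes), hence SZ_2K
 * per slot. The "- 1" suggests that the direct slot resolves to
 * __kvm_hyp_vector itself and therefore needs no space of its own in
 * __bp_harden_hyp_vecs.
 */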

#ifndef __ASSEMBLY__

#include <linux/percpu.h>

#include <asm/cpufeature.h>
#include <asm/virt.h>

/* Watch out, ordering is important here. */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};
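
/*
 * A minimal sketch (not part of this header) of why the ordering above
 * matters: per-CPU states can be combined by taking the numeric maximum,
 * so the worst case wins. The helper name is illustrative:
 *
 *	static void update_state(enum mitigation_state *oldp,
 *				 enum mitigation_state new)
 *	{
 *		if (new > *oldp)	// VULNERABLE > MITIGATED > UNAFFECTED
 *			*oldp = new;
 *	}
 */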

struct task_struct;

/*
 * Note: the order of this enum corresponds to __bp_harden_hyp_vecs and
 * we rely on having the direct vectors first.
 */
enum arm64_hyp_spectre_vector {
	/*
	 * Take exceptions directly to __kvm_hyp_vector. This must be
	 * 0 so that it is used by default when mitigations are not needed.
	 */
	HYP_VECTOR_DIRECT,

	/*
	 * Bounce via a slot in the hypervisor text mapping of
	 * __bp_harden_hyp_vecs, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_DIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page.
	 */
	HYP_VECTOR_INDIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_INDIRECT,
};
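
/*
 * Illustrative sketch (the helper name is hypothetical, not defined here)
 * of how a bounce slot might be turned into a vector address, given the
 * SZ_2K stride and the assumption above that the direct slot lives
 * outside __bp_harden_hyp_vecs:
 *
 *	// hypothetical: address of a bounce slot within a mapping of
 *	// __bp_harden_hyp_vecs (HYP_VECTOR_DIRECT is handled elsewhere)
 *	static inline void *slot_to_vector(void *vecs_base,
 *					   enum arm64_hyp_spectre_vector slot)
 *	{
 *		return vecs_base + (slot - 1) * SZ_2K;
 *	}
 */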

typedef void (*bp_hardening_cb_t)(void);

/*
 * Per-CPU record of the chosen mitigation: the hyp vector slot to take
 * exceptions through and an optional branch-predictor hardening callback.
 */
struct bp_hardening_data {
	enum arm64_hyp_spectre_vector	slot;
	bp_hardening_cb_t		fn;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

/* Called during entry so must be __always_inline */
static __always_inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
		return;

	d = this_cpu_ptr(&bp_hardening_data);
	if (d->fn)
		d->fn();
}
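
/*
 * Hypothetical call site (the handler name is illustrative, not from this
 * header): the hardening callback is intended to run on exception entry
 * from user space, before any branch a trained predictor could steer:
 *
 *	static void el0_example_handler(struct pt_regs *regs)
 *	{
 *		arm64_apply_bp_hardening();	// invalidate the branch predictor
 *		...
 *	}
 */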

enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

bool has_spectre_v3a(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

enum mitigation_state arm64_get_meltdown_state(void);

enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
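
/*
 * Sketch of the expected wiring (illustrative; assumes the standard
 * arm64_cpu_capabilities convention of matches/cpu_enable pairs in a
 * capability table, with this exact entry not taken from this header):
 *
 *	{
 *		.desc = "Spectre-v2",
 *		.capability = ARM64_SPECTRE_V2,
 *		.matches = has_spectre_v2,
 *		.cpu_enable = spectre_v2_enable_mitigation,
 *	},
 */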

#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_SPECTRE_H */