cpu_errata.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>
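
/*
 * Match the current CPU against a MIDR range, then screen out revisions
 * that carry the fix: each entry in ->fixed_revs pairs a variant/revision
 * encoding with a REVIDR_EL1 bit which, when set, indicates that this
 * particular revision is not affected after all.
 */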
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
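
/*
 * Qualcomm Kryo parts encode the CPU family in the top nibble of the
 * part number, so compare only the implementor, the architecture field
 * and that nibble; the variant and revision fields are ignored.
 */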
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
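
/*
 * arm64_ftr_reg_ctrel0.strict_mask covers the CTR_EL0 fields that the
 * cpufeature framework requires to be identical on every CPU; only
 * those fields are compared against the system-wide value below.
 */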
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
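
/*
 * Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, where
 * the kernel can emulate the access and hand userspace the sanitised
 * system-wide value instead of this CPU's mismatched one.
 */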
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
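/*
 * Non-zero while this CPU is inside the erratum 1463225 workaround
 * window, which is opened elsewhere by the SVC entry path; the debug
 * exception code consults this flag to discard the spurious single-step
 * exception the erratum can raise.
 */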
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
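
/*
 * Clearing SCTLR_EL1.UCI makes EL0 cache maintenance instructions
 * (e.g. DC CVAU, IC IVAU) trap to EL1 so the kernel can emulate them.
 */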
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
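
/*
 * For illustration, the helpers above expand to designated initializers
 * in place, so an entry written as
 *
 *	ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4)
 *
 * is equivalent to
 *
 *	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *	.matches = is_affected_midr_range,
 *	.midr_range = MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4)
 */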

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};
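
/*
 * The erratum 219 TVM trap is only needed when threads share a core: a
 * non-zero MPIDR affinity level 0 on any CPU indicates that SMT is
 * enabled, so scan the whole system before committing to the workaround.
 */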
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}
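
/*
 * Erratum 1542419 only bites when CTR_EL0.DIC claims that instruction
 * cache invalidation is not required; the kernel portion of the
 * workaround hides DIC from EL0 by trapping CTR_EL0 accesses (see
 * cpu_enable_trap_ctr_access() above).
 */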
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}
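
/*
 * On the parts below a single TLB invalidation may not be enough on its
 * own; ARM64_WORKAROUND_REPEAT_TLBI makes the TLB flushing code issue
 * the invalidate-and-barrier sequence a second time.
 */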
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif
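
/*
 * These Cortex-A53 cache maintenance errata are handled two ways: clean
 * operations are upgraded to clean+invalidate via the
 * ARM64_WORKAROUND_CLEAN_CACHE alternative, and EL0 cache maintenance
 * is trapped and emulated (see cpu_enable_cache_maint_trap()).
 */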
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01]: ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
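/* Cortex-A53 r0p4 parts with bit 8 set in REVIDR_EL1 carry the fix */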
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
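
/*
 * The master errata table, walked by the cpufeature framework at boot
 * and for each secondary CPU. Entries without an explicit .matches use
 * the helpers wired up by the ERRATA_MIDR_* macros, and the empty entry
 * at the end terminates the table.
 */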
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
	{
	}
};