// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

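/*
 * A CPU is affected if its MIDR falls within the entry's range, unless a
 * fixed_revs entry shows the erratum has been fixed on this part: a set
 * bit in REVIDR_EL1 (under fix->revidr_mask) for the matching
 * variant/revision indicates the fix is present.
 */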
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        const struct arm64_midr_revidr *fix;
        u32 midr = read_cpuid_id(), revidr;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        if (!is_midr_in_range(midr, &entry->midr_range))
                return false;

        midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
        revidr = read_cpuid(REVIDR_EL1);
        for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
                if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                        return false;

        return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

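/*
 * Kryo parts encode the specific core in the low bits of the part number,
 * so matching deliberately ignores those: only the implementer, the top
 * nibble of the part number and the architecture field are compared.
 */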
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;

        return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
        u64 ctr_raw, ctr_real;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /*
         * We want to make sure that all the CPUs in the system expose
         * a consistent CTR_EL0 to make sure that applications behave
         * correctly with migration.
         *
         * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
         *
         * 1) It is safe if the system doesn't support IDC, as the CPU
         *    anyway reports IDC = 0, consistent with the rest.
         *
         * 2) If the system has IDC, it is still safe as we trap CTR_EL0
         *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
         *
         * So, we need to make sure either the raw CTR_EL0 or the effective
         * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
         */
        ctr_raw = read_cpuid_cachetype() & mask;
        ctr_real = read_cpuid_effective_cachetype() & mask;

        return (ctr_real != sys) && (ctr_raw != sys);
}

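/*
 * Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, where
 * the kernel can emulate a value consistent with the rest of the system.
 */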
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        bool enable_uct_trap = false;

        /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
        if ((read_cpuid_cachetype() & mask) !=
            (arm64_ftr_reg_ctrel0.sys_val & mask))
                enable_uct_trap = true;

        /* ... or if the system is affected by an erratum */
        if (cap->capability == ARM64_WORKAROUND_1542419)
                enable_uct_trap = true;

        if (enable_uct_trap)
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
{
        return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

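/*
 * ARM64_WORKAROUND_2658417 (see the entry further down) hides BF16 from
 * the user-visible view of ID_AA64ISAR1_EL1, as affected Cortex-A510
 * parts may produce incorrect results for certain BF16 instructions.
 * Updates to user_mask are serialized since several CPUs may race here
 * during bring-up.
 */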
static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
static void __maybe_unused
cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
{
        struct arm64_ftr_reg *regp;

        regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
        if (!regp)
                return;

        raw_spin_lock(&reg_user_mask_modification);
        if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
                regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
        raw_spin_unlock(&reg_user_mask_modification);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)        \
        .matches = is_affected_midr_range,                        \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)                              \
        .matches = is_affected_midr_range,                        \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)                              \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)      \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                   \
        CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)                                 \
        .matches = is_affected_midr_range_list,                   \
        .midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)               \
        ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)                          \
        ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)                           \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                   \
        CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)                         \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                   \
        CAP_MIDR_RANGE_LIST(midr_list)

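/*
 * For illustration, an arm64_errata[] entry built from these macros for a
 * hypothetical erratum NNNNNN on Cortex-A53 r0p0..r0p4 would look like:
 *
 *	{
 *		.desc = "ARM erratum NNNNNN",
 *		.capability = ARM64_WORKAROUND_NNNNNN,
 *		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
 *	},
 */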
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        {},
};

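/*
 * The TVM-trapping side of ThunderX2 erratum 219 is understood to matter
 * only when the part runs with SMT enabled; a non-zero thread ID at MPIDR
 * affinity level 0 on any CPU is taken as an indication of threading.
 */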
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
                         int scope)
{
        int i;

        if (!is_affected_midr_range_list(entry, scope) ||
            !is_hyp_mode_available())
                return false;

        for_each_possible_cpu(i) {
                if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
                        return true;
        }

        return false;
}

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        u32 midr = read_cpuid_id();
        bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
        const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &range) && has_dic;
}

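/*
 * CPUs on the list below may need TLB invalidation sequences to be
 * repeated before a stale translation is reliably gone; the
 * ARM64_WORKAROUND_REPEAT_TLBI entry further down keys off this list.
 */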
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
        {
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
        },
        {
                /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
                ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
        {
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
        {
                /* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
        },
#endif
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_THUNDERX),
        MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
        MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
        MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
        MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
        MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
        MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
        MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
        MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
        /* Cavium ThunderX, T81 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
        MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
        /* Cavium ThunderX, T83 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
    defined(CONFIG_ARM64_ERRATUM_827319) || \
    defined(CONFIG_ARM64_ERRATUM_824069)
        /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        /* Cortex-A53 r0p[01]: ARM erratum 819472 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
        /* Cortex-A76 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Neoverse-N1 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
        /* Cortex-A53 r0p[01234] */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        /* Brahma-B53 r0p[0] */
        MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        /* Kryo2XX Silver rAp4 */
        MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
        {
                /* Cortex-A53 r0p[01234] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
                /* r0p4 parts with REVIDR_EL1[8] set are not affected */
                MIDR_FIXED(0x4, BIT(8)),
        },
        {
                /* Brahma-B53 r0p[0] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
        /* Cortex-A76 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
        /* Cortex-A55 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
        /* Kryo4xx Silver (rdpe => r1p0) */
        MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
        /* Cortex-A76 r0p0 - r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
        MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
        {},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
        {},
};
#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
        MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
        {},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
        MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        {},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */

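/*
 * The master table of errata workarounds. Each entry describes how to
 * detect an affected CPU (a .matches callback or a MIDR range/list) and,
 * optionally, a .cpu_enable hook that applies the fix.
 */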
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
                .desc = "ARM errata 826319, 827319, 824069, or 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
        {
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = erratum_843419_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
                .desc = "Cavium errata 23154 and 38545",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
        },
#endif
        {
                .desc = "Mismatched cache type (CTR_EL0)",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = qcom_erratum_1003_list,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
        {
                .desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = arm64_repeat_tlbi_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
                /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
        {
                .desc = "Spectre-v2",
                .capability = ARM64_SPECTRE_V2,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_spectre_v2,
                .cpu_enable = spectre_v2_enable_mitigation,
        },
#ifdef CONFIG_RANDOMIZE_BASE
        {
                /* Must come after the Spectre-v2 entry */
                .desc = "Spectre-v3a",
                .capability = ARM64_SPECTRE_V3A,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_spectre_v3a,
                .cpu_enable = spectre_v3a_enable_mitigation,
        },
#endif
        {
                .desc = "Spectre-v4",
                .capability = ARM64_SPECTRE_V4,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_spectre_v4,
                .cpu_enable = spectre_v4_enable_mitigation,
        },
        {
                .desc = "Spectre-BHB",
                .capability = ARM64_SPECTRE_BHB,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = is_spectre_bhb_affected,
                .cpu_enable = spectre_bhb_enable_mitigation,
        },
#ifdef CONFIG_ARM64_ERRATUM_1418040
        {
                .desc = "ARM erratum 1418040",
                .capability = ARM64_WORKAROUND_1418040,
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
                /*
                 * We need to allow affected CPUs to come in late, but
                 * also need the non-affected CPUs to be able to come
                 * in at any point in time. Wonderful.
                 */
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
        {
                .desc = "ARM errata 1165522, 1319367, or 1530923",
                .capability = ARM64_WORKAROUND_SPECULATIVE_AT,
                ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
        {
                .desc = "ARM erratum 1463225",
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
                .midr_range_list = erratum_1463225,
        },
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
        {
                .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
                .matches = needs_tx2_tvm_workaround,
        },
        {
                .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
        {
                /* we depend on the firmware portion for correctness */
                .desc = "ARM erratum 1542419 (kernel portion)",
                .capability = ARM64_WORKAROUND_1542419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_neoverse_n1_erratum_1542419,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
        {
                /* we depend on the firmware portion for correctness */
                .desc = "ARM erratum 1508412 (kernel portion)",
                .capability = ARM64_WORKAROUND_1508412,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
                                  0, 0,
                                  1, 0),
        },
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
        {
                /* NVIDIA Carmel */
                .desc = "NVIDIA Carmel CNP erratum",
                .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
        {
                /*
                 * The erratum workaround is handled within the TRBE
                 * driver and can be applied per CPU, so we can allow
                 * a late CPU to come online with this erratum.
                 */
                .desc = "ARM erratum 2119858 or 2139208",
                .capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
                CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
        {
                .desc = "ARM erratum 2067961 or 2054223",
                .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
                ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
        {
                .desc = "ARM erratum 2253138 or 2224489",
                .capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
                CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
        {
                .desc = "ARM erratum 2077057",
                .capability = ARM64_WORKAROUND_2077057,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
        {
                .desc = "ARM erratum 2064142",
                .capability = ARM64_WORKAROUND_2064142,
                /* Cortex-A510 r0p0 - r0p2 */
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
        {
                .desc = "ARM erratum 2457168",
                .capability = ARM64_WORKAROUND_2457168,
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
                /* Cortex-A510 r0p0 - r1p1 */
                CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
        {
                .desc = "ARM erratum 2038923",
                .capability = ARM64_WORKAROUND_2038923,
                /* Cortex-A510 r0p0 - r0p2 */
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
        {
                .desc = "ARM erratum 1902691",
                .capability = ARM64_WORKAROUND_1902691,
                /* Cortex-A510 r0p0 - r0p1 */
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
        {
                .desc = "ARM erratum 1742098",
                .capability = ARM64_WORKAROUND_1742098,
                CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
        {
                .desc = "ARM erratum 2658417",
                .capability = ARM64_WORKAROUND_2658417,
                /* Cortex-A510 r0p0 - r1p1 */
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
                MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
                .cpu_enable = cpu_clear_bf16_from_user_emulation,
        },
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
        {
                .desc = "AmpereOne erratum AC03_CPU_38",
                .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
        },
#endif
        {
        }
};