kryo_arm64_edac.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/edac.h>
  8. #include <linux/of_device.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/smp.h>
  11. #include <linux/cpu.h>
  12. #include <linux/cpu_pm.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/panic_notifier.h>
  15. #include <linux/of_irq.h>
  16. #include <asm/cputype.h>
  17. #include "edac_mc.h"
  18. #include "edac_device.h"
/* Build-time policy: panic the machine on an uncorrectable cache error. */
#ifdef CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE
#define ARM64_ERP_PANIC_ON_UE 1
#else
#define ARM64_ERP_PANIC_ON_UE 0
#endif

/* ERRXMISC level-field values on silver (little) cores and the L3/SCU. */
#define L1_SILVER_BIT 0x0
#define L2_SILVER_BIT 0x1
#define L3_BIT 0x2

/* MIDR part numbers of the CPU cores this driver recognises. */
#define QCOM_CPU_PART_KRYO4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO5XX_GOLD 0xD0D
#define QCOM_CPU_PART_A78_GOLD 0xD4B
#define QCOM_CPU_PART_KRYO4XX_SILVER_V1 0x803
#define QCOM_CPU_PART_KRYO4XX_SILVER_V2 0x805
#define QCOM_CPU_PART_KRYO6XX_SILVER_V1 0xD05
#define QCOM_CPU_PART_KRYO6XX_GOLDPLUS 0xD44

/* ERRXMISC level-field values on gold (big) cores. */
#define L1_GOLD_IC_BIT 0x1
#define L1_GOLD_DC_BIT 0x4
#define L2_GOLD_BIT 0x8
#define L2_GOLD_TLB_BIT 0x2

/* Zero-based cache-level indices used throughout this file. */
#define L1 0x0
#define L2 0x1
#define L3 0x2

#define EDAC_CPU "kryo_edac"

/*
 * Field extractors for the ERRXSTATUS/ERRXMISC error-record registers.
 * NOTE(review): bit positions presumably follow the ARMv8 RAS extension
 * layout (V bit 30, UE bit 29, SERR [7:0]) -- confirm against the TRM.
 */
#define KRYO_ERRXSTATUS_VALID(a) ((a >> 30) & 0x1)
#define KRYO_ERRXSTATUS_UE(a) ((a >> 29) & 0x1)
#define KRYO_ERRXSTATUS_SERR(a) (a & 0xFF)
#define KRYO_ERRXMISC_LVL(a) ((a >> 1) & 0x7)
#define KRYO_ERRXMISC_LVL_GOLD(a) (a & 0xF)
#define KRYO_ERRXMISC_WAY(a) ((a >> 28) & 0xF)
/*
 * Enable error detection/reporting in the selected error record's
 * control register (sysreg s3_0_c5_c4_1, i.e. ERXCTLR_EL1) by writing
 * 0x10f.  NOTE(review): exact meaning of the 0x10f enable bits is taken
 * on trust here -- confirm against the core's RAS register description.
 */
static inline void set_errxctlr_el1(void)
{
	u64 val = 0x10f;
	asm volatile("msr s3_0_c5_c4_1, %0" : : "r" (val));
}
/*
 * Seed the selected record's miscellaneous register (s3_0_c5_c5_0,
 * ERXMISC0_EL1) with 0x7F7F00000000.  NOTE(review): presumably presets
 * the CE/UE counters near overflow so the fault IRQ fires on the first
 * event -- confirm against the counter field layout.
 */
static inline void set_errxmisc_overflow(void)
{
	u64 val = 0x7F7F00000000ULL;
	asm volatile("msr s3_0_c5_c5_0, %0" : : "r" (val));
}
/*
 * Select which error record the ERX* registers address on this CPU
 * (s3_0_c5_c3_1, ERRSELR_EL1): record 0 = L1/L2, record 1 = L3/SCU.
 */
static inline void write_errselr_el1(u64 val)
{
	asm volatile("msr s3_0_c5_c3_1, %0" : : "r" (val));
}
/* Read the selected record's status register (s3_0_c5_c4_2, ERXSTATUS_EL1). */
static inline u64 read_errxstatus_el1(void)
{
	u64 val;
	asm volatile("mrs %0, s3_0_c5_c4_2" : "=r" (val));
	return val;
}
/* Read the selected record's misc register (s3_0_c5_c5_0, ERXMISC0_EL1). */
static inline u64 read_errxmisc_el1(void)
{
	u64 val;
	asm volatile("mrs %0, s3_0_c5_c5_0" : "=r" (val));
	return val;
}
/*
 * Write the status value back to ERXSTATUS_EL1 (s3_0_c5_c4_2); the set
 * bits are write-one-to-clear, so this acknowledges the latched error
 * and re-arms the record.
 */
static inline void clear_errxstatus_valid(u64 val)
{
	asm volatile("msr s3_0_c5_c4_2, %0" : : "r" (val));
}
/*
 * Record a correctable error with the EDAC core.  When the
 * CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE debug policy is enabled, escalate
 * the CE to a panic so the failing part can be caught in the act.
 */
static void kryo_edac_handle_ce(struct edac_device_ctl_info *edac_dev,
				int inst_nr, int block_nr, const char *msg)
{
	edac_device_handle_ce(edac_dev, inst_nr, block_nr, msg);
#ifdef CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE
	panic("EDAC %s CE: %s\n", edac_dev->ctl_name, msg);
#endif
}
/*
 * Pairs an error class with its log message and the EDAC handler that
 * records it.  CEs route through kryo_edac_handle_ce() so the optional
 * panic-on-CE policy applies; UEs go straight to the EDAC core.
 */
struct errors_edac {
	const char * const msg;
	void (*func)(struct edac_device_ctl_info *edac_dev,
		     int inst_nr, int block_nr, const char *msg);
};

/* Indexed by the KRYO_L*_CE/KRYO_L*_UE codes below. */
static const struct errors_edac errors[] = {
	{"Kryo L1 Correctable Error", kryo_edac_handle_ce },
	{"Kryo L1 Uncorrectable Error", edac_device_handle_ue },
	{"Kryo L2 Correctable Error", kryo_edac_handle_ce },
	{"Kryo L2 Uncorrectable Error", edac_device_handle_ue },
	{"L3 Correctable Error", kryo_edac_handle_ce },
	{"L3 Uncorrectable Error", edac_device_handle_ue },
};

/* Indices into errors[] above. */
#define KRYO_L1_CE 0
#define KRYO_L1_UE 1
#define KRYO_L2_CE 2
#define KRYO_L2_UE 3
#define KRYO_L3_CE 4
#define KRYO_L3_UE 5

/* SERR error-type codes decoded by dump_err_reg(). */
#define DATA_BUF_ERR 0x2
#define CACHE_DATA_ERR 0x6
#define CACHE_TAG_DIRTY_ERR 0x7
#define TLB_PARITY_ERR_DATA 0x8
#define TLB_PARITY_ERR_TAG 0x9
#define BUS_ERROR 0x12
/*
 * Driver state shared by the notifiers and IRQ handlers.
 * @edev_ctl:        EDAC device all errors are reported against
 * @erp_cpu_drvdata: per-CPU cookie handed to request_percpu_irq(); each
 *                   slot holds a back-pointer to this struct
 * @nb_pm:           CPU PM notifier (re-arms registers after idle exit)
 * @nb_panic:        panic notifier (dumps pending errors at panic time)
 * @ppi:             the per-CPU L1/L2 fault interrupt number
 */
struct erp_drvdata {
	struct edac_device_ctl_info *edev_ctl;
	struct erp_drvdata __percpu *erp_cpu_drvdata;
	struct notifier_block nb_pm;
	struct notifier_block nb_panic;
	int ppi;
};

/* Single-instance driver state; consulted by the panic and IRQ paths. */
static struct erp_drvdata *panic_handler_drvdata;

/*
 * Serialises the select-record/read/clear register sequence.
 * NOTE(review): the ERX* registers are per-CPU; the global lock
 * presumably guards against the panic notifier racing a local handler
 * -- confirm.
 */
static DEFINE_SPINLOCK(local_handler_lock);
  120. static void l1_l2_irq_enable(void *info)
  121. {
  122. int irq = *(int *)info;
  123. enable_percpu_irq(irq, irq_get_trigger_type(irq));
  124. }
  125. static void l1_l2_irq_disable(void *info)
  126. {
  127. int irq = *(int *)info;
  128. disable_percpu_irq(irq);
  129. }
  130. static int request_erp_irq(struct platform_device *pdev, const char *propname,
  131. const char *desc, irq_handler_t handler,
  132. void *ed, int percpu)
  133. {
  134. int rc;
  135. struct erp_drvdata *drv = ed;
  136. struct erp_drvdata *temp = NULL;
  137. int irq;
  138. irq = platform_get_irq_byname(pdev, propname);
  139. if (irq < 0) {
  140. pr_err("ARM64 CPU ERP: Could not find <%s> IRQ property. Proceeding anyway.\n",
  141. propname);
  142. goto out;
  143. }
  144. if (!percpu) {
  145. rc = devm_request_threaded_irq(&pdev->dev, irq, NULL,
  146. handler,
  147. IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
  148. desc,
  149. ed);
  150. if (rc) {
  151. pr_err("ARM64 CPU ERP: Failed to request IRQ %d: %d (%s / %s). Proceeding anyway.\n",
  152. irq, rc, propname, desc);
  153. goto out;
  154. }
  155. } else {
  156. drv->erp_cpu_drvdata = alloc_percpu(struct erp_drvdata);
  157. if (!drv->erp_cpu_drvdata) {
  158. pr_err("Failed to allocate percpu erp data\n");
  159. goto out;
  160. }
  161. temp = raw_cpu_ptr(drv->erp_cpu_drvdata);
  162. temp->erp_cpu_drvdata = drv;
  163. rc = request_percpu_irq(irq, handler, desc,
  164. drv->erp_cpu_drvdata);
  165. if (rc) {
  166. pr_err("ARM64 CPU ERP: Failed to request IRQ %d: %d (%s / %s). Proceeding anyway.\n",
  167. irq, rc, propname, desc);
  168. goto out_free;
  169. }
  170. drv->ppi = irq;
  171. on_each_cpu(l1_l2_irq_enable, &irq, 1);
  172. }
  173. return 0;
  174. out_free:
  175. free_percpu(drv->erp_cpu_drvdata);
  176. drv->erp_cpu_drvdata = NULL;
  177. out:
  178. return -EINVAL;
  179. }
  180. static void dump_err_reg(int errorcode, int level, u64 errxstatus, u64 errxmisc,
  181. struct edac_device_ctl_info *edev_ctl)
  182. {
  183. edac_printk(KERN_CRIT, EDAC_CPU, "ERRXSTATUS_EL1: %llx\n", errxstatus);
  184. edac_printk(KERN_CRIT, EDAC_CPU, "ERRXMISC_EL1: %llx\n", errxmisc);
  185. edac_printk(KERN_CRIT, EDAC_CPU, "Cache level: L%d\n", level + 1);
  186. switch (KRYO_ERRXSTATUS_SERR(errxstatus)) {
  187. case DATA_BUF_ERR:
  188. edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from internal data buffer\n");
  189. break;
  190. case CACHE_DATA_ERR:
  191. edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from cache data RAM\n");
  192. break;
  193. case CACHE_TAG_DIRTY_ERR:
  194. edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from cache tag or dirty RAM\n");
  195. break;
  196. case TLB_PARITY_ERR_DATA:
  197. edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB DATA RAM\n");
  198. break;
  199. case TLB_PARITY_ERR_TAG:
  200. edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB TAG RAM\n");
  201. break;
  202. case BUS_ERROR:
  203. edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n");
  204. break;
  205. }
  206. if (level == L3)
  207. edac_printk(KERN_CRIT, EDAC_CPU,
  208. "Way: %d\n", (int) KRYO_ERRXMISC_WAY(errxmisc));
  209. else
  210. edac_printk(KERN_CRIT, EDAC_CPU,
  211. "Way: %d\n", (int) KRYO_ERRXMISC_WAY(errxmisc) >> 2);
  212. errors[errorcode].func(edev_ctl, smp_processor_id(),
  213. level, errors[errorcode].msg);
  214. }
  215. static void kryo_parse_l1_l2_cache_error(u64 errxstatus, u64 errxmisc,
  216. struct edac_device_ctl_info *edev_ctl, int cpu)
  217. {
  218. int level = 0;
  219. u32 part_num;
  220. part_num = read_cpuid_part_number();
  221. switch (part_num) {
  222. case QCOM_CPU_PART_KRYO4XX_SILVER_V1:
  223. case QCOM_CPU_PART_KRYO4XX_SILVER_V2:
  224. case QCOM_CPU_PART_KRYO6XX_SILVER_V1:
  225. switch (KRYO_ERRXMISC_LVL(errxmisc)) {
  226. case L1_SILVER_BIT:
  227. level = L1;
  228. break;
  229. case L2_SILVER_BIT:
  230. level = L2;
  231. break;
  232. default:
  233. edac_printk(KERN_CRIT, EDAC_CPU,
  234. "silver cpu:%d unknown error location:%llu\n",
  235. cpu, KRYO_ERRXMISC_LVL(errxmisc));
  236. }
  237. break;
  238. case QCOM_CPU_PART_KRYO4XX_GOLD:
  239. case QCOM_CPU_PART_KRYO5XX_GOLD:
  240. case QCOM_CPU_PART_KRYO6XX_GOLDPLUS:
  241. case QCOM_CPU_PART_A78_GOLD:
  242. switch (KRYO_ERRXMISC_LVL_GOLD(errxmisc)) {
  243. case L1_GOLD_DC_BIT:
  244. case L1_GOLD_IC_BIT:
  245. level = L1;
  246. break;
  247. case L2_GOLD_BIT:
  248. case L2_GOLD_TLB_BIT:
  249. level = L2;
  250. break;
  251. default:
  252. edac_printk(KERN_CRIT, EDAC_CPU,
  253. "gold cpu:%d unknown error location:%llu\n",
  254. cpu, KRYO_ERRXMISC_LVL_GOLD(errxmisc));
  255. }
  256. break;
  257. default:
  258. edac_printk(KERN_CRIT, EDAC_CPU,
  259. "Error in matching cpu%d with part num:%u\n",
  260. cpu, part_num);
  261. return;
  262. }
  263. switch (level) {
  264. case L1:
  265. if (KRYO_ERRXSTATUS_UE(errxstatus))
  266. dump_err_reg(KRYO_L1_UE, level, errxstatus, errxmisc,
  267. edev_ctl);
  268. else
  269. dump_err_reg(KRYO_L1_CE, level, errxstatus, errxmisc,
  270. edev_ctl);
  271. break;
  272. case L2:
  273. if (KRYO_ERRXSTATUS_UE(errxstatus))
  274. dump_err_reg(KRYO_L2_UE, level, errxstatus, errxmisc,
  275. edev_ctl);
  276. else
  277. dump_err_reg(KRYO_L2_CE, level, errxstatus, errxmisc,
  278. edev_ctl);
  279. break;
  280. default:
  281. edac_printk(KERN_CRIT, EDAC_CPU, "Unknown KRYO_ERRXMISC_LVL value\n");
  282. }
  283. }
/*
 * Check this CPU's error record 0 (L1/L2 caches) and report any valid
 * latched error.  @info is the EDAC device control info (void* so the
 * function doubles as an SMP-call/notifier callback).  The spinlock
 * keeps the select/read/parse/clear sequence atomic with respect to the
 * panic notifier.
 */
static void kryo_check_l1_l2_ecc(void *info)
{
	struct edac_device_ctl_info *edev_ctl = info;
	u64 errxstatus = 0;
	u64 errxmisc = 0;
	int cpu = 0;
	unsigned long flags;

	spin_lock_irqsave(&local_handler_lock, flags);
	/* Select record 0 (L1/L2) before touching the ERX* registers. */
	write_errselr_el1(0);
	errxstatus = read_errxstatus_el1();
	cpu = smp_processor_id();
	if (KRYO_ERRXSTATUS_VALID(errxstatus)) {
		errxmisc = read_errxmisc_el1();
		edac_printk(KERN_CRIT, EDAC_CPU,
			    "Kryo CPU%d detected a L1/L2 cache error, errxstatus = %llx, errxmisc = %llx\n",
			    cpu, errxstatus, errxmisc);
		kryo_parse_l1_l2_cache_error(errxstatus, errxmisc, edev_ctl,
					     cpu);
		/* Acknowledge so the record can latch the next error. */
		clear_errxstatus_valid(errxstatus);
	}
	spin_unlock_irqrestore(&local_handler_lock, flags);
}
  306. static bool l3_is_bus_error(u64 errxstatus)
  307. {
  308. if (KRYO_ERRXSTATUS_SERR(errxstatus) == BUS_ERROR) {
  309. edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n");
  310. return true;
  311. }
  312. return false;
  313. }
/*
 * Check error record 1 (the shared L3/SCU) and report any valid latched
 * error.  A bus error with panic_on_ue set panics immediately -- the
 * lock is dropped first so the panic path never holds it.
 */
static void kryo_check_l3_scu_error(struct edac_device_ctl_info *edev_ctl)
{
	u64 errxstatus = 0;
	u64 errxmisc = 0;
	unsigned long flags;

	spin_lock_irqsave(&local_handler_lock, flags);
	/* Select record 1 (L3/SCU) before touching the ERX* registers. */
	write_errselr_el1(1);
	errxstatus = read_errxstatus_el1();
	errxmisc = read_errxmisc_el1();
	if (KRYO_ERRXSTATUS_VALID(errxstatus) &&
	    KRYO_ERRXMISC_LVL(errxmisc) == L3_BIT) {
		if (l3_is_bus_error(errxstatus)) {
			if (edev_ctl->panic_on_ue) {
				spin_unlock_irqrestore(&local_handler_lock, flags);
				panic("Causing panic due to Bus Error\n");
			}
			/* Bus errors are not EDAC-reported; leave status as-is. */
			goto unlock;
		}
		if (KRYO_ERRXSTATUS_UE(errxstatus)) {
			edac_printk(KERN_CRIT, EDAC_CPU, "Detected L3 uncorrectable error\n");
			dump_err_reg(KRYO_L3_UE, L3, errxstatus, errxmisc,
				     edev_ctl);
		} else {
			edac_printk(KERN_CRIT, EDAC_CPU, "Detected L3 correctable error\n");
			dump_err_reg(KRYO_L3_CE, L3, errxstatus, errxmisc,
				     edev_ctl);
		}
		/* Acknowledge so the record can latch the next error. */
		clear_errxstatus_valid(errxstatus);
	}
unlock:
	spin_unlock_irqrestore(&local_handler_lock, flags);
}
  346. static int kryo_cpu_panic_notify(struct notifier_block *this,
  347. unsigned long event, void *ptr)
  348. {
  349. struct edac_device_ctl_info *edev_ctl =
  350. panic_handler_drvdata->edev_ctl;
  351. edev_ctl->panic_on_ue = 0;
  352. kryo_check_l3_scu_error(edev_ctl);
  353. kryo_check_l1_l2_ecc(edev_ctl);
  354. return NOTIFY_OK;
  355. }
  356. static irqreturn_t kryo_l1_l2_handler(int irq, void *drvdata)
  357. {
  358. kryo_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
  359. return IRQ_HANDLED;
  360. }
  361. static irqreturn_t kryo_l3_scu_handler(int irq, void *drvdata)
  362. {
  363. struct erp_drvdata *drv = drvdata;
  364. struct edac_device_ctl_info *edev_ctl = drv->edev_ctl;
  365. kryo_check_l3_scu_error(edev_ctl);
  366. return IRQ_HANDLED;
  367. }
/*
 * SMP callback: arm error detection and preset the overflow counters
 * for the currently selected error record on this CPU.  @info unused.
 */
static void initialize_registers(void *info)
{
	set_errxctlr_el1();
	set_errxmisc_overflow();
}
/*
 * Initialise error record 0 (L1/L2) -- on every possible CPU when
 * @all_cpus is set (probe), else only on the calling CPU (CPU-PM exit)
 * -- then record 1 (the shared L3/SCU) from this CPU.
 *
 * NOTE(review): write_errselr_el1(0) runs on the calling CPU only while
 * initialize_registers() runs remotely, so remote CPUs configure
 * whichever record their ERRSELR last selected (0 out of reset?) --
 * confirm this is intended.
 */
static void init_regs_on_cpu(bool all_cpus)
{
	int cpu;

	write_errselr_el1(0);
	if (all_cpus) {
		for_each_possible_cpu(cpu)
			smp_call_function_single(cpu, initialize_registers,
						 NULL, 1);
	} else
		initialize_registers(NULL);
	write_errselr_el1(1);
	initialize_registers(NULL);
}
  386. static int kryo_pmu_cpu_pm_notify(struct notifier_block *self,
  387. unsigned long action, void *v)
  388. {
  389. switch (action) {
  390. case CPU_PM_EXIT:
  391. init_regs_on_cpu(false);
  392. kryo_check_l3_scu_error(panic_handler_drvdata->edev_ctl);
  393. kryo_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
  394. break;
  395. }
  396. return NOTIFY_OK;
  397. }
  398. static int kryo_cpu_erp_probe(struct platform_device *pdev)
  399. {
  400. struct device *dev = &pdev->dev;
  401. struct erp_drvdata *drv;
  402. int rc = 0;
  403. int erp_pass = 0;
  404. int num_irqs = 0;
  405. init_regs_on_cpu(true);
  406. drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
  407. if (!drv)
  408. return -ENOMEM;
  409. drv->edev_ctl = edac_device_alloc_ctl_info(0, "cpu",
  410. num_possible_cpus(), "L", 3, 1, NULL, 0,
  411. edac_device_alloc_index());
  412. if (!drv->edev_ctl)
  413. return -ENOMEM;
  414. drv->edev_ctl->dev = dev;
  415. drv->edev_ctl->mod_name = dev_name(dev);
  416. drv->edev_ctl->dev_name = dev_name(dev);
  417. drv->edev_ctl->ctl_name = "cache";
  418. drv->edev_ctl->panic_on_ue = ARM64_ERP_PANIC_ON_UE;
  419. drv->nb_pm.notifier_call = kryo_pmu_cpu_pm_notify;
  420. drv->nb_panic.notifier_call = kryo_cpu_panic_notify;
  421. atomic_notifier_chain_register(&panic_notifier_list,
  422. &drv->nb_panic);
  423. platform_set_drvdata(pdev, drv);
  424. rc = edac_device_add_device(drv->edev_ctl);
  425. if (rc)
  426. goto out_mem;
  427. panic_handler_drvdata = drv;
  428. num_irqs = platform_irq_count(pdev);
  429. if (num_irqs == 0) {
  430. pr_err("KRYO ERP: No irqs found for error reporting\n");
  431. rc = -EINVAL;
  432. goto out_dev;
  433. }
  434. if (num_irqs < 0) {
  435. rc = num_irqs;
  436. goto out_dev;
  437. }
  438. if (!request_erp_irq(pdev, "l1-l2-faultirq",
  439. "KRYO L1-L2 ECC FAULTIRQ",
  440. kryo_l1_l2_handler, drv, 1))
  441. erp_pass++;
  442. if (!request_erp_irq(pdev, "l3-scu-faultirq",
  443. "KRYO L3-SCU ECC FAULTIRQ",
  444. kryo_l3_scu_handler, drv, 0))
  445. erp_pass++;
  446. if (!request_erp_irq(pdev, "l3-c0-scu-faultirq",
  447. "KRYO L3-SCU ECC FAULTIRQ CLUSTER 0",
  448. kryo_l3_scu_handler, drv, 0))
  449. erp_pass++;
  450. if (!request_erp_irq(pdev, "l3-c1-scu-faultirq",
  451. "KRYO L3-SCU ECC FAULTIRQ CLUSTER 1",
  452. kryo_l3_scu_handler, drv, 0))
  453. erp_pass++;
  454. /* Return if none of the IRQ is valid */
  455. if (!erp_pass) {
  456. pr_err("KRYO ERP: Could not request any IRQs. Giving up.\n");
  457. rc = -ENODEV;
  458. goto out_dev;
  459. }
  460. cpu_pm_register_notifier(&(drv->nb_pm));
  461. return 0;
  462. out_dev:
  463. edac_device_del_device(dev);
  464. out_mem:
  465. edac_device_free_ctl_info(drv->edev_ctl);
  466. return rc;
  467. }
  468. static int kryo_cpu_erp_remove(struct platform_device *pdev)
  469. {
  470. struct erp_drvdata *drv = dev_get_drvdata(&pdev->dev);
  471. struct edac_device_ctl_info *edac_ctl = drv->edev_ctl;
  472. if (drv->erp_cpu_drvdata != NULL) {
  473. on_each_cpu(l1_l2_irq_disable, &(drv->ppi), 1);
  474. free_percpu_irq(drv->ppi, drv->erp_cpu_drvdata);
  475. free_percpu(drv->erp_cpu_drvdata);
  476. }
  477. cpu_pm_unregister_notifier(&(drv->nb_pm));
  478. edac_device_del_device(edac_ctl->dev);
  479. edac_device_free_ctl_info(edac_ctl);
  480. return 0;
  481. }
  482. static const struct of_device_id kryo_cpu_erp_match_table[] = {
  483. { .compatible = "arm,arm64-kryo-cpu-erp" },
  484. { }
  485. };
  486. static struct platform_driver kryo_cpu_erp_driver = {
  487. .probe = kryo_cpu_erp_probe,
  488. .remove = kryo_cpu_erp_remove,
  489. .driver = {
  490. .name = "kryo_cpu_cache_erp",
  491. .of_match_table = of_match_ptr(kryo_cpu_erp_match_table),
  492. },
  493. };
  494. module_platform_driver(kryo_cpu_erp_driver);
  495. MODULE_LICENSE("GPL");
  496. MODULE_DESCRIPTION("Kryo EDAC driver");