armada_xp_edac.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Pengutronix, Jan Luebbe <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/edac.h>
#include <linux/of_platform.h>

#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>

#include "edac_mc.h"
#include "edac_device.h"
#include "edac_module.h"

/************************ EDAC MC (DDR RAM) ********************************/

#define SDRAM_NUM_CS 4

#define SDRAM_CONFIG_REG 0x0
#define SDRAM_CONFIG_ECC_MASK BIT(18)
#define SDRAM_CONFIG_REGISTERED_MASK BIT(17)
#define SDRAM_CONFIG_BUS_WIDTH_MASK BIT(15)

#define SDRAM_ADDR_CTRL_REG 0x10
#define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20+cs)
#define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs) (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs))
#define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs) BIT(16+cs)
#define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs) (cs*4+2)
#define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs))
#define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs) (cs*4)
#define SDRAM_ADDR_CTRL_STRUCT_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs))

#define SDRAM_ERR_DATA_H_REG 0x40
#define SDRAM_ERR_DATA_L_REG 0x44

#define SDRAM_ERR_RECV_ECC_REG 0x48
#define SDRAM_ERR_RECV_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_CALC_ECC_REG 0x4c
#define SDRAM_ERR_CALC_ECC_ROW_OFFSET 8
#define SDRAM_ERR_CALC_ECC_ROW_MASK (0xffff << SDRAM_ERR_CALC_ECC_ROW_OFFSET)
#define SDRAM_ERR_CALC_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_ADDR_REG 0x50
#define SDRAM_ERR_ADDR_BANK_OFFSET 23
#define SDRAM_ERR_ADDR_BANK_MASK (0x7 << SDRAM_ERR_ADDR_BANK_OFFSET)
#define SDRAM_ERR_ADDR_COL_OFFSET 8
#define SDRAM_ERR_ADDR_COL_MASK (0x7fff << SDRAM_ERR_ADDR_COL_OFFSET)
#define SDRAM_ERR_ADDR_CS_OFFSET 1
#define SDRAM_ERR_ADDR_CS_MASK (0x3 << SDRAM_ERR_ADDR_CS_OFFSET)
#define SDRAM_ERR_ADDR_TYPE_MASK BIT(0)

#define SDRAM_ERR_CTRL_REG 0x54
#define SDRAM_ERR_CTRL_THR_OFFSET 16
#define SDRAM_ERR_CTRL_THR_MASK (0xff << SDRAM_ERR_CTRL_THR_OFFSET)
#define SDRAM_ERR_CTRL_PROP_MASK BIT(9)

#define SDRAM_ERR_SBE_COUNT_REG 0x58
#define SDRAM_ERR_DBE_COUNT_REG 0x5c

#define SDRAM_ERR_CAUSE_ERR_REG 0xd0
#define SDRAM_ERR_CAUSE_MSG_REG 0xd8
#define SDRAM_ERR_CAUSE_DBE_MASK BIT(1)
#define SDRAM_ERR_CAUSE_SBE_MASK BIT(0)

#define SDRAM_RANK_CTRL_REG 0x1e0
#define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs)

struct axp_mc_drvdata {
        void __iomem *base;
        /* width in bytes */
        unsigned int width;
        /* bank interleaving */
        bool cs_addr_sel[SDRAM_NUM_CS];
        char msg[128];
};

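/*
 * The error capture registers record the chip select, bank, row and column
 * of the failing access. This helper folds those fields back into a linear
 * physical address, taking the bus width and the per-CS bank-interleaving
 * setting into account, so the EDAC core can be handed a page/offset pair.
 */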
/* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */
static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata,
                                    uint8_t cs, uint8_t bank, uint16_t row,
                                    uint16_t col)
{
        if (drvdata->width == 8) {
                /* 64 bit */
                if (drvdata->cs_addr_sel[cs])
                        /* bank interleaved */
                        return (((row & 0xfff8) << 16) |
                                ((bank & 0x7) << 16) |
                                ((row & 0x7) << 13) |
                                ((col & 0x3ff) << 3));
                else
                        return (((row & 0xffff) << 16) |
                                ((bank & 0x7) << 13) |
                                ((col & 0x3ff) << 3));
        } else if (drvdata->width == 4) {
                /* 32 bit */
                if (drvdata->cs_addr_sel[cs])
                        /* bank interleaved */
                        return (((row & 0xfff0) << 15) |
                                ((bank & 0x7) << 16) |
                                ((row & 0xf) << 12) |
                                ((col & 0x3ff) << 2));
                else
                        return (((row & 0xffff) << 15) |
                                ((bank & 0x7) << 12) |
                                ((col & 0x3ff) << 2));
        } else {
                /* 16 bit */
                if (drvdata->cs_addr_sel[cs])
                        /* bank interleaved */
                        return (((row & 0xffe0) << 14) |
                                ((bank & 0x7) << 16) |
                                ((row & 0x1f) << 11) |
                                ((col & 0x3ff) << 1));
                else
                        return (((row & 0xffff) << 14) |
                                ((bank & 0x7) << 11) |
                                ((col & 0x3ff) << 1));
        }
}

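/*
 * Polled check: read the single/double bit error counters and the error
 * capture registers, clear them, and report what was found. Only the most
 * recent error has its address decoded; any earlier errors that were only
 * counted are reported without details.
 */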
static void axp_mc_check(struct mem_ctl_info *mci)
{
        struct axp_mc_drvdata *drvdata = mci->pvt_info;
        uint32_t data_h, data_l, recv_ecc, calc_ecc, addr;
        uint32_t cnt_sbe, cnt_dbe, cause_err, cause_msg;
        uint32_t row_val, col_val, bank_val, addr_val;
        uint8_t syndrome_val, cs_val;
        char *msg = drvdata->msg;

        data_h = readl(drvdata->base + SDRAM_ERR_DATA_H_REG);
        data_l = readl(drvdata->base + SDRAM_ERR_DATA_L_REG);
        recv_ecc = readl(drvdata->base + SDRAM_ERR_RECV_ECC_REG);
        calc_ecc = readl(drvdata->base + SDRAM_ERR_CALC_ECC_REG);
        addr = readl(drvdata->base + SDRAM_ERR_ADDR_REG);
        cnt_sbe = readl(drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
        cnt_dbe = readl(drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
        cause_err = readl(drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
        cause_msg = readl(drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

        /* clear cause registers */
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
               drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
               drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

        /* clear error counter registers */
        if (cnt_sbe)
                writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
        if (cnt_dbe)
                writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

        if (!cnt_sbe && !cnt_dbe)
                return;

        if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
                if (cnt_sbe)
                        cnt_sbe--;
                else
                        dev_warn(mci->pdev, "inconsistent SBE count detected\n");
        } else {
                if (cnt_dbe)
                        cnt_dbe--;
                else
                        dev_warn(mci->pdev, "inconsistent DBE count detected\n");
        }

        /* report earlier errors */
        if (cnt_sbe)
                edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                                     cnt_sbe, /* error count */
                                     0, 0, 0, /* pfn, offset, syndrome */
                                     -1, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name,
                                     "details unavailable (multiple errors)");

        if (cnt_dbe)
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
                                     cnt_dbe, /* error count */
                                     0, 0, 0, /* pfn, offset, syndrome */
                                     -1, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name,
                                     "details unavailable (multiple errors)");

        /* report details for most recent error */
        cs_val = (addr & SDRAM_ERR_ADDR_CS_MASK) >> SDRAM_ERR_ADDR_CS_OFFSET;
        bank_val = (addr & SDRAM_ERR_ADDR_BANK_MASK) >> SDRAM_ERR_ADDR_BANK_OFFSET;
        row_val = (calc_ecc & SDRAM_ERR_CALC_ECC_ROW_MASK) >> SDRAM_ERR_CALC_ECC_ROW_OFFSET;
        col_val = (addr & SDRAM_ERR_ADDR_COL_MASK) >> SDRAM_ERR_ADDR_COL_OFFSET;
        syndrome_val = (recv_ecc ^ calc_ecc) & 0xff;
        addr_val = axp_mc_calc_address(drvdata, cs_val, bank_val, row_val,
                                       col_val);
        msg += sprintf(msg, "row=0x%04x ", row_val); /* 11 chars */
        msg += sprintf(msg, "bank=0x%x ", bank_val); /*  9 chars */
        msg += sprintf(msg, "col=0x%04x ", col_val); /* 11 chars */
        msg += sprintf(msg, "cs=%d", cs_val);        /*  4 chars */

        if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
                edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                                     1, /* error count */
                                     addr_val >> PAGE_SHIFT,
                                     addr_val & ~PAGE_MASK,
                                     syndrome_val,
                                     cs_val, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name, drvdata->msg);
        } else {
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
                                     1, /* error count */
                                     addr_val >> PAGE_SHIFT,
                                     addr_val & ~PAGE_MASK,
                                     syndrome_val,
                                     cs_val, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name, drvdata->msg);
        }
}

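/*
 * Read the static SDRAM controller configuration once at probe time: the bus
 * width from SDRAM_CONFIG, and for each populated chip select its bank
 * interleaving mode, device width and density, which are used to fill in the
 * corresponding dimm_info entries.
 */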
static void axp_mc_read_config(struct mem_ctl_info *mci)
{
        struct axp_mc_drvdata *drvdata = mci->pvt_info;
        uint32_t config, addr_ctrl, rank_ctrl;
        unsigned int i, cs_struct, cs_size;
        struct dimm_info *dimm;

        config = readl(drvdata->base + SDRAM_CONFIG_REG);
        if (config & SDRAM_CONFIG_BUS_WIDTH_MASK)
                /* 64 bit */
                drvdata->width = 8;
        else
                /* 32 bit */
                drvdata->width = 4;

        addr_ctrl = readl(drvdata->base + SDRAM_ADDR_CTRL_REG);
        rank_ctrl = readl(drvdata->base + SDRAM_RANK_CTRL_REG);
        for (i = 0; i < SDRAM_NUM_CS; i++) {
                dimm = mci->dimms[i];

                if (!(rank_ctrl & SDRAM_RANK_CTRL_EXIST_MASK(i)))
                        continue;

                drvdata->cs_addr_sel[i] =
                        !!(addr_ctrl & SDRAM_ADDR_CTRL_ADDR_SEL_MASK(i));

                cs_struct = (addr_ctrl & SDRAM_ADDR_CTRL_STRUCT_MASK(i)) >> SDRAM_ADDR_CTRL_STRUCT_OFFSET(i);
                cs_size = ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(i)) >> (SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(i) - 2) |
                           ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_LOW_MASK(i)) >> SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(i)));

                switch (cs_size) {
                case 0: /* 2GBit */
                        dimm->nr_pages = 524288;
                        break;
                case 1: /* 256MBit */
                        dimm->nr_pages = 65536;
                        break;
                case 2: /* 512MBit */
                        dimm->nr_pages = 131072;
                        break;
                case 3: /* 1GBit */
                        dimm->nr_pages = 262144;
                        break;
                case 4: /* 4GBit */
                        dimm->nr_pages = 1048576;
                        break;
                case 5: /* 8GBit */
                        dimm->nr_pages = 2097152;
                        break;
                }

                dimm->grain = 8;
                dimm->dtype = cs_struct ? DEV_X16 : DEV_X8;
                dimm->mtype = (config & SDRAM_CONFIG_REGISTERED_MASK) ?
                        MEM_RDDR3 : MEM_DDR3;
                dimm->edac_mode = EDAC_SECDED;
        }
}

static const struct of_device_id axp_mc_of_match[] = {
        {.compatible = "marvell,armada-xp-sdram-controller",},
        {},
};
MODULE_DEVICE_TABLE(of, axp_mc_of_match);

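/*
 * Probe: map the controller registers, refuse to bind if ECC is disabled in
 * hardware, allocate a memory controller with a single chip-select layer,
 * read the static configuration, arm the single-bit-error threshold, clear
 * stale cause/counter state and register the MC for polled operation.
 */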
static int axp_mc_probe(struct platform_device *pdev)
{
        struct axp_mc_drvdata *drvdata;
        struct edac_mc_layer layers[1];
        const struct of_device_id *id;
        struct mem_ctl_info *mci;
        void __iomem *base;
        uint32_t config;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base)) {
                dev_err(&pdev->dev, "Unable to map regs\n");
                return PTR_ERR(base);
        }

        config = readl(base + SDRAM_CONFIG_REG);
        if (!(config & SDRAM_CONFIG_ECC_MASK)) {
                dev_warn(&pdev->dev, "SDRAM ECC is not enabled\n");
                return -EINVAL;
        }

        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = SDRAM_NUM_CS;
        layers[0].is_virt_csrow = true;

        mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata));
        if (!mci)
                return -ENOMEM;

        drvdata = mci->pvt_info;
        drvdata->base = base;
        mci->pdev = &pdev->dev;
        platform_set_drvdata(pdev, mci);

        id = of_match_device(axp_mc_of_match, &pdev->dev);
        mci->edac_check = axp_mc_check;
        mci->mtype_cap = MEM_FLAG_DDR3;
        mci->edac_cap = EDAC_FLAG_SECDED;
        mci->mod_name = pdev->dev.driver->name;
        mci->ctl_name = id ? id->compatible : "unknown";
        mci->dev_name = dev_name(&pdev->dev);
        mci->scrub_mode = SCRUB_NONE;

        axp_mc_read_config(mci);

        /* These SoCs have a reduced width bus */
        if (of_machine_is_compatible("marvell,armada380") ||
            of_machine_is_compatible("marvell,armadaxp-98dx3236"))
                drvdata->width /= 2;

        /* configure SBE threshold */
        /* it seems that SBEs are not captured otherwise */
        writel(1 << SDRAM_ERR_CTRL_THR_OFFSET, drvdata->base + SDRAM_ERR_CTRL_REG);

        /* clear cause registers */
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

        /* clear counter registers */
        writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
        writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

        if (edac_mc_add_mc(mci)) {
                edac_mc_free(mci);
                return -EINVAL;
        }
        edac_op_state = EDAC_OPSTATE_POLL;

        return 0;
}

static int axp_mc_remove(struct platform_device *pdev)
{
        struct mem_ctl_info *mci = platform_get_drvdata(pdev);

        edac_mc_del_mc(&pdev->dev);
        edac_mc_free(mci);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver axp_mc_driver = {
        .probe = axp_mc_probe,
        .remove = axp_mc_remove,
        .driver = {
                .name = "armada_xp_mc_edac",
                .of_match_table = of_match_ptr(axp_mc_of_match),
        },
};

/************************ EDAC Device (L2 Cache) ***************************/

struct aurora_l2_drvdata {
        void __iomem *base;
        char msg[128];
        /* error injection via debugfs */
        uint32_t inject_addr;
        uint32_t inject_mask;
        uint8_t inject_ctl;
        struct dentry *debugfs;
};

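/*
 * When CONFIG_EDAC_DEBUG is enabled, the inject_* fields of aurora_l2_drvdata
 * are exposed as debugfs files and written back to the Aurora error-injection
 * registers on every poll, so a requested injection is re-armed each polling
 * interval.
 */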
#ifdef CONFIG_EDAC_DEBUG
static void aurora_l2_inject(struct aurora_l2_drvdata *drvdata)
{
        drvdata->inject_addr &= AURORA_ERR_INJECT_CTL_ADDR_MASK;
        drvdata->inject_ctl &= AURORA_ERR_INJECT_CTL_EN_MASK;
        writel(0, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
        writel(drvdata->inject_mask, drvdata->base + AURORA_ERR_INJECT_MASK_REG);
        writel(drvdata->inject_addr | drvdata->inject_ctl, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
}
#endif

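/*
 * Polled check for the L2 cache: read the correctable/uncorrectable error
 * counters and the attribute/address/way capture registers, clear the
 * counters, decode the captured attributes (requester, transaction type,
 * error type, address, index, way) into a message, then report one detailed
 * event plus "details unavailable" events for any remaining counted errors.
 */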
static void aurora_l2_check(struct edac_device_ctl_info *dci)
{
        struct aurora_l2_drvdata *drvdata = dci->pvt_info;
        uint32_t cnt, src, txn, err, attr_cap, addr_cap, way_cap;
        unsigned int cnt_ce, cnt_ue;
        char *msg = drvdata->msg;
        size_t size = sizeof(drvdata->msg);
        size_t len = 0;

        cnt = readl(drvdata->base + AURORA_ERR_CNT_REG);
        attr_cap = readl(drvdata->base + AURORA_ERR_ATTR_CAP_REG);
        addr_cap = readl(drvdata->base + AURORA_ERR_ADDR_CAP_REG);
        way_cap = readl(drvdata->base + AURORA_ERR_WAY_CAP_REG);

        cnt_ce = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET;
        cnt_ue = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET;
        /* clear error counter registers */
        if (cnt_ce || cnt_ue)
                writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);

        if (!(attr_cap & AURORA_ERR_ATTR_CAP_VALID))
                goto clear_remaining;

        src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF;
        if (src <= 3)
                len += scnprintf(msg+len, size-len, "src=CPU%d ", src);
        else
                len += scnprintf(msg+len, size-len, "src=IO ");

        txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF;
        switch (txn) {
        case 0:
                len += scnprintf(msg+len, size-len, "txn=Data-Read ");
                break;
        case 1:
                len += scnprintf(msg+len, size-len, "txn=Isn-Read ");
                break;
        case 2:
                len += scnprintf(msg+len, size-len, "txn=Clean-Flush ");
                break;
        case 3:
                len += scnprintf(msg+len, size-len, "txn=Eviction ");
                break;
        case 4:
                len += scnprintf(msg+len, size-len,
                                 "txn=Read-Modify-Write ");
                break;
        }

        err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF;
        switch (err) {
        case 0:
                len += scnprintf(msg+len, size-len, "err=CorrECC ");
                break;
        case 1:
                len += scnprintf(msg+len, size-len, "err=UnCorrECC ");
                break;
        case 2:
                len += scnprintf(msg+len, size-len, "err=TagParity ");
                break;
        }

        len += scnprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK);
        len += scnprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF);
        len += scnprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET);

        /* clear error capture registers */
        writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);

        if (err) {
                /* UnCorrECC or TagParity */
                if (cnt_ue)
                        cnt_ue--;
                edac_device_handle_ue(dci, 0, 0, drvdata->msg);
        } else {
                if (cnt_ce)
                        cnt_ce--;
                edac_device_handle_ce(dci, 0, 0, drvdata->msg);
        }

clear_remaining:
        /* report remaining errors */
        while (cnt_ue--)
                edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)");
        while (cnt_ce--)
                edac_device_handle_ce(dci, 0, 0, "details unavailable (multiple errors)");
}

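/*
 * Poll callback: run the error check and, when EDAC debug support is built
 * in, rewrite the injection registers so a pending injection stays armed.
 */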
static void aurora_l2_poll(struct edac_device_ctl_info *dci)
{
#ifdef CONFIG_EDAC_DEBUG
        struct aurora_l2_drvdata *drvdata = dci->pvt_info;
#endif

        aurora_l2_check(dci);
#ifdef CONFIG_EDAC_DEBUG
        aurora_l2_inject(drvdata);
#endif
}

static const struct of_device_id aurora_l2_of_match[] = {
        {.compatible = "marvell,aurora-system-cache",},
        {},
};
MODULE_DEVICE_TABLE(of, aurora_l2_of_match);

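/*
 * Probe: map the cache controller, warn if tag parity or data ECC is
 * disabled in the auxiliary control register (such errors cannot be
 * detected then), register an edac_device polled via aurora_l2_poll, and
 * create the debugfs injection knobs when EDAC debug support is enabled.
 */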
static int aurora_l2_probe(struct platform_device *pdev)
{
        struct aurora_l2_drvdata *drvdata;
        struct edac_device_ctl_info *dci;
        const struct of_device_id *id;
        uint32_t l2x0_aux_ctrl;
        void __iomem *base;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base)) {
                dev_err(&pdev->dev, "Unable to map regs\n");
                return PTR_ERR(base);
        }

        l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL);
        if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN))
                dev_warn(&pdev->dev, "tag parity is not enabled\n");
        if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN))
                dev_warn(&pdev->dev, "data ECC is not enabled\n");

        dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
                                         "cpu", 1, "L", 1, 2, NULL, 0, 0);
        if (!dci)
                return -ENOMEM;

        drvdata = dci->pvt_info;
        drvdata->base = base;
        dci->dev = &pdev->dev;
        platform_set_drvdata(pdev, dci);

        id = of_match_device(aurora_l2_of_match, &pdev->dev);
        dci->edac_check = aurora_l2_poll;
        dci->mod_name = pdev->dev.driver->name;
        dci->ctl_name = id ? id->compatible : "unknown";
        dci->dev_name = dev_name(&pdev->dev);

        /* clear registers */
        writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);
        writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);

        if (edac_device_add_device(dci)) {
                edac_device_free_ctl_info(dci);
                return -EINVAL;
        }

#ifdef CONFIG_EDAC_DEBUG
        drvdata->debugfs = edac_debugfs_create_dir(dev_name(&pdev->dev));
        if (drvdata->debugfs) {
                edac_debugfs_create_x32("inject_addr", 0644,
                                        drvdata->debugfs,
                                        &drvdata->inject_addr);
                edac_debugfs_create_x32("inject_mask", 0644,
                                        drvdata->debugfs,
                                        &drvdata->inject_mask);
                edac_debugfs_create_x8("inject_ctl", 0644,
                                       drvdata->debugfs, &drvdata->inject_ctl);
        }
#endif

        return 0;
}

static int aurora_l2_remove(struct platform_device *pdev)
{
        struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
#ifdef CONFIG_EDAC_DEBUG
        struct aurora_l2_drvdata *drvdata = dci->pvt_info;

        edac_debugfs_remove_recursive(drvdata->debugfs);
#endif
        edac_device_del_device(&pdev->dev);
        edac_device_free_ctl_info(dci);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver aurora_l2_driver = {
        .probe = aurora_l2_probe,
        .remove = aurora_l2_remove,
        .driver = {
                .name = "aurora_l2_edac",
                .of_match_table = of_match_ptr(aurora_l2_of_match),
        },
};

/************************ Driver registration ******************************/

static struct platform_driver * const drivers[] = {
        &axp_mc_driver,
        &aurora_l2_driver,
};

static int __init armada_xp_edac_init(void)
{
        int res;

        /* only polling is supported */
        edac_op_state = EDAC_OPSTATE_POLL;

        res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (res)
                pr_warn("Armada XP EDAC drivers fail to register\n");

        return 0;
}
module_init(armada_xp_edac_init);

static void __exit armada_xp_edac_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(armada_xp_edac_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pengutronix");
MODULE_DESCRIPTION("EDAC Drivers for Marvell Armada XP SDRAM and L2 Cache Controller");