synopsys_edac.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Synopsys DDR ECC Driver
  4. * This driver is based on ppc4xx_edac.c drivers
  5. *
  6. * Copyright (C) 2012 - 2014 Xilinx, Inc.
  7. */
  8. #include <linux/edac.h>
  9. #include <linux/module.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/of.h>
  13. #include <linux/of_device.h>
  14. #include "edac_module.h"
  15. /* Number of cs_rows needed per memory controller */
  16. #define SYNPS_EDAC_NR_CSROWS 1
  17. /* Number of channels per memory controller */
  18. #define SYNPS_EDAC_NR_CHANS 1
  19. /* Granularity of reported error in bytes */
  20. #define SYNPS_EDAC_ERR_GRAIN 1
  21. #define SYNPS_EDAC_MSG_SIZE 256
  22. #define SYNPS_EDAC_MOD_STRING "synps_edac"
  23. #define SYNPS_EDAC_MOD_VER "1"
  24. /* Synopsys DDR memory controller registers that are relevant to ECC */
  25. #define CTRL_OFST 0x0
  26. #define T_ZQ_OFST 0xA4
  27. /* ECC control register */
  28. #define ECC_CTRL_OFST 0xC4
  29. /* ECC log register */
  30. #define CE_LOG_OFST 0xC8
  31. /* ECC address register */
  32. #define CE_ADDR_OFST 0xCC
  33. /* ECC data[31:0] register */
  34. #define CE_DATA_31_0_OFST 0xD0
  35. /* Uncorrectable error info registers */
  36. #define UE_LOG_OFST 0xDC
  37. #define UE_ADDR_OFST 0xE0
  38. #define UE_DATA_31_0_OFST 0xE4
  39. #define STAT_OFST 0xF0
  40. #define SCRUB_OFST 0xF4
  41. /* Control register bit field definitions */
  42. #define CTRL_BW_MASK 0xC
  43. #define CTRL_BW_SHIFT 2
  44. #define DDRCTL_WDTH_16 1
  45. #define DDRCTL_WDTH_32 0
  46. /* ZQ register bit field definitions */
  47. #define T_ZQ_DDRMODE_MASK 0x2
  48. /* ECC control register bit field definitions */
  49. #define ECC_CTRL_CLR_CE_ERR 0x2
  50. #define ECC_CTRL_CLR_UE_ERR 0x1
  51. /* ECC correctable/uncorrectable error log register definitions */
  52. #define LOG_VALID 0x1
  53. #define CE_LOG_BITPOS_MASK 0xFE
  54. #define CE_LOG_BITPOS_SHIFT 1
  55. /* ECC correctable/uncorrectable error address register definitions */
  56. #define ADDR_COL_MASK 0xFFF
  57. #define ADDR_ROW_MASK 0xFFFF000
  58. #define ADDR_ROW_SHIFT 12
  59. #define ADDR_BANK_MASK 0x70000000
  60. #define ADDR_BANK_SHIFT 28
  61. /* ECC statistic register definitions */
  62. #define STAT_UECNT_MASK 0xFF
  63. #define STAT_CECNT_MASK 0xFF00
  64. #define STAT_CECNT_SHIFT 8
  65. /* ECC scrub register definitions */
  66. #define SCRUB_MODE_MASK 0x7
  67. #define SCRUB_MODE_SECDED 0x4
  68. /* DDR ECC Quirks */
  69. #define DDR_ECC_INTR_SUPPORT BIT(0)
  70. #define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
  71. #define DDR_ECC_INTR_SELF_CLEAR BIT(2)
  72. /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
  73. /* ECC Configuration Registers */
  74. #define ECC_CFG0_OFST 0x70
  75. #define ECC_CFG1_OFST 0x74
  76. /* ECC Status Register */
  77. #define ECC_STAT_OFST 0x78
  78. /* ECC Clear Register */
  79. #define ECC_CLR_OFST 0x7C
  80. /* ECC Error count Register */
  81. #define ECC_ERRCNT_OFST 0x80
  82. /* ECC Corrected Error Address Register */
  83. #define ECC_CEADDR0_OFST 0x84
  84. #define ECC_CEADDR1_OFST 0x88
  85. /* ECC Syndrome Registers */
  86. #define ECC_CSYND0_OFST 0x8C
  87. #define ECC_CSYND1_OFST 0x90
  88. #define ECC_CSYND2_OFST 0x94
  89. /* ECC Bit Mask0 Address Register */
  90. #define ECC_BITMASK0_OFST 0x98
  91. #define ECC_BITMASK1_OFST 0x9C
  92. #define ECC_BITMASK2_OFST 0xA0
  93. /* ECC UnCorrected Error Address Register */
  94. #define ECC_UEADDR0_OFST 0xA4
  95. #define ECC_UEADDR1_OFST 0xA8
  96. /* ECC Syndrome Registers */
  97. #define ECC_UESYND0_OFST 0xAC
  98. #define ECC_UESYND1_OFST 0xB0
  99. #define ECC_UESYND2_OFST 0xB4
  100. /* ECC Poison Address Reg */
  101. #define ECC_POISON0_OFST 0xB8
  102. #define ECC_POISON1_OFST 0xBC
  103. #define ECC_ADDRMAP0_OFFSET 0x200
  104. /* Control register bitfield definitions */
  105. #define ECC_CTRL_BUSWIDTH_MASK 0x3000
  106. #define ECC_CTRL_BUSWIDTH_SHIFT 12
  107. #define ECC_CTRL_CLR_CE_ERRCNT BIT(2)
  108. #define ECC_CTRL_CLR_UE_ERRCNT BIT(3)
  109. /* DDR Control Register width definitions */
  110. #define DDRCTL_EWDTH_16 2
  111. #define DDRCTL_EWDTH_32 1
  112. #define DDRCTL_EWDTH_64 0
  113. /* ECC status register definitions */
  114. #define ECC_STAT_UECNT_MASK 0xF0000
  115. #define ECC_STAT_UECNT_SHIFT 16
  116. #define ECC_STAT_CECNT_MASK 0xF00
  117. #define ECC_STAT_CECNT_SHIFT 8
  118. #define ECC_STAT_BITNUM_MASK 0x7F
  119. /* ECC error count register definitions */
  120. #define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
  121. #define ECC_ERRCNT_UECNT_SHIFT 16
  122. #define ECC_ERRCNT_CECNT_MASK 0xFFFF
  123. /* DDR QOS Interrupt register definitions */
  124. #define DDR_QOS_IRQ_STAT_OFST 0x20200
  125. #define DDR_QOSUE_MASK 0x4
  126. #define DDR_QOSCE_MASK 0x2
  127. #define ECC_CE_UE_INTR_MASK 0x6
  128. #define DDR_QOS_IRQ_EN_OFST 0x20208
  129. #define DDR_QOS_IRQ_DB_OFST 0x2020C
  130. /* DDR QOS Interrupt register definitions */
  131. #define DDR_UE_MASK BIT(9)
  132. #define DDR_CE_MASK BIT(8)
  133. /* ECC Corrected Error Register Mask and Shifts*/
  134. #define ECC_CEADDR0_RW_MASK 0x3FFFF
  135. #define ECC_CEADDR0_RNK_MASK BIT(24)
  136. #define ECC_CEADDR1_BNKGRP_MASK 0x3000000
  137. #define ECC_CEADDR1_BNKNR_MASK 0x70000
  138. #define ECC_CEADDR1_BLKNR_MASK 0xFFF
  139. #define ECC_CEADDR1_BNKGRP_SHIFT 24
  140. #define ECC_CEADDR1_BNKNR_SHIFT 16
  141. /* ECC Poison register shifts */
  142. #define ECC_POISON0_RANK_SHIFT 24
  143. #define ECC_POISON0_RANK_MASK BIT(24)
  144. #define ECC_POISON0_COLUMN_SHIFT 0
  145. #define ECC_POISON0_COLUMN_MASK 0xFFF
  146. #define ECC_POISON1_BG_SHIFT 28
  147. #define ECC_POISON1_BG_MASK 0x30000000
  148. #define ECC_POISON1_BANKNR_SHIFT 24
  149. #define ECC_POISON1_BANKNR_MASK 0x7000000
  150. #define ECC_POISON1_ROW_SHIFT 0
  151. #define ECC_POISON1_ROW_MASK 0x3FFFF
  152. /* DDR Memory type defines */
  153. #define MEM_TYPE_DDR3 0x1
  154. #define MEM_TYPE_LPDDR3 0x8
  155. #define MEM_TYPE_DDR2 0x4
  156. #define MEM_TYPE_DDR4 0x10
  157. #define MEM_TYPE_LPDDR4 0x20
  158. /* DDRC Software control register */
  159. #define DDRC_SWCTL 0x320
  160. /* DDRC ECC CE & UE poison mask */
  161. #define ECC_CEPOISON_MASK 0x3
  162. #define ECC_UEPOISON_MASK 0x1
  163. /* DDRC Device config masks */
  164. #define DDRC_MSTR_CFG_MASK 0xC0000000
  165. #define DDRC_MSTR_CFG_SHIFT 30
  166. #define DDRC_MSTR_CFG_X4_MASK 0x0
  167. #define DDRC_MSTR_CFG_X8_MASK 0x1
  168. #define DDRC_MSTR_CFG_X16_MASK 0x2
  169. #define DDRC_MSTR_CFG_X32_MASK 0x3
  170. #define DDR_MAX_ROW_SHIFT 18
  171. #define DDR_MAX_COL_SHIFT 14
  172. #define DDR_MAX_BANK_SHIFT 3
  173. #define DDR_MAX_BANKGRP_SHIFT 2
  174. #define ROW_MAX_VAL_MASK 0xF
  175. #define COL_MAX_VAL_MASK 0xF
  176. #define BANK_MAX_VAL_MASK 0x1F
  177. #define BANKGRP_MAX_VAL_MASK 0x1F
  178. #define RANK_MAX_VAL_MASK 0x1F
  179. #define ROW_B0_BASE 6
  180. #define ROW_B1_BASE 7
  181. #define ROW_B2_BASE 8
  182. #define ROW_B3_BASE 9
  183. #define ROW_B4_BASE 10
  184. #define ROW_B5_BASE 11
  185. #define ROW_B6_BASE 12
  186. #define ROW_B7_BASE 13
  187. #define ROW_B8_BASE 14
  188. #define ROW_B9_BASE 15
  189. #define ROW_B10_BASE 16
  190. #define ROW_B11_BASE 17
  191. #define ROW_B12_BASE 18
  192. #define ROW_B13_BASE 19
  193. #define ROW_B14_BASE 20
  194. #define ROW_B15_BASE 21
  195. #define ROW_B16_BASE 22
  196. #define ROW_B17_BASE 23
  197. #define COL_B2_BASE 2
  198. #define COL_B3_BASE 3
  199. #define COL_B4_BASE 4
  200. #define COL_B5_BASE 5
  201. #define COL_B6_BASE 6
  202. #define COL_B7_BASE 7
  203. #define COL_B8_BASE 8
  204. #define COL_B9_BASE 9
  205. #define COL_B10_BASE 10
  206. #define COL_B11_BASE 11
  207. #define COL_B12_BASE 12
  208. #define COL_B13_BASE 13
  209. #define BANK_B0_BASE 2
  210. #define BANK_B1_BASE 3
  211. #define BANK_B2_BASE 4
  212. #define BANKGRP_B0_BASE 2
  213. #define BANKGRP_B1_BASE 3
  214. #define RANK_B0_BASE 6
/**
 * struct ecc_error_info - ECC error log information.
 * @row:       Row number.
 * @col:       Column number.
 * @bank:      Bank number.
 * @bitpos:    Bit position of the corrected bit (CE only).
 * @data:      Data/syndrome captured for the error.
 * @bankgrpnr: Bank group number (ZynqMP-style controllers only).
 * @blknr:     Block number (ZynqMP-style controllers only).
 */
struct ecc_error_info {
	u32 row;
	u32 col;
	u32 bank;
	u32 bitpos;
	u32 data;
	u32 bankgrpnr;
	u32 blknr;
};
/**
 * struct synps_ecc_status - ECC status information to report.
 * @ce_cnt:  Correctable error count.
 * @ue_cnt:  Uncorrectable error count.
 * @ceinfo:  Correctable error log information.
 * @ueinfo:  Uncorrectable error log information.
 *
 * One snapshot of the controller's error state, filled by the
 * get_error_info() hook and consumed (then zeroed) by handle_error().
 */
struct synps_ecc_status {
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_error_info ceinfo;
	struct ecc_error_info ueinfo;
};
/**
 * struct synps_edac_priv - DDR memory controller private instance data.
 * @baseaddr:      Base address of the DDR controller.
 * @message:       Buffer for framing the event specific info.
 * @stat:          ECC status information.
 * @p_data:        Platform data (per-IP ops and quirks).
 * @ce_cnt:        Correctable Error count (running total).
 * @ue_cnt:        Uncorrectable Error count (running total).
 * @poison_addr:   Data poison address.
 * @row_shift:     Bit shifts for row bit.
 * @col_shift:     Bit shifts for column bit.
 * @bank_shift:    Bit shifts for bank bit.
 * @bankgrp_shift: Bit shifts for bank group bit.
 * @rank_shift:    Bit shifts for rank bit.
 */
struct synps_edac_priv {
	void __iomem *baseaddr;
	char message[SYNPS_EDAC_MSG_SIZE];
	struct synps_ecc_status stat;
	const struct synps_platform_data *p_data;
	u32 ce_cnt;
	u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
	/* The fields below exist only for the debug data-poison interface. */
	ulong poison_addr;
	u32 row_shift[18];
	u32 col_shift[14];
	u32 bank_shift[3];
	u32 bankgrp_shift[2];
	u32 rank_shift[1];
#endif
};
/**
 * struct synps_platform_data - synps platform data structure.
 * @get_error_info: Get EDAC error info (reads and clears pending errors).
 * @get_mtype:      Get mtype (EDAC memory type from controller registers).
 * @get_dtype:      Get dtype (EDAC device width from controller registers).
 * @get_ecc_state:  Get ECC state (whether SECDED ECC is enabled).
 * @quirks:         To differentiate IPs (DDR_ECC_* flag bits).
 */
struct synps_platform_data {
	int (*get_error_info)(struct synps_edac_priv *priv);
	enum mem_type (*get_mtype)(const void __iomem *base);
	enum dev_type (*get_dtype)(const void __iomem *base);
	bool (*get_ecc_state)(void __iomem *base);
	int quirks;
};
/**
 * zynq_get_error_info - Get the current ECC error info.
 * @priv: DDR memory controller private instance data.
 *
 * Read the Zynq DDRC status/log/address registers into priv->stat and
 * clear the latched errors in the controller.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	/* CE details are trustworthy only when the log's valid bit is set. */
	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	/* Same validity rule for the uncorrectable-error log. */
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
	/* Pulse the clear bits, then release them so new errors can latch. */
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}
/**
 * zynqmp_get_error_info - Get the current ECC error info.
 * @priv: DDR memory controller private instance data.
 *
 * Read the ZynqMP DDRC error-count/status/address registers into
 * priv->stat and clear the controller's latched error state.
 *
 * Return: one if there is no error otherwise returns zero.
 */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	/* Decode the corrected-error address (rank/row in reg0, rest in reg1). */
	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));

ue_err:
	if (!p->ue_cnt)
		goto out;

	/* Uncorrectable-error address uses the same layout/masks as the CE one. */
	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);

out:
	/* Clear both error logs and both error counters, then deassert. */
	clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
	clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);
	writel(0x0, base + ECC_CLR_OFST);

	return 0;
}
  391. /**
  392. * handle_error - Handle Correctable and Uncorrectable errors.
  393. * @mci: EDAC memory controller instance.
  394. * @p: Synopsys ECC status structure.
  395. *
  396. * Handles ECC correctable and uncorrectable errors.
  397. */
  398. static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
  399. {
  400. struct synps_edac_priv *priv = mci->pvt_info;
  401. struct ecc_error_info *pinf;
  402. if (p->ce_cnt) {
  403. pinf = &p->ceinfo;
  404. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
  405. snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
  406. "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
  407. "CE", pinf->row, pinf->bank,
  408. pinf->bankgrpnr, pinf->blknr,
  409. pinf->bitpos, pinf->data);
  410. } else {
  411. snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
  412. "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
  413. "CE", pinf->row, pinf->bank, pinf->col,
  414. pinf->bitpos, pinf->data);
  415. }
  416. edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
  417. p->ce_cnt, 0, 0, 0, 0, 0, -1,
  418. priv->message, "");
  419. }
  420. if (p->ue_cnt) {
  421. pinf = &p->ueinfo;
  422. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
  423. snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
  424. "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
  425. "UE", pinf->row, pinf->bank,
  426. pinf->bankgrpnr, pinf->blknr);
  427. } else {
  428. snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
  429. "DDR ECC error type :%s Row %d Bank %d Col %d ",
  430. "UE", pinf->row, pinf->bank, pinf->col);
  431. }
  432. edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
  433. p->ue_cnt, 0, 0, 0, 0, 0, -1,
  434. priv->message, "");
  435. }
  436. memset(p, 0, sizeof(*p));
  437. }
  438. static void enable_intr(struct synps_edac_priv *priv)
  439. {
  440. /* Enable UE/CE Interrupts */
  441. if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
  442. writel(DDR_UE_MASK | DDR_CE_MASK,
  443. priv->baseaddr + ECC_CLR_OFST);
  444. else
  445. writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
  446. priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
  447. }
  448. static void disable_intr(struct synps_edac_priv *priv)
  449. {
  450. /* Disable UE/CE Interrupts */
  451. if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
  452. writel(0x0, priv->baseaddr + ECC_CLR_OFST);
  453. else
  454. writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
  455. priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
  456. }
/**
 * intr_handler - Interrupt Handler for ECC interrupts.
 * @irq:    IRQ number.
 * @dev_id: Device ID (the mem_ctl_info registered with the IRQ).
 *
 * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
 */
static irqreturn_t intr_handler(int irq, void *dev_id)
{
	const struct synps_platform_data *p_data;
	struct mem_ctl_info *mci = dev_id;
	struct synps_edac_priv *priv;
	int status, regval;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	/*
	 * v3.0 of the controller has the ce/ue bits cleared automatically,
	 * so this condition does not apply.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
		if (!(regval & ECC_CE_UE_INTR_MASK))
			return IRQ_NONE;
	}

	/* get_error_info() returns non-zero when no error is pending. */
	status = p_data->get_error_info(priv);
	if (status)
		return IRQ_NONE;

	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
	/*
	 * v3.0 of the controller does not have this register.
	 * regval is only used here under the same quirk test that
	 * initialized it above, so it is never read uninitialized.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
	else
		enable_intr(priv);

	return IRQ_HANDLED;
}
  497. /**
  498. * check_errors - Check controller for ECC errors.
  499. * @mci: EDAC memory controller instance.
  500. *
  501. * Check and post ECC errors. Called by the polling thread.
  502. */
  503. static void check_errors(struct mem_ctl_info *mci)
  504. {
  505. const struct synps_platform_data *p_data;
  506. struct synps_edac_priv *priv;
  507. int status;
  508. priv = mci->pvt_info;
  509. p_data = priv->p_data;
  510. status = p_data->get_error_info(priv);
  511. if (status)
  512. return;
  513. priv->ce_cnt += priv->stat.ce_cnt;
  514. priv->ue_cnt += priv->stat.ue_cnt;
  515. handle_error(mci, &priv->stat);
  516. edac_dbg(3, "Total error count CE %d UE %d\n",
  517. priv->ce_cnt, priv->ue_cnt);
  518. }
  519. /**
  520. * zynq_get_dtype - Return the controller memory width.
  521. * @base: DDR memory controller base address.
  522. *
  523. * Get the EDAC device type width appropriate for the current controller
  524. * configuration.
  525. *
  526. * Return: a device type width enumeration.
  527. */
  528. static enum dev_type zynq_get_dtype(const void __iomem *base)
  529. {
  530. enum dev_type dt;
  531. u32 width;
  532. width = readl(base + CTRL_OFST);
  533. width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
  534. switch (width) {
  535. case DDRCTL_WDTH_16:
  536. dt = DEV_X2;
  537. break;
  538. case DDRCTL_WDTH_32:
  539. dt = DEV_X4;
  540. break;
  541. default:
  542. dt = DEV_UNKNOWN;
  543. }
  544. return dt;
  545. }
  546. /**
  547. * zynqmp_get_dtype - Return the controller memory width.
  548. * @base: DDR memory controller base address.
  549. *
  550. * Get the EDAC device type width appropriate for the current controller
  551. * configuration.
  552. *
  553. * Return: a device type width enumeration.
  554. */
  555. static enum dev_type zynqmp_get_dtype(const void __iomem *base)
  556. {
  557. enum dev_type dt;
  558. u32 width;
  559. width = readl(base + CTRL_OFST);
  560. width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
  561. switch (width) {
  562. case DDRCTL_EWDTH_16:
  563. dt = DEV_X2;
  564. break;
  565. case DDRCTL_EWDTH_32:
  566. dt = DEV_X4;
  567. break;
  568. case DDRCTL_EWDTH_64:
  569. dt = DEV_X8;
  570. break;
  571. default:
  572. dt = DEV_UNKNOWN;
  573. }
  574. return dt;
  575. }
  576. /**
  577. * zynq_get_ecc_state - Return the controller ECC enable/disable status.
  578. * @base: DDR memory controller base address.
  579. *
  580. * Get the ECC enable/disable status of the controller.
  581. *
  582. * Return: true if enabled, otherwise false.
  583. */
  584. static bool zynq_get_ecc_state(void __iomem *base)
  585. {
  586. enum dev_type dt;
  587. u32 ecctype;
  588. dt = zynq_get_dtype(base);
  589. if (dt == DEV_UNKNOWN)
  590. return false;
  591. ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
  592. if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
  593. return true;
  594. return false;
  595. }
  596. /**
  597. * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
  598. * @base: DDR memory controller base address.
  599. *
  600. * Get the ECC enable/disable status for the controller.
  601. *
  602. * Return: a ECC status boolean i.e true/false - enabled/disabled.
  603. */
  604. static bool zynqmp_get_ecc_state(void __iomem *base)
  605. {
  606. enum dev_type dt;
  607. u32 ecctype;
  608. dt = zynqmp_get_dtype(base);
  609. if (dt == DEV_UNKNOWN)
  610. return false;
  611. ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
  612. if ((ecctype == SCRUB_MODE_SECDED) &&
  613. ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
  614. return true;
  615. return false;
  616. }
  617. /**
  618. * get_memsize - Read the size of the attached memory device.
  619. *
  620. * Return: the memory size in bytes.
  621. */
  622. static u32 get_memsize(void)
  623. {
  624. struct sysinfo inf;
  625. si_meminfo(&inf);
  626. return inf.totalram * inf.mem_unit;
  627. }
  628. /**
  629. * zynq_get_mtype - Return the controller memory type.
  630. * @base: Synopsys ECC status structure.
  631. *
  632. * Get the EDAC memory type appropriate for the current controller
  633. * configuration.
  634. *
  635. * Return: a memory type enumeration.
  636. */
  637. static enum mem_type zynq_get_mtype(const void __iomem *base)
  638. {
  639. enum mem_type mt;
  640. u32 memtype;
  641. memtype = readl(base + T_ZQ_OFST);
  642. if (memtype & T_ZQ_DDRMODE_MASK)
  643. mt = MEM_DDR3;
  644. else
  645. mt = MEM_DDR2;
  646. return mt;
  647. }
  648. /**
  649. * zynqmp_get_mtype - Returns controller memory type.
  650. * @base: Synopsys ECC status structure.
  651. *
  652. * Get the EDAC memory type appropriate for the current controller
  653. * configuration.
  654. *
  655. * Return: a memory type enumeration.
  656. */
  657. static enum mem_type zynqmp_get_mtype(const void __iomem *base)
  658. {
  659. enum mem_type mt;
  660. u32 memtype;
  661. memtype = readl(base + CTRL_OFST);
  662. if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
  663. mt = MEM_DDR3;
  664. else if (memtype & MEM_TYPE_DDR2)
  665. mt = MEM_RDDR2;
  666. else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
  667. mt = MEM_DDR4;
  668. else
  669. mt = MEM_EMPTY;
  670. return mt;
  671. }
  672. /**
  673. * init_csrows - Initialize the csrow data.
  674. * @mci: EDAC memory controller instance.
  675. *
  676. * Initialize the chip select rows associated with the EDAC memory
  677. * controller instance.
  678. */
  679. static void init_csrows(struct mem_ctl_info *mci)
  680. {
  681. struct synps_edac_priv *priv = mci->pvt_info;
  682. const struct synps_platform_data *p_data;
  683. struct csrow_info *csi;
  684. struct dimm_info *dimm;
  685. u32 size, row;
  686. int j;
  687. p_data = priv->p_data;
  688. for (row = 0; row < mci->nr_csrows; row++) {
  689. csi = mci->csrows[row];
  690. size = get_memsize();
  691. for (j = 0; j < csi->nr_channels; j++) {
  692. dimm = csi->channels[j]->dimm;
  693. dimm->edac_mode = EDAC_SECDED;
  694. dimm->mtype = p_data->get_mtype(priv->baseaddr);
  695. dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
  696. dimm->grain = SYNPS_EDAC_ERR_GRAIN;
  697. dimm->dtype = p_data->get_dtype(priv->baseaddr);
  698. }
  699. }
  700. }
/**
 * mc_init - Initialize one driver instance.
 * @mci:  EDAC memory controller instance.
 * @pdev: platform device.
 *
 * Perform initialization of the EDAC memory controller instance and
 * related driver-private data associated with the memory controller the
 * instance is bound to.
 */
static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
{
	struct synps_edac_priv *priv;

	mci->pdev = &pdev->dev;
	priv = mci->pvt_info;
	platform_set_drvdata(pdev, mci);

	/* Initialize controller capabilities and configuration */
	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->scrub_cap = SCRUB_HW_SRC;
	mci->scrub_mode = SCRUB_NONE;

	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->ctl_name = "synps_ddr_controller";
	mci->dev_name = SYNPS_EDAC_MOD_STRING;
	mci->mod_name = SYNPS_EDAC_MOD_VER;

	/*
	 * IRQ-capable controllers report errors from the interrupt handler;
	 * others fall back to the EDAC polling thread via ->edac_check.
	 */
	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
		edac_op_state = EDAC_OPSTATE_INT;
	} else {
		edac_op_state = EDAC_OPSTATE_POLL;
		mci->edac_check = check_errors;
	}

	mci->ctl_page_to_phys = NULL;

	init_csrows(mci);
}
  734. static int setup_irq(struct mem_ctl_info *mci,
  735. struct platform_device *pdev)
  736. {
  737. struct synps_edac_priv *priv = mci->pvt_info;
  738. int ret, irq;
  739. irq = platform_get_irq(pdev, 0);
  740. if (irq < 0) {
  741. edac_printk(KERN_ERR, EDAC_MC,
  742. "No IRQ %d in DT\n", irq);
  743. return irq;
  744. }
  745. ret = devm_request_irq(&pdev->dev, irq, intr_handler,
  746. 0, dev_name(&pdev->dev), mci);
  747. if (ret < 0) {
  748. edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
  749. return ret;
  750. }
  751. enable_intr(priv);
  752. return 0;
  753. }
/* Zynq-7000 DDRC: no interrupt support, errors are collected by polling. */
static const struct synps_platform_data zynq_edac_def = {
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.get_ecc_state	= zynq_get_ecc_state,
	.quirks		= 0,
};
/* ZynqMP DDRC: interrupt-driven; data poison only with CONFIG_EDAC_DEBUG. */
static const struct synps_platform_data zynqmp_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks		= (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
/*
 * Generic Synopsys DDRC 3.80a: same ops as ZynqMP, plus self-clearing
 * interrupt status (DDR_ECC_INTR_SELF_CLEAR).
 */
static const struct synps_platform_data synopsys_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks		= (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			| DDR_ECC_DATA_POISON_SUPPORT
#endif
			),
};
/* OF match table: selects the per-SoC ops/quirks via the .data pointer. */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};
  800. MODULE_DEVICE_TABLE(of, synps_edac_match);
  801. #ifdef CONFIG_EDAC_DEBUG
  802. #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
  803. /**
  804. * ddr_poison_setup - Update poison registers.
  805. * @priv: DDR memory controller private instance data.
  806. *
  807. * Update poison registers as per DDR mapping.
  808. * Return: none.
  809. */
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
	int index;
	ulong hif_addr = 0;

	/*
	 * Convert the requested system address to a HIF address by dropping
	 * the 3 LSBs (64-bit bus-word offset).
	 */
	hif_addr = priv->poison_addr >> 3;

	/* Collect row bits; a zero shift marks the end of the mapped bits. */
	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
		if (priv->row_shift[index])
			row |= (((hif_addr >> priv->row_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	/*
	 * Collect column bits. Columns 0-2 are always mapped (their shift
	 * may legitimately be 0 — see setup_column_address_map()), hence the
	 * "index < 3" exception to the zero-terminator rule.
	 */
	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
		if (priv->col_shift[index] || index < 3)
			col |= (((hif_addr >> priv->col_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	/* Collect bank bits. */
	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
		if (priv->bank_shift[index])
			bank |= (((hif_addr >> priv->bank_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	/* Collect bank-group bits. */
	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
		if (priv->bankgrp_shift[index])
			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
						& BIT(0)) << index);
		else
			break;
	}

	/* Single rank bit, if a rank address bit is mapped at all. */
	if (priv->rank_shift[0])
		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);

	/* Program POISON0 (rank, column) and POISON1 (bank group, bank, row). */
	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
	writel(regval, priv->baseaddr + ECC_POISON0_OFST);

	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
}
  854. static ssize_t inject_data_error_show(struct device *dev,
  855. struct device_attribute *mattr,
  856. char *data)
  857. {
  858. struct mem_ctl_info *mci = to_mci(dev);
  859. struct synps_edac_priv *priv = mci->pvt_info;
  860. return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
  861. "Error injection Address: 0x%lx\n\r",
  862. readl(priv->baseaddr + ECC_POISON0_OFST),
  863. readl(priv->baseaddr + ECC_POISON1_OFST),
  864. priv->poison_addr);
  865. }
  866. static ssize_t inject_data_error_store(struct device *dev,
  867. struct device_attribute *mattr,
  868. const char *data, size_t count)
  869. {
  870. struct mem_ctl_info *mci = to_mci(dev);
  871. struct synps_edac_priv *priv = mci->pvt_info;
  872. if (kstrtoul(data, 0, &priv->poison_addr))
  873. return -EINVAL;
  874. ddr_poison_setup(priv);
  875. return count;
  876. }
  877. static ssize_t inject_data_poison_show(struct device *dev,
  878. struct device_attribute *mattr,
  879. char *data)
  880. {
  881. struct mem_ctl_info *mci = to_mci(dev);
  882. struct synps_edac_priv *priv = mci->pvt_info;
  883. return sprintf(data, "Data Poisoning: %s\n\r",
  884. (((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
  885. ? ("Correctable Error") : ("UnCorrectable Error"));
  886. }
  887. static ssize_t inject_data_poison_store(struct device *dev,
  888. struct device_attribute *mattr,
  889. const char *data, size_t count)
  890. {
  891. struct mem_ctl_info *mci = to_mci(dev);
  892. struct synps_edac_priv *priv = mci->pvt_info;
  893. writel(0, priv->baseaddr + DDRC_SWCTL);
  894. if (strncmp(data, "CE", 2) == 0)
  895. writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
  896. else
  897. writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
  898. writel(1, priv->baseaddr + DDRC_SWCTL);
  899. return count;
  900. }
  901. static DEVICE_ATTR_RW(inject_data_error);
  902. static DEVICE_ATTR_RW(inject_data_poison);
  903. static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
  904. {
  905. int rc;
  906. rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
  907. if (rc < 0)
  908. return rc;
  909. rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
  910. if (rc < 0)
  911. return rc;
  912. return 0;
  913. }
  914. static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
  915. {
  916. device_remove_file(&mci->dev, &dev_attr_inject_data_error);
  917. device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
  918. }
/* Decode the ADDRMAP registers into per-row-bit HIF shift amounts. */
static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 addrmap_row_b2_10;
	int index;

	/* Row bits 0 and 1 always come from ADDRMAP5's two low byte fields. */
	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
	priv->row_shift[1] = ((addrmap[5] >> 8) &
			ROW_MAX_VAL_MASK) + ROW_B1_BASE;

	/*
	 * ADDRMAP5[23:16] carries one common offset for row bits 2-10 unless
	 * it reads as the all-ones "not used" value, in which case each of
	 * those bits is programmed individually in ADDRMAP9..ADDRMAP11.
	 */
	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
		for (index = 2; index < 11; index++)
			priv->row_shift[index] = addrmap_row_b2_10 +
				index + ROW_B0_BASE;
	} else {
		priv->row_shift[2] = (addrmap[9] &
				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
		priv->row_shift[3] = ((addrmap[9] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
		priv->row_shift[4] = ((addrmap[9] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
		priv->row_shift[5] = ((addrmap[9] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
		priv->row_shift[6] = (addrmap[10] &
				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
		priv->row_shift[7] = ((addrmap[10] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
		priv->row_shift[8] = ((addrmap[10] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
		priv->row_shift[9] = ((addrmap[10] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
		priv->row_shift[10] = (addrmap[11] &
				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
	}

	/*
	 * Row bits 11-17 are optional: an all-ones field means the bit is not
	 * part of the row address, recorded here as a shift of 0 (which
	 * ddr_poison_setup() treats as "end of mapped row bits").
	 */
	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
}
/* Decode the ADDRMAP registers into per-column-bit HIF shift amounts. */
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	/* Column bits 0 and 1 always map straight through (shift 0 and 1). */
	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	/*
	 * Column bits 4-9 are optional: an all-ones field means the bit is
	 * unused and its shift is recorded as 0.
	 */
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);

	/*
	 * Which ADDRMAP fields feed the high column bits depends on the
	 * effective bus width and on whether the memory is LPDDR3.
	 * Note the non-LPDDR3 branches deliberately skip col_shift[10]/[12].
	 */
	if (width == DDRCTL_EWDTH_64) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

	/*
	 * NOTE(review): for a non-zero width value the column shifts are
	 * compacted downwards by `width` positions, reusing the raw bus-width
	 * field value as the shift distance — confirm against the DDRC
	 * ADDRMAP documentation.
	 */
	if (width) {
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}
}
/* Decode ADDRMAP1 into the (up to three) bank-bit HIF shift amounts. */
static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
	priv->bank_shift[1] = ((addrmap[1] >> 8) &
				BANK_MAX_VAL_MASK) + BANK_B1_BASE;
	/* Bank bit 2 is optional: all-ones means unused (shift 0). */
	priv->bank_shift[2] = (((addrmap[1] >> 16) &
				BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
				(((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
				 BANK_B2_BASE);
}
/* Decode ADDRMAP8 into the bank-group-bit HIF shift amounts. */
static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	priv->bankgrp_shift[0] = (addrmap[8] &
				BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
	/* Bank-group bit 1 is optional: all-ones means unused (shift 0). */
	priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
				BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
				& BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
}
/* Decode ADDRMAP0 into the rank-bit HIF shift (0 when no rank bit is mapped). */
static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
				RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
				RANK_MAX_VAL_MASK) + RANK_B0_BASE);
}
  1098. /**
  1099. * setup_address_map - Set Address Map by querying ADDRMAP registers.
  1100. * @priv: DDR memory controller private instance data.
  1101. *
  1102. * Set Address Map by querying ADDRMAP registers.
  1103. *
  1104. * Return: none.
  1105. */
  1106. static void setup_address_map(struct synps_edac_priv *priv)
  1107. {
  1108. u32 addrmap[12];
  1109. int index;
  1110. for (index = 0; index < 12; index++) {
  1111. u32 addrmap_offset;
  1112. addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
  1113. addrmap[index] = readl(priv->baseaddr + addrmap_offset);
  1114. }
  1115. setup_row_address_map(priv, addrmap);
  1116. setup_column_address_map(priv, addrmap);
  1117. setup_bank_address_map(priv, addrmap);
  1118. setup_bg_address_map(priv, addrmap);
  1119. setup_rank_address_map(priv, addrmap);
  1120. }
  1121. #endif /* CONFIG_EDAC_DEBUG */
  1122. /**
  1123. * mc_probe - Check controller and bind driver.
  1124. * @pdev: platform device.
  1125. *
  1126. * Probe a specific controller instance for binding with the driver.
  1127. *
  1128. * Return: 0 if the controller instance was successfully bound to the
  1129. * driver; otherwise, < 0 on error.
  1130. */
  1131. static int mc_probe(struct platform_device *pdev)
  1132. {
  1133. const struct synps_platform_data *p_data;
  1134. struct edac_mc_layer layers[2];
  1135. struct synps_edac_priv *priv;
  1136. struct mem_ctl_info *mci;
  1137. void __iomem *baseaddr;
  1138. struct resource *res;
  1139. int rc;
  1140. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1141. baseaddr = devm_ioremap_resource(&pdev->dev, res);
  1142. if (IS_ERR(baseaddr))
  1143. return PTR_ERR(baseaddr);
  1144. p_data = of_device_get_match_data(&pdev->dev);
  1145. if (!p_data)
  1146. return -ENODEV;
  1147. if (!p_data->get_ecc_state(baseaddr)) {
  1148. edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
  1149. return -ENXIO;
  1150. }
  1151. layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
  1152. layers[0].size = SYNPS_EDAC_NR_CSROWS;
  1153. layers[0].is_virt_csrow = true;
  1154. layers[1].type = EDAC_MC_LAYER_CHANNEL;
  1155. layers[1].size = SYNPS_EDAC_NR_CHANS;
  1156. layers[1].is_virt_csrow = false;
  1157. mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
  1158. sizeof(struct synps_edac_priv));
  1159. if (!mci) {
  1160. edac_printk(KERN_ERR, EDAC_MC,
  1161. "Failed memory allocation for mc instance\n");
  1162. return -ENOMEM;
  1163. }
  1164. priv = mci->pvt_info;
  1165. priv->baseaddr = baseaddr;
  1166. priv->p_data = p_data;
  1167. mc_init(mci, pdev);
  1168. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
  1169. rc = setup_irq(mci, pdev);
  1170. if (rc)
  1171. goto free_edac_mc;
  1172. }
  1173. rc = edac_mc_add_mc(mci);
  1174. if (rc) {
  1175. edac_printk(KERN_ERR, EDAC_MC,
  1176. "Failed to register with EDAC core\n");
  1177. goto free_edac_mc;
  1178. }
  1179. #ifdef CONFIG_EDAC_DEBUG
  1180. if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
  1181. rc = edac_create_sysfs_attributes(mci);
  1182. if (rc) {
  1183. edac_printk(KERN_ERR, EDAC_MC,
  1184. "Failed to create sysfs entries\n");
  1185. goto free_edac_mc;
  1186. }
  1187. }
  1188. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
  1189. setup_address_map(priv);
  1190. #endif
  1191. /*
  1192. * Start capturing the correctable and uncorrectable errors. A write of
  1193. * 0 starts the counters.
  1194. */
  1195. if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
  1196. writel(0x0, baseaddr + ECC_CTRL_OFST);
  1197. return rc;
  1198. free_edac_mc:
  1199. edac_mc_free(mci);
  1200. return rc;
  1201. }
  1202. /**
  1203. * mc_remove - Unbind driver from controller.
  1204. * @pdev: Platform device.
  1205. *
  1206. * Return: Unconditionally 0
  1207. */
static int mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
	struct synps_edac_priv *priv = mci->pvt_info;

	/* Mask the ECC interrupts before the mci they reference goes away. */
	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
		disable_intr(priv);

#ifdef CONFIG_EDAC_DEBUG
	/* Injection attributes exist only when data poisoning is supported. */
	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
		edac_remove_sysfs_attributes(mci);
#endif

	/* Unregister from the EDAC core, then release the allocation. */
	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}
/* Platform driver glue: binds mc_probe()/mc_remove() via the OF match table. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove = mc_remove,
};
  1230. module_platform_driver(synps_edac_mc_driver);
  1231. MODULE_AUTHOR("Xilinx Inc");
  1232. MODULE_DESCRIPTION("Synopsys DDR ECC driver");
  1233. MODULE_LICENSE("GPL v2");