igen6_edac.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Driver for Intel client SoC with integrated memory controller using IBECC
  4. *
  5. * Copyright (C) 2020 Intel Corporation
  6. *
  7. * The In-Band ECC (IBECC) IP provides ECC protection to all or specific
  8. * regions of the physical memory space. It's used for memory controllers
  9. * that don't support the out-of-band ECC which often needs an additional
  10. * storage device to each channel for storing ECC data.
  11. */
  12. #include <linux/module.h>
  13. #include <linux/init.h>
  14. #include <linux/pci.h>
  15. #include <linux/slab.h>
  16. #include <linux/irq_work.h>
  17. #include <linux/llist.h>
  18. #include <linux/genalloc.h>
  19. #include <linux/edac.h>
  20. #include <linux/bits.h>
  21. #include <linux/io.h>
  22. #include <asm/mach_traps.h>
  23. #include <asm/nmi.h>
  24. #include <asm/mce.h>
  25. #include "edac_mc.h"
  26. #include "edac_module.h"
#define IGEN6_REVISION "v2.5.1"

#define EDAC_MOD_STR "igen6_edac"
#define IGEN6_NMI_NAME "igen6_ibecc"

/* Debug macros */
#define igen6_printk(level, fmt, arg...) \
	edac_printk(level, "igen6", fmt, ##arg)

#define igen6_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "igen6", fmt, ##arg)

/* Extract bits [lo, hi] (inclusive) of v, shifted down to bit 0 */
#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))

#define NUM_IMC 2 /* Max memory controllers */
#define NUM_CHANNELS 2 /* Max channels */
#define NUM_DIMMS 2 /* Max DIMMs per channel */

#define _4GB BIT_ULL(32)

/* Size of physical memory */
#define TOM_OFFSET 0xa0
/* Top of low usable DRAM */
#define TOLUD_OFFSET 0xbc
/* Capability register C */
#define CAPID_C_OFFSET 0xec
#define CAPID_C_IBECC BIT(15)
/* Capability register E */
#define CAPID_E_OFFSET 0xf0
#define CAPID_E_IBECC BIT(12)

/* Error Status */
#define ERRSTS_OFFSET 0xc8
#define ERRSTS_CE BIT_ULL(6)
#define ERRSTS_UE BIT_ULL(7)

/* Error Command */
#define ERRCMD_OFFSET 0xca
#define ERRCMD_CE BIT_ULL(6)
#define ERRCMD_UE BIT_ULL(7)

/* IBECC MMIO base address (per-SoC, taken from the active res_config) */
#define IBECC_BASE (res_cfg->ibecc_base)
#define IBECC_ACTIVATE_OFFSET IBECC_BASE
#define IBECC_ACTIVATE_EN BIT(0)

/* IBECC error log */
#define ECC_ERROR_LOG_OFFSET (IBECC_BASE + res_cfg->ibecc_error_log_offset)
#define ECC_ERROR_LOG_CE BIT_ULL(62)
#define ECC_ERROR_LOG_UE BIT_ULL(63)
#define ECC_ERROR_LOG_ADDR_SHIFT 5
#define ECC_ERROR_LOG_ADDR(v) GET_BITFIELD(v, 5, 38)
#define ECC_ERROR_LOG_SYND(v) GET_BITFIELD(v, 46, 61)

/* Host MMIO base address */
#define MCHBAR_OFFSET 0x48
#define MCHBAR_EN BIT_ULL(0)
#define MCHBAR_BASE(v) (GET_BITFIELD(v, 16, 38) << 16)
#define MCHBAR_SIZE 0x10000

/* Parameters for the channel decode stage */
#define IMC_BASE (res_cfg->imc_base)
#define MAD_INTER_CHANNEL_OFFSET IMC_BASE
#define MAD_INTER_CHANNEL_DDR_TYPE(v) GET_BITFIELD(v, 0, 2)
#define MAD_INTER_CHANNEL_ECHM(v) GET_BITFIELD(v, 3, 3)
#define MAD_INTER_CHANNEL_CH_L_MAP(v) GET_BITFIELD(v, 4, 4)
#define MAD_INTER_CHANNEL_CH_S_SIZE(v) ((u64)GET_BITFIELD(v, 12, 19) << 29)

/* Parameters for DRAM decode stage */
#define MAD_INTRA_CH0_OFFSET (IMC_BASE + 4)
#define MAD_INTRA_CH_DIMM_L_MAP(v) GET_BITFIELD(v, 0, 0)

/* DIMM characteristics */
#define MAD_DIMM_CH0_OFFSET (IMC_BASE + 0xc)
#define MAD_DIMM_CH_DIMM_L_SIZE(v) ((u64)GET_BITFIELD(v, 0, 6) << 29)
#define MAD_DIMM_CH_DLW(v) GET_BITFIELD(v, 7, 8)
#define MAD_DIMM_CH_DIMM_S_SIZE(v) ((u64)GET_BITFIELD(v, 16, 22) << 29)
#define MAD_DIMM_CH_DSW(v) GET_BITFIELD(v, 24, 25)

/* Hash for memory controller selection */
#define MAD_MC_HASH_OFFSET (IMC_BASE + 0x1b8)
#define MAC_MC_HASH_LSB(v) GET_BITFIELD(v, 1, 3)

/* Hash for channel selection */
#define CHANNEL_HASH_OFFSET (IMC_BASE + 0x24)
/* Hash for enhanced channel selection */
#define CHANNEL_EHASH_OFFSET (IMC_BASE + 0x28)
#define CHANNEL_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define CHANNEL_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
#define CHANNEL_HASH_MODE(v) GET_BITFIELD(v, 28, 28)

/* Parameters for memory slice decode stage */
#define MEM_SLICE_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define MEM_SLICE_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
/*
 * Per-SoC resource/configuration description.  One statically defined
 * instance per supported platform; the matching one is selected through
 * the PCI device table and cached in res_cfg.
 */
static struct res_config {
	/* NOTE(review): presumably selects MCE- vs NMI-based error delivery
	 * at registration time — confirm against the probe code (not in view).
	 */
	bool machine_check;
	int num_imc;			/* number of integrated memory controllers */
	u32 imc_base;			/* MCHBAR offset of the IMC register block */
	u32 cmf_base;
	u32 cmf_size;
	u32 ms_hash_offset;		/* memory-slice hash register offset */
	u32 ibecc_base;			/* MCHBAR offset of the IBECC register block */
	u32 ibecc_error_log_offset;
	bool (*ibecc_available)(struct pci_dev *pdev);
	/* Convert error address logged in IBECC to system physical address */
	u64 (*err_addr_to_sys_addr)(u64 eaddr, int mc);
	/* Convert error address logged in IBECC to integrated memory controller address */
	u64 (*err_addr_to_imc_addr)(u64 eaddr, int mc);
} *res_cfg;

/* Per-memory-controller state */
struct igen6_imc {
	int mc;				/* memory controller index */
	struct mem_ctl_info *mci;	/* associated EDAC MC descriptor */
	struct pci_dev *pdev;
	struct device dev;
	void __iomem *window;		/* mapped MCHBAR window */
	u64 size;			/* total memory size behind this MC */
	u64 ch_s_size;			/* size of the smaller channel */
	int ch_l_map;			/* which channel is the larger one */
	u64 dimm_s_size[NUM_CHANNELS];	/* per-channel smaller DIMM size */
	u64 dimm_l_size[NUM_CHANNELS];	/* per-channel larger DIMM size */
	int dimm_l_map[NUM_CHANNELS];	/* per-channel larger-DIMM slot map */
};

/* Driver-wide private data */
static struct igen6_pvt {
	struct igen6_imc imc[NUM_IMC];
	u64 ms_hash;			/* memory-slice hash register value */
	u64 ms_s_size;			/* size of the smaller memory slice */
	int ms_l_map;
} *igen6_pvt;

/* The top of low usable DRAM */
static u32 igen6_tolud;
/* The size of physical memory */
static u64 igen6_tom;

/* Result of decoding one logged error address through all stages */
struct decoded_addr {
	int mc;
	u64 imc_addr;
	u64 sys_addr;
	int channel_idx;
	u64 channel_addr;
	int sub_channel_idx;
	u64 sub_channel_addr;
};

/* One logged IBECC error, queued on the lock-less list from NMI context */
struct ecclog_node {
	struct llist_node llnode;
	int mc;
	u64 ecclog;
};

/*
 * In the NMI handler, the driver uses the lock-less memory allocator
 * to allocate memory to store the IBECC error logs and links the logs
 * to the lock-less list. Delay printk() and the work of error reporting
 * to EDAC core in a worker.
 */
#define ECCLOG_POOL_SIZE PAGE_SIZE
static LLIST_HEAD(ecclog_llist);
static struct gen_pool *ecclog_pool;
static char ecclog_buf[ECCLOG_POOL_SIZE];
static struct irq_work ecclog_irq_work;
static struct work_struct ecclog_work;

/* Compute die IDs for Elkhart Lake with IBECC */
#define DID_EHL_SKU5 0x4514
#define DID_EHL_SKU6 0x4528
#define DID_EHL_SKU7 0x452a
#define DID_EHL_SKU8 0x4516
#define DID_EHL_SKU9 0x452c
#define DID_EHL_SKU10 0x452e
#define DID_EHL_SKU11 0x4532
#define DID_EHL_SKU12 0x4518
#define DID_EHL_SKU13 0x451a
#define DID_EHL_SKU14 0x4534
#define DID_EHL_SKU15 0x4536

/* Compute die IDs for ICL-NNPI with IBECC */
#define DID_ICL_SKU8 0x4581
#define DID_ICL_SKU10 0x4585
#define DID_ICL_SKU11 0x4589
#define DID_ICL_SKU12 0x458d

/* Compute die IDs for Tiger Lake with IBECC */
#define DID_TGL_SKU 0x9a14

/* Compute die IDs for Alder Lake with IBECC */
#define DID_ADL_SKU1 0x4601
#define DID_ADL_SKU2 0x4602
#define DID_ADL_SKU3 0x4621
#define DID_ADL_SKU4 0x4641
  191. static bool ehl_ibecc_available(struct pci_dev *pdev)
  192. {
  193. u32 v;
  194. if (pci_read_config_dword(pdev, CAPID_C_OFFSET, &v))
  195. return false;
  196. return !!(CAPID_C_IBECC & v);
  197. }
/* EHL: the IBECC-logged address already is the system physical address. */
static u64 ehl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
	return eaddr;
}

/*
 * EHL: translate a logged (system) address into an IMC address by undoing
 * the remap of DRAM above TOLUD around the MMIO hole below 4 GiB.
 */
static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
	/* Below TOLUD: identity-mapped */
	if (eaddr < igen6_tolud)
		return eaddr;

	/* All remapped memory sits above 4 GiB: shift it back down */
	if (igen6_tom <= _4GB)
		return eaddr + igen6_tolud - _4GB;

	/* TOM > 4 GiB and the address is inside the hole region */
	if (eaddr < _4GB)
		return eaddr + igen6_tolud - igen6_tom;

	return eaddr;
}
  212. static bool icl_ibecc_available(struct pci_dev *pdev)
  213. {
  214. u32 v;
  215. if (pci_read_config_dword(pdev, CAPID_C_OFFSET, &v))
  216. return false;
  217. return !(CAPID_C_IBECC & v) &&
  218. (boot_cpu_data.x86_stepping >= 1);
  219. }
  220. static bool tgl_ibecc_available(struct pci_dev *pdev)
  221. {
  222. u32 v;
  223. if (pci_read_config_dword(pdev, CAPID_E_OFFSET, &v))
  224. return false;
  225. return !(CAPID_E_IBECC & v);
  226. }
  227. static u64 mem_addr_to_sys_addr(u64 maddr)
  228. {
  229. if (maddr < igen6_tolud)
  230. return maddr;
  231. if (igen6_tom <= _4GB)
  232. return maddr - igen6_tolud + _4GB;
  233. if (maddr < _4GB)
  234. return maddr - igen6_tolud + igen6_tom;
  235. return maddr;
  236. }
  237. static u64 mem_slice_hash(u64 addr, u64 mask, u64 hash_init, int intlv_bit)
  238. {
  239. u64 hash_addr = addr & mask, hash = hash_init;
  240. u64 intlv = (addr >> intlv_bit) & 1;
  241. int i;
  242. for (i = 6; i < 20; i++)
  243. hash ^= (hash_addr >> i) & 1;
  244. return hash ^ intlv;
  245. }
/*
 * TGL: rebuild the memory address from a logged error address by
 * re-inserting the memory-slice interleave bit computed from the hash.
 */
static u64 tgl_err_addr_to_mem_addr(u64 eaddr, int mc)
{
	u64 maddr, hash, mask, ms_s_size;
	int intlv_bit;
	u32 ms_hash;

	ms_s_size = igen6_pvt->ms_s_size;
	/* Above the interleaved region: simple linear offset */
	if (eaddr >= ms_s_size)
		return eaddr + ms_s_size;

	ms_hash = igen6_pvt->ms_hash;

	mask = MEM_SLICE_HASH_MASK(ms_hash);
	intlv_bit = MEM_SLICE_HASH_LSB_MASK_BIT(ms_hash) + 6;

	/* Open a gap at intlv_bit: upper bits shift up by one position */
	maddr = GET_BITFIELD(eaddr, intlv_bit, 63) << (intlv_bit + 1) |
		GET_BITFIELD(eaddr, 0, intlv_bit - 1);

	/* The hash (seeded with the MC index) reconstructs the missing bit */
	hash = mem_slice_hash(maddr, mask, mc, intlv_bit);

	return maddr | (hash << intlv_bit);
}

static u64 tgl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
	u64 maddr = tgl_err_addr_to_mem_addr(eaddr, mc);

	return mem_addr_to_sys_addr(maddr);
}

/* TGL: the logged address already is the IMC address. */
static u64 tgl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
	return eaddr;
}
/* ADL: the logged address is a memory address; remap it around the hole. */
static u64 adl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
	return mem_addr_to_sys_addr(eaddr);
}

/*
 * ADL: translate a logged error address to the per-IMC address by
 * removing the MC-interleave bit chosen by the MC hash register.
 */
static u64 adl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
	u64 imc_addr, ms_s_size = igen6_pvt->ms_s_size;
	struct igen6_imc *imc = &igen6_pvt->imc[mc];
	int intlv_bit;
	u32 mc_hash;

	/* Above the interleaved region: simple linear offset */
	if (eaddr >= 2 * ms_s_size)
		return eaddr - ms_s_size;

	mc_hash = readl(imc->window + MAD_MC_HASH_OFFSET);

	intlv_bit = MAC_MC_HASH_LSB(mc_hash) + 6;

	/* Squeeze the interleave bit out of the address */
	imc_addr = GET_BITFIELD(eaddr, intlv_bit + 1, 63) << intlv_bit |
		   GET_BITFIELD(eaddr, 0, intlv_bit - 1);

	return imc_addr;
}
/* Elkhart Lake */
static struct res_config ehl_cfg = {
	.num_imc = 1,
	.imc_base = 0x5000,
	.ibecc_base = 0xdc00,
	.ibecc_available = ehl_ibecc_available,
	.ibecc_error_log_offset = 0x170,
	.err_addr_to_sys_addr = ehl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};

/* Ice Lake NNPI — shares the EHL address-translation scheme */
static struct res_config icl_cfg = {
	.num_imc = 1,
	.imc_base = 0x5000,
	.ibecc_base = 0xd800,
	.ibecc_error_log_offset = 0x170,
	.ibecc_available = icl_ibecc_available,
	.err_addr_to_sys_addr = ehl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};

/* Tiger Lake */
static struct res_config tgl_cfg = {
	.machine_check = true,
	.num_imc = 2,
	.imc_base = 0x5000,
	.cmf_base = 0x11000,
	.cmf_size = 0x800,
	.ms_hash_offset = 0xac,
	.ibecc_base = 0xd400,
	.ibecc_error_log_offset = 0x170,
	.ibecc_available = tgl_ibecc_available,
	.err_addr_to_sys_addr = tgl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
};

/* Alder Lake — reuses the TGL availability check */
static struct res_config adl_cfg = {
	.machine_check = true,
	.num_imc = 2,
	.imc_base = 0xd800,
	.ibecc_base = 0xd400,
	.ibecc_error_log_offset = 0x68,
	.ibecc_available = tgl_ibecc_available,
	.err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
	.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
/* Supported host-bridge PCI IDs, each paired with its res_config */
static const struct pci_device_id igen6_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU7), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU8), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU9), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU10), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU11), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU12), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU13), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU14), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_EHL_SKU15), (kernel_ulong_t)&ehl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU8), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU10), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU11), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ICL_SKU12), (kernel_ulong_t)&icl_cfg },
	{ PCI_VDEVICE(INTEL, DID_TGL_SKU), (kernel_ulong_t)&tgl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU1), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU2), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU3), (kernel_ulong_t)&adl_cfg },
	{ PCI_VDEVICE(INTEL, DID_ADL_SKU4), (kernel_ulong_t)&adl_cfg },
	{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
  354. static enum dev_type get_width(int dimm_l, u32 mad_dimm)
  355. {
  356. u32 w = dimm_l ? MAD_DIMM_CH_DLW(mad_dimm) :
  357. MAD_DIMM_CH_DSW(mad_dimm);
  358. switch (w) {
  359. case 0:
  360. return DEV_X8;
  361. case 1:
  362. return DEV_X16;
  363. case 2:
  364. return DEV_X32;
  365. default:
  366. return DEV_UNKNOWN;
  367. }
  368. }
  369. static enum mem_type get_memory_type(u32 mad_inter)
  370. {
  371. u32 t = MAD_INTER_CHANNEL_DDR_TYPE(mad_inter);
  372. switch (t) {
  373. case 0:
  374. return MEM_DDR4;
  375. case 1:
  376. return MEM_DDR3;
  377. case 2:
  378. return MEM_LPDDR3;
  379. case 3:
  380. return MEM_LPDDR4;
  381. case 4:
  382. return MEM_WIO2;
  383. default:
  384. return MEM_UNKNOWN;
  385. }
  386. }
  387. static int decode_chan_idx(u64 addr, u64 mask, int intlv_bit)
  388. {
  389. u64 hash_addr = addr & mask, hash = 0;
  390. u64 intlv = (addr >> intlv_bit) & 1;
  391. int i;
  392. for (i = 6; i < 20; i++)
  393. hash ^= (hash_addr >> i) & 1;
  394. return (int)hash ^ intlv;
  395. }
  396. static u64 decode_channel_addr(u64 addr, int intlv_bit)
  397. {
  398. u64 channel_addr;
  399. /* Remove the interleave bit and shift upper part down to fill gap */
  400. channel_addr = GET_BITFIELD(addr, intlv_bit + 1, 63) << intlv_bit;
  401. channel_addr |= GET_BITFIELD(addr, 0, intlv_bit - 1);
  402. return channel_addr;
  403. }
/*
 * Generic interleave decode used for both the channel stage and the
 * sub-channel/DIMM stage: split addr into a target index (*idx) and the
 * address within that target (*sub_addr).
 */
static void decode_addr(u64 addr, u32 hash, u64 s_size, int l_map,
			int *idx, u64 *sub_addr)
{
	int intlv_bit = CHANNEL_HASH_LSB_MASK_BIT(hash) + 6;

	if (addr > 2 * s_size) {
		/* Above the interleaved region: only the larger target is left */
		*sub_addr = addr - s_size;
		*idx = l_map;
		return;
	}

	if (CHANNEL_HASH_MODE(hash)) {
		/* Hashed interleave on the register-selected bit */
		*sub_addr = decode_channel_addr(addr, intlv_bit);
		*idx = decode_chan_idx(addr, CHANNEL_HASH_MASK(hash), intlv_bit);
	} else {
		/* Plain interleave on address bit 6 */
		*sub_addr = decode_channel_addr(addr, 6);
		*idx = GET_BITFIELD(addr, 6, 6);
	}
}
/*
 * Decode res->imc_addr down to channel and sub-channel/DIMM, filling in
 * the remaining fields of *res.  Returns 0 on success, -EINVAL when the
 * address lies beyond the top of memory.
 */
static int igen6_decode(struct decoded_addr *res)
{
	struct igen6_imc *imc = &igen6_pvt->imc[res->mc];
	u64 addr = res->imc_addr, sub_addr, s_size;
	int idx, l_map;
	u32 hash;

	if (addr >= igen6_tom) {
		edac_dbg(0, "Address 0x%llx out of range\n", addr);
		return -EINVAL;
	}

	/* Decode channel */
	hash = readl(imc->window + CHANNEL_HASH_OFFSET);
	s_size = imc->ch_s_size;
	l_map = imc->ch_l_map;
	decode_addr(addr, hash, s_size, l_map, &idx, &sub_addr);
	res->channel_idx = idx;
	res->channel_addr = sub_addr;

	/* Decode sub-channel/DIMM */
	hash = readl(imc->window + CHANNEL_EHASH_OFFSET);
	s_size = imc->dimm_s_size[idx];
	l_map = imc->dimm_l_map[idx];
	decode_addr(res->channel_addr, hash, s_size, l_map, &idx, &sub_addr);
	res->sub_channel_idx = idx;
	res->sub_channel_addr = sub_addr;

	return 0;
}
  447. static void igen6_output_error(struct decoded_addr *res,
  448. struct mem_ctl_info *mci, u64 ecclog)
  449. {
  450. enum hw_event_mc_err_type type = ecclog & ECC_ERROR_LOG_UE ?
  451. HW_EVENT_ERR_UNCORRECTED :
  452. HW_EVENT_ERR_CORRECTED;
  453. edac_mc_handle_error(type, mci, 1,
  454. res->sys_addr >> PAGE_SHIFT,
  455. res->sys_addr & ~PAGE_MASK,
  456. ECC_ERROR_LOG_SYND(ecclog),
  457. res->channel_idx, res->sub_channel_idx,
  458. -1, "", "");
  459. }
  460. static struct gen_pool *ecclog_gen_pool_create(void)
  461. {
  462. struct gen_pool *pool;
  463. pool = gen_pool_create(ilog2(sizeof(struct ecclog_node)), -1);
  464. if (!pool)
  465. return NULL;
  466. if (gen_pool_add(pool, (unsigned long)ecclog_buf, ECCLOG_POOL_SIZE, -1)) {
  467. gen_pool_destroy(pool);
  468. return NULL;
  469. }
  470. return pool;
  471. }
  472. static int ecclog_gen_pool_add(int mc, u64 ecclog)
  473. {
  474. struct ecclog_node *node;
  475. node = (void *)gen_pool_alloc(ecclog_pool, sizeof(*node));
  476. if (!node)
  477. return -ENOMEM;
  478. node->mc = mc;
  479. node->ecclog = ecclog;
  480. llist_add(&node->llnode, &ecclog_llist);
  481. return 0;
  482. }
/*
 * Either the memory-mapped I/O status register ECC_ERROR_LOG or the PCI
 * configuration space status register ERRSTS can indicate whether a
 * correctable error or an uncorrectable error occurred. We only use the
 * ECC_ERROR_LOG register to check error type, but need to clear both
 * registers to enable future error events.
 */
static u64 ecclog_read_and_clear(struct igen6_imc *imc)
{
	u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);

	if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) {
		/* Clear CE/UE bits by writing 1s */
		writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
		return ecclog;
	}

	/* No pending error */
	return 0;
}

/* Clear the CE/UE bits of the ERRSTS config register (not NMI-safe). */
static void errsts_clear(struct igen6_imc *imc)
{
	u16 errsts;

	if (pci_read_config_word(imc->pdev, ERRSTS_OFFSET, &errsts)) {
		igen6_printk(KERN_ERR, "Failed to read ERRSTS\n");
		return;
	}

	/* Clear CE/UE bits by writing 1s */
	if (errsts & (ERRSTS_CE | ERRSTS_UE))
		pci_write_config_word(imc->pdev, ERRSTS_OFFSET, errsts);
}
  511. static int errcmd_enable_error_reporting(bool enable)
  512. {
  513. struct igen6_imc *imc = &igen6_pvt->imc[0];
  514. u16 errcmd;
  515. int rc;
  516. rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
  517. if (rc)
  518. return rc;
  519. if (enable)
  520. errcmd |= ERRCMD_CE | ERRSTS_UE;
  521. else
  522. errcmd &= ~(ERRCMD_CE | ERRSTS_UE);
  523. rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
  524. if (rc)
  525. return rc;
  526. return 0;
  527. }
/*
 * Poll all IBECC instances for pending error logs.  Runs in NMI/MCE
 * context, so it only touches NMI-safe facilities (MMIO read/clear,
 * lock-less pool and list) and defers everything else to irq_work.
 * Returns the number of instances that had a pending log.
 */
static int ecclog_handler(void)
{
	struct igen6_imc *imc;
	int i, n = 0;
	u64 ecclog;

	for (i = 0; i < res_cfg->num_imc; i++) {
		imc = &igen6_pvt->imc[i];

		/* errsts_clear() isn't NMI-safe. Delay it in the IRQ context */

		ecclog = ecclog_read_and_clear(imc);
		if (!ecclog)
			continue;

		if (!ecclog_gen_pool_add(i, ecclog))
			irq_work_queue(&ecclog_irq_work);

		n++;
	}

	return n;
}
/*
 * Workqueue callback: drain the lock-less list of logged errors,
 * translate each logged address, decode it, report it to the EDAC core
 * and return the node's memory to the pool.
 */
static void ecclog_work_cb(struct work_struct *work)
{
	struct ecclog_node *node, *tmp;
	struct mem_ctl_info *mci;
	struct llist_node *head;
	struct decoded_addr res;
	u64 eaddr;

	head = llist_del_all(&ecclog_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(node, tmp, head, llnode) {
		memset(&res, 0, sizeof(res));
		/* Reconstruct the byte address from the logged field */
		eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) <<
			ECC_ERROR_LOG_ADDR_SHIFT;
		res.mc	     = node->mc;
		res.sys_addr = res_cfg->err_addr_to_sys_addr(eaddr, res.mc);
		res.imc_addr = res_cfg->err_addr_to_imc_addr(eaddr, res.mc);

		mci = igen6_pvt->imc[res.mc].mci;

		edac_dbg(2, "MC %d, ecclog = 0x%llx\n", node->mc, node->ecclog);
		igen6_mc_printk(mci, KERN_DEBUG, "HANDLING IBECC MEMORY ERROR\n");
		igen6_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", res.sys_addr);

		if (!igen6_decode(&res))
			igen6_output_error(&res, mci, node->ecclog);

		gen_pool_free(ecclog_pool, (unsigned long)node, sizeof(*node));
	}
}
  571. static void ecclog_irq_work_cb(struct irq_work *irq_work)
  572. {
  573. int i;
  574. for (i = 0; i < res_cfg->num_imc; i++)
  575. errsts_clear(&igen6_pvt->imc[i]);
  576. if (!llist_empty(&ecclog_llist))
  577. schedule_work(&ecclog_work);
  578. }
/*
 * NMI handler for IBECC errors reported via SERR#.  Claims the NMI only
 * when at least one IBECC instance had a pending log.
 */
static int ecclog_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	unsigned char reason;

	if (!ecclog_handler())
		return NMI_DONE;

	/*
	 * Both In-Band ECC correctable error and uncorrectable error are
	 * reported by SERR# NMI. The NMI generic code (see pci_serr_error())
	 * doesn't clear the bit NMI_REASON_CLEAR_SERR (in port 0x61) to
	 * re-enable the SERR# NMI after NMI handling. So clear this bit here
	 * to re-enable SERR# NMI for receiving future In-Band ECC errors.
	 */
	reason = x86_platform.get_nmi_reason() & NMI_REASON_CLEAR_MASK;
	reason |= NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
	reason &= ~NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);

	return NMI_HANDLED;
}
/*
 * MCE notifier callback for platforms that report IBECC errors through
 * the machine-check architecture instead of SERR# NMI.
 */
static int ecclog_mce_handler(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *mce = (struct mce *)data;
	char *type;

	if (mce->kflags & MCE_HANDLED_CEC)
		return NOTIFY_DONE;

	/*
	 * Ignore unless this is a memory related error.
	 * We don't check the bit MCI_STATUS_ADDRV of MCi_STATUS here,
	 * since this bit isn't set on some CPU (e.g., Tiger Lake UP3).
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
		 mce->extcpu, type, mce->mcgstatus,
		 mce->bank, mce->status);
	edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
	edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
	edac_dbg(0, "MISC 0x%llx\n", mce->misc);
	edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
		 mce->cpuvendor, mce->cpuid, mce->time,
		 mce->socketid, mce->apicid);

	/*
	 * We just use the Machine Check for the memory error notification.
	 * Each memory controller is associated with an IBECC instance.
	 * Directly read and clear the error information(error address and
	 * error type) on all the IBECC instances so that we know on which
	 * memory controller the memory error(s) occurred.
	 */
	if (!ecclog_handler())
		return NOTIFY_DONE;

	mce->kflags |= MCE_HANDLED_EDAC;

	return NOTIFY_DONE;
}

static struct notifier_block ecclog_mce_dec = {
	.notifier_call	= ecclog_mce_handler,
	.priority	= MCE_PRIO_EDAC,
};
  641. static bool igen6_check_ecc(struct igen6_imc *imc)
  642. {
  643. u32 activate = readl(imc->window + IBECC_ACTIVATE_OFFSET);
  644. return !!(activate & IBECC_ACTIVATE_EN);
  645. }
/*
 * Read the MAD registers of one IMC, cache the channel/DIMM geometry
 * used later for address decoding, and populate the EDAC DIMM info.
 * Returns -ENODEV when DIMMs are present but IBECC is disabled.
 */
static int igen6_get_dimm_config(struct mem_ctl_info *mci)
{
	struct igen6_imc *imc = mci->pvt_info;
	u32 mad_inter, mad_intra, mad_dimm;
	int i, j, ndimms, mc = imc->mc;
	struct dimm_info *dimm;
	enum mem_type mtype;
	enum dev_type dtype;
	u64 dsize;
	bool ecc;

	edac_dbg(2, "\n");

	mad_inter = readl(imc->window + MAD_INTER_CHANNEL_OFFSET);
	mtype = get_memory_type(mad_inter);
	ecc = igen6_check_ecc(imc);
	/* Channel interleave geometry for the decode stage */
	imc->ch_s_size = MAD_INTER_CHANNEL_CH_S_SIZE(mad_inter);
	imc->ch_l_map  = MAD_INTER_CHANNEL_CH_L_MAP(mad_inter);

	for (i = 0; i < NUM_CHANNELS; i++) {
		mad_intra = readl(imc->window + MAD_INTRA_CH0_OFFSET + i * 4);
		mad_dimm  = readl(imc->window + MAD_DIMM_CH0_OFFSET + i * 4);

		imc->dimm_l_size[i] = MAD_DIMM_CH_DIMM_L_SIZE(mad_dimm);
		imc->dimm_s_size[i] = MAD_DIMM_CH_DIMM_S_SIZE(mad_dimm);
		imc->dimm_l_map[i]  = MAD_INTRA_CH_DIMM_L_MAP(mad_intra);
		imc->size += imc->dimm_s_size[i];
		imc->size += imc->dimm_l_size[i];
		ndimms = 0;

		for (j = 0; j < NUM_DIMMS; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);

			/* Slot j is the smaller DIMM unless it matches dimm_l_map */
			if (j ^ imc->dimm_l_map[i]) {
				dtype = get_width(0, mad_dimm);
				dsize = imc->dimm_s_size[i];
			} else {
				dtype = get_width(1, mad_dimm);
				dsize = imc->dimm_l_size[i];
			}

			if (!dsize)
				continue;

			dimm->grain = 64;
			dimm->mtype = mtype;
			dimm->dtype = dtype;
			dimm->nr_pages  = MiB_TO_PAGES(dsize >> 20);
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label),
				 "MC#%d_Chan#%d_DIMM#%d", mc, i, j);

			edac_dbg(0, "MC %d, Channel %d, DIMM %d, Size %llu MiB (%u pages)\n",
				 mc, i, j, dsize >> 20, dimm->nr_pages);

			ndimms++;
		}

		if (ndimms && !ecc) {
			igen6_printk(KERN_ERR, "MC%d In-Band ECC is disabled\n", mc);
			return -ENODEV;
		}
	}

	edac_dbg(0, "MC %d, total size %llu MiB\n", mc, imc->size >> 20);

	return 0;
}
  701. #ifdef CONFIG_EDAC_DEBUG
  702. /* Top of upper usable DRAM */
  703. static u64 igen6_touud;
  704. #define TOUUD_OFFSET 0xa8
  705. static void igen6_reg_dump(struct igen6_imc *imc)
  706. {
  707. int i;
  708. edac_dbg(2, "CHANNEL_HASH : 0x%x\n",
  709. readl(imc->window + CHANNEL_HASH_OFFSET));
  710. edac_dbg(2, "CHANNEL_EHASH : 0x%x\n",
  711. readl(imc->window + CHANNEL_EHASH_OFFSET));
  712. edac_dbg(2, "MAD_INTER_CHANNEL: 0x%x\n",
  713. readl(imc->window + MAD_INTER_CHANNEL_OFFSET));
  714. edac_dbg(2, "ECC_ERROR_LOG : 0x%llx\n",
  715. readq(imc->window + ECC_ERROR_LOG_OFFSET));
  716. for (i = 0; i < NUM_CHANNELS; i++) {
  717. edac_dbg(2, "MAD_INTRA_CH%d : 0x%x\n", i,
  718. readl(imc->window + MAD_INTRA_CH0_OFFSET + i * 4));
  719. edac_dbg(2, "MAD_DIMM_CH%d : 0x%x\n", i,
  720. readl(imc->window + MAD_DIMM_CH0_OFFSET + i * 4));
  721. }
  722. edac_dbg(2, "TOLUD : 0x%x", igen6_tolud);
  723. edac_dbg(2, "TOUUD : 0x%llx", igen6_touud);
  724. edac_dbg(2, "TOM : 0x%llx", igen6_tom);
  725. }
  726. static struct dentry *igen6_test;
  727. static int debugfs_u64_set(void *data, u64 val)
  728. {
  729. u64 ecclog;
  730. if ((val >= igen6_tolud && val < _4GB) || val >= igen6_touud) {
  731. edac_dbg(0, "Address 0x%llx out of range\n", val);
  732. return 0;
  733. }
  734. pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
  735. val >>= ECC_ERROR_LOG_ADDR_SHIFT;
  736. ecclog = (val << ECC_ERROR_LOG_ADDR_SHIFT) | ECC_ERROR_LOG_CE;
  737. if (!ecclog_gen_pool_add(0, ecclog))
  738. irq_work_queue(&ecclog_irq_work);
  739. return 0;
  740. }
  741. DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
  742. static void igen6_debug_setup(void)
  743. {
  744. igen6_test = edac_debugfs_create_dir("igen6_test");
  745. if (!igen6_test)
  746. return;
  747. if (!edac_debugfs_create_file("addr", 0200, igen6_test,
  748. NULL, &fops_u64_wo)) {
  749. debugfs_remove(igen6_test);
  750. igen6_test = NULL;
  751. }
  752. }
/* Remove the debugfs injection directory and everything inside it. */
static void igen6_debug_teardown(void)
{
	debugfs_remove_recursive(igen6_test);
}
#else
/* No-op stubs when CONFIG_EDAC_DEBUG is disabled. */
static void igen6_reg_dump(struct igen6_imc *imc) {}
static void igen6_debug_setup(void) {}
static void igen6_debug_teardown(void) {}
#endif
  762. static int igen6_pci_setup(struct pci_dev *pdev, u64 *mchbar)
  763. {
  764. union {
  765. u64 v;
  766. struct {
  767. u32 v_lo;
  768. u32 v_hi;
  769. };
  770. } u;
  771. edac_dbg(2, "\n");
  772. if (!res_cfg->ibecc_available(pdev)) {
  773. edac_dbg(2, "No In-Band ECC IP\n");
  774. goto fail;
  775. }
  776. if (pci_read_config_dword(pdev, TOLUD_OFFSET, &igen6_tolud)) {
  777. igen6_printk(KERN_ERR, "Failed to read TOLUD\n");
  778. goto fail;
  779. }
  780. igen6_tolud &= GENMASK(31, 20);
  781. if (pci_read_config_dword(pdev, TOM_OFFSET, &u.v_lo)) {
  782. igen6_printk(KERN_ERR, "Failed to read lower TOM\n");
  783. goto fail;
  784. }
  785. if (pci_read_config_dword(pdev, TOM_OFFSET + 4, &u.v_hi)) {
  786. igen6_printk(KERN_ERR, "Failed to read upper TOM\n");
  787. goto fail;
  788. }
  789. igen6_tom = u.v & GENMASK_ULL(38, 20);
  790. if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) {
  791. igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n");
  792. goto fail;
  793. }
  794. if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) {
  795. igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n");
  796. goto fail;
  797. }
  798. if (!(u.v & MCHBAR_EN)) {
  799. igen6_printk(KERN_ERR, "MCHBAR is disabled\n");
  800. goto fail;
  801. }
  802. *mchbar = MCHBAR_BASE(u.v);
  803. #ifdef CONFIG_EDAC_DEBUG
  804. if (pci_read_config_dword(pdev, TOUUD_OFFSET, &u.v_lo))
  805. edac_dbg(2, "Failed to read lower TOUUD\n");
  806. else if (pci_read_config_dword(pdev, TOUUD_OFFSET + 4, &u.v_hi))
  807. edac_dbg(2, "Failed to read upper TOUUD\n");
  808. else
  809. igen6_touud = u.v & GENMASK_ULL(38, 20);
  810. #endif
  811. return 0;
  812. fail:
  813. return -ENODEV;
  814. }
/*
 * Allocate, populate and register one EDAC memory controller instance
 * for controller index @mc. @mchbar is MC0's MCHBAR base; each further
 * controller's register window lives MCHBAR_SIZE beyond the previous.
 * Returns 0 on success or a negative errno; on failure everything
 * acquired here is released again (labels unwind in reverse order).
 */
static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct igen6_imc *imc;
	void __iomem *window;
	int rc;

	edac_dbg(2, "\n");

	/* Each memory controller's register window is MCHBAR_SIZE apart. */
	mchbar += mc * MCHBAR_SIZE;
	window = ioremap(mchbar, MCHBAR_SIZE);
	if (!window) {
		igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
		return -ENODEV;
	}

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = NUM_CHANNELS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = NUM_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
	if (!mci) {
		rc = -ENOMEM;
		goto fail;
	}

	mci->ctl_name = kasprintf(GFP_KERNEL, "Intel_client_SoC MC#%d", mc);
	if (!mci->ctl_name) {
		rc = -ENOMEM;
		goto fail2;
	}

	mci->mtype_cap = MEM_FLAG_LPDDR4 | MEM_FLAG_DDR4;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = pci_name(pdev);
	mci->pvt_info = &igen6_pvt->imc[mc];

	imc = mci->pvt_info;
	device_initialize(&imc->dev);
	/*
	 * EDAC core uses mci->pdev(pointer of structure device) as
	 * memory controller ID. The client SoCs attach one or more
	 * memory controllers to single pci_dev (single pci_dev->dev
	 * can be for multiple memory controllers).
	 *
	 * To make mci->pdev unique, assign pci_dev->dev to mci->pdev
	 * for the first memory controller and assign a unique imc->dev
	 * to mci->pdev for each non-first memory controller.
	 */
	mci->pdev = mc ? &imc->dev : &pdev->dev;
	imc->mc = mc;
	imc->pdev = pdev;
	imc->window = window;

	igen6_reg_dump(imc);

	rc = igen6_get_dimm_config(mci);
	if (rc)
		goto fail3;

	rc = edac_mc_add_mc(mci);
	if (rc) {
		igen6_printk(KERN_ERR, "Failed to register mci#%d\n", mc);
		goto fail3;
	}

	imc->mci = mci;
	return 0;
fail3:
	/* ctl_name was kasprintf()ed above; edac_mc_free() won't free it. */
	kfree(mci->ctl_name);
fail2:
	edac_mc_free(mci);
fail:
	iounmap(window);
	return rc;
}
  886. static void igen6_unregister_mcis(void)
  887. {
  888. struct mem_ctl_info *mci;
  889. struct igen6_imc *imc;
  890. int i;
  891. edac_dbg(2, "\n");
  892. for (i = 0; i < res_cfg->num_imc; i++) {
  893. imc = &igen6_pvt->imc[i];
  894. mci = imc->mci;
  895. if (!mci)
  896. continue;
  897. edac_mc_del_mc(mci->pdev);
  898. kfree(mci->ctl_name);
  899. edac_mc_free(mci);
  900. iounmap(imc->window);
  901. }
  902. }
  903. static int igen6_mem_slice_setup(u64 mchbar)
  904. {
  905. struct igen6_imc *imc = &igen6_pvt->imc[0];
  906. u64 base = mchbar + res_cfg->cmf_base;
  907. u32 offset = res_cfg->ms_hash_offset;
  908. u32 size = res_cfg->cmf_size;
  909. u64 ms_s_size, ms_hash;
  910. void __iomem *cmf;
  911. int ms_l_map;
  912. edac_dbg(2, "\n");
  913. if (imc[0].size < imc[1].size) {
  914. ms_s_size = imc[0].size;
  915. ms_l_map = 1;
  916. } else {
  917. ms_s_size = imc[1].size;
  918. ms_l_map = 0;
  919. }
  920. igen6_pvt->ms_s_size = ms_s_size;
  921. igen6_pvt->ms_l_map = ms_l_map;
  922. edac_dbg(0, "ms_s_size: %llu MiB, ms_l_map %d\n",
  923. ms_s_size >> 20, ms_l_map);
  924. if (!size)
  925. return 0;
  926. cmf = ioremap(base, size);
  927. if (!cmf) {
  928. igen6_printk(KERN_ERR, "Failed to ioremap cmf 0x%llx\n", base);
  929. return -ENODEV;
  930. }
  931. ms_hash = readq(cmf + offset);
  932. igen6_pvt->ms_hash = ms_hash;
  933. edac_dbg(0, "MEM_SLICE_HASH: 0x%llx\n", ms_hash);
  934. iounmap(cmf);
  935. return 0;
  936. }
  937. static int register_err_handler(void)
  938. {
  939. int rc;
  940. if (res_cfg->machine_check) {
  941. mce_register_decode_chain(&ecclog_mce_dec);
  942. return 0;
  943. }
  944. rc = register_nmi_handler(NMI_SERR, ecclog_nmi_handler,
  945. 0, IGEN6_NMI_NAME);
  946. if (rc) {
  947. igen6_printk(KERN_ERR, "Failed to register NMI handler\n");
  948. return rc;
  949. }
  950. return 0;
  951. }
  952. static void unregister_err_handler(void)
  953. {
  954. if (res_cfg->machine_check) {
  955. mce_unregister_decode_chain(&ecclog_mce_dec);
  956. return;
  957. }
  958. unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
  959. }
  960. static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  961. {
  962. u64 mchbar;
  963. int i, rc;
  964. edac_dbg(2, "\n");
  965. igen6_pvt = kzalloc(sizeof(*igen6_pvt), GFP_KERNEL);
  966. if (!igen6_pvt)
  967. return -ENOMEM;
  968. res_cfg = (struct res_config *)ent->driver_data;
  969. rc = igen6_pci_setup(pdev, &mchbar);
  970. if (rc)
  971. goto fail;
  972. for (i = 0; i < res_cfg->num_imc; i++) {
  973. rc = igen6_register_mci(i, mchbar, pdev);
  974. if (rc)
  975. goto fail2;
  976. }
  977. if (res_cfg->num_imc > 1) {
  978. rc = igen6_mem_slice_setup(mchbar);
  979. if (rc)
  980. goto fail2;
  981. }
  982. ecclog_pool = ecclog_gen_pool_create();
  983. if (!ecclog_pool) {
  984. rc = -ENOMEM;
  985. goto fail2;
  986. }
  987. INIT_WORK(&ecclog_work, ecclog_work_cb);
  988. init_irq_work(&ecclog_irq_work, ecclog_irq_work_cb);
  989. rc = register_err_handler();
  990. if (rc)
  991. goto fail3;
  992. /* Enable error reporting */
  993. rc = errcmd_enable_error_reporting(true);
  994. if (rc) {
  995. igen6_printk(KERN_ERR, "Failed to enable error reporting\n");
  996. goto fail4;
  997. }
  998. /* Check if any pending errors before/during the registration of the error handler */
  999. ecclog_handler();
  1000. igen6_debug_setup();
  1001. return 0;
  1002. fail4:
  1003. unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
  1004. fail3:
  1005. gen_pool_destroy(ecclog_pool);
  1006. fail2:
  1007. igen6_unregister_mcis();
  1008. fail:
  1009. kfree(igen6_pvt);
  1010. return rc;
  1011. }
/*
 * PCI ->remove: tear everything down in reverse order of probe.
 * Error reporting is disabled and the handler unhooked before the
 * deferred work is drained, so no new log entries can be queued while
 * (or after) the worker infrastructure is being torn down.
 */
static void igen6_remove(struct pci_dev *pdev)
{
	edac_dbg(2, "\n");

	igen6_debug_teardown();
	errcmd_enable_error_reporting(false);
	unregister_err_handler();
	/* Drain in-flight IRQ work, then the error-decoding workqueue item. */
	irq_work_sync(&ecclog_irq_work);
	flush_work(&ecclog_work);
	gen_pool_destroy(ecclog_pool);
	igen6_unregister_mcis();
	kfree(igen6_pvt);
}
/* PCI driver binding against the host bridges listed in igen6_pci_tbl. */
static struct pci_driver igen6_driver = {
	.name = EDAC_MOD_STR,
	.probe = igen6_probe,
	.remove = igen6_remove,
	.id_table = igen6_pci_tbl,
};
  1030. static int __init igen6_init(void)
  1031. {
  1032. const char *owner;
  1033. int rc;
  1034. edac_dbg(2, "\n");
  1035. owner = edac_get_owner();
  1036. if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
  1037. return -ENODEV;
  1038. edac_op_state = EDAC_OPSTATE_NMI;
  1039. rc = pci_register_driver(&igen6_driver);
  1040. if (rc)
  1041. return rc;
  1042. igen6_printk(KERN_INFO, "%s\n", IGEN6_REVISION);
  1043. return 0;
  1044. }
/* Module exit: unbind from the host bridge, which runs igen6_remove(). */
static void __exit igen6_exit(void)
{
	edac_dbg(2, "\n");

	pci_unregister_driver(&igen6_driver);
}
  1050. module_init(igen6_init);
  1051. module_exit(igen6_exit);
  1052. MODULE_LICENSE("GPL v2");
  1053. MODULE_AUTHOR("Qiuxu Zhuo");
  1054. MODULE_DESCRIPTION("MC Driver for Intel client SoC using In-Band ECC");