pnd2_edac.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Driver for Pondicherry2 memory controller.
  4. *
  5. * Copyright (c) 2016, Intel Corporation.
  6. *
  7. * [Derived from sb_edac.c]
  8. *
  9. * Translation of system physical addresses to DIMM addresses
  10. * is a two stage process:
  11. *
  12. * First the Pondicherry 2 memory controller handles slice and channel interleaving
  13. * in "sys2pmi()". This is (almost) completley common between platforms.
  14. *
  15. * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
  16. * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
  17. */
  18. #include <linux/module.h>
  19. #include <linux/init.h>
  20. #include <linux/pci.h>
  21. #include <linux/pci_ids.h>
  22. #include <linux/slab.h>
  23. #include <linux/delay.h>
  24. #include <linux/edac.h>
  25. #include <linux/mmzone.h>
  26. #include <linux/smp.h>
  27. #include <linux/bitmap.h>
  28. #include <linux/math64.h>
  29. #include <linux/mod_devicetable.h>
  30. #include <linux/platform_data/x86/p2sb.h>
  31. #include <asm/cpu_device_id.h>
  32. #include <asm/intel-family.h>
  33. #include <asm/processor.h>
  34. #include <asm/mce.h>
  35. #include "edac_mc.h"
  36. #include "edac_module.h"
  37. #include "pnd2_edac.h"
  38. #define EDAC_MOD_STR "pnd2_edac"
  39. #define APL_NUM_CHANNELS 4
  40. #define DNV_NUM_CHANNELS 2
  41. #define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
/* Supported memory controller flavors; selects the matching dunit_ops. */
enum type {
	APL,	/* Apollo Lake */
	DNV,	/* All requests go to PMI CH0 on each slice (CH1 disabled) */
};
/* Fully decoded DIMM coordinates produced by the second stage decode. */
struct dram_addr {
	int chan;	/* PMI channel */
	int dimm;	/* DIMM slot within the channel */
	int rank;
	int bank;
	int row;
	int col;
};
/* Per memory-controller private data hung off mem_ctl_info::pvt_info. */
struct pnd2_pvt {
	/* Per-channel DIMM geometry selector (presumably an index into
	 * the dimms[] table below — consumer is not visible here, verify
	 * against apl_pmi2mem()). */
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;	/* top of low / top of high memory */
};
  58. /*
  59. * System address space is divided into multiple regions with
  60. * different interleave rules in each. The as0/as1 regions
  61. * have no interleaving at all. The as2 region is interleaved
  62. * between two channels. The mot region is magic and may overlap
  63. * other regions, with its interleave rules taking precedence.
  64. * Addresses not in any of these regions are interleaved across
  65. * all four channels.
  66. */
static struct region {
	u64 base;	/* first address covered by the region */
	u64 limit;	/* last address covered (inclusive, see in_region()) */
	u8 enabled;	/* non-zero once the region has been described */
} mot, as0, as1, as2;
/*
 * Platform abstraction: one instance per supported SoC (Apollo Lake,
 * Denverton). Register access and platform specific decode go through
 * these hooks so the common code stays platform independent.
 */
static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;			/* number of PMI channels */
	int dimms_per_channel;
	/* Read one h/w register (sideband mailbox, MMIO or PCI config) */
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	/* Read the platform specific dunit registers */
	int (*get_registers)(void);
	/* Presumably checks whether ECC is enabled — verify at the call site */
	int (*check_ecc)(void);
	/* Describe an asymmetric interleave region from its h/w register */
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	/* Second stage decode: PMI address -> rank/bank/row/column */
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;
  87. static struct mem_ctl_info *pnd2_mci;
  88. #define PND2_MSG_SIZE 256
  89. /* Debug macros */
  90. #define pnd2_printk(level, fmt, arg...) \
  91. edac_printk(level, "pnd2", fmt, ##arg)
  92. #define pnd2_mc_printk(mci, level, fmt, arg...) \
  93. edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
  94. #define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
  95. #define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
  96. #define SELECTOR_DISABLED (-1)
  97. #define _4GB (1ul << 32)
  98. #define PMI_ADDRESS_WIDTH 31
  99. #define PND_MAX_PHYS_BIT 39
  100. #define APL_ASYMSHIFT 28
  101. #define DNV_ASYMSHIFT 31
  102. #define CH_HASH_MASK_LSB 6
  103. #define SLICE_HASH_MASK_LSB 6
  104. #define MOT_SLC_INTLV_BIT 12
  105. #define LOG2_PMI_ADDR_GRANULARITY 5
  106. #define MOT_SHIFT 24
  107. #define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
  108. #define U64_LSHIFT(val, s) ((u64)(val) << (s))
  109. /*
  110. * On Apollo Lake we access memory controller registers via a
  111. * side-band mailbox style interface in a hidden PCI device
  112. * configuration space.
  113. */
  114. static struct pci_bus *p2sb_bus;
  115. #define P2SB_DEVFN PCI_DEVFN(0xd, 0)
  116. #define P2SB_ADDR_OFF 0xd0
  117. #define P2SB_DATA_OFF 0xd4
  118. #define P2SB_STAT_OFF 0xd8
  119. #define P2SB_ROUT_OFF 0xda
  120. #define P2SB_EADD_OFF 0xdc
  121. #define P2SB_HIDE_OFF 0xe1
  122. #define P2SB_BUSY 1
  123. #define P2SB_READ(size, off, ptr) \
  124. pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
  125. #define P2SB_WRITE(size, off, val) \
  126. pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
  127. static bool p2sb_is_busy(u16 *status)
  128. {
  129. P2SB_READ(word, P2SB_STAT_OFF, status);
  130. return !!(*status & P2SB_BUSY);
  131. }
/*
 * Do one 32-bit read through the P2SB sideband mailbox.
 *
 * Returns -EAGAIN if the mailbox is already busy, -EBUSY on timeout,
 * otherwise the 2-bit response code from the status register
 * (non-zero indicates an error).
 */
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it's hidden */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	/* Mailbox still owned by a previous transaction: let caller retry */
	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	/* Program the target, then kick off the transaction via STAT */
	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	/* Bounded busy-wait for the mailbox to complete */
	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	/* Hide the P2SB device, if it was hidden before */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}
/*
 * Read a 4- or 8-byte wide Apollo Lake register via the P2SB mailbox.
 * 64-bit registers are read as two 32-bit halves, high word first.
 */
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		fallthrough;	/* low half is read for both sizes */
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
			    sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}
  180. static u64 get_mem_ctrl_hub_base_addr(void)
  181. {
  182. struct b_cr_mchbar_lo_pci lo;
  183. struct b_cr_mchbar_hi_pci hi;
  184. struct pci_dev *pdev;
  185. pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
  186. if (pdev) {
  187. pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
  188. pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
  189. pci_dev_put(pdev);
  190. } else {
  191. return 0;
  192. }
  193. if (!lo.enable) {
  194. edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
  195. return 0;
  196. }
  197. return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
  198. }
  199. #define DNV_MCHBAR_SIZE 0x8000
  200. #define DNV_SB_PORT_SIZE 0x10000
/*
 * Read a Denverton register. Depending on @op the register lives in
 * PCI config space (op == 4), behind the memory controller hub base
 * (op == 0, port == 0x4c), or in the sideband MMIO window of @port.
 */
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	void __iomem *base;
	struct resource r;
	int ret;

	if (op == 4) {
		/* Plain PCI config read from the host bridge (device 0x1980) */
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			memset(&r, 0, sizeof(r));
			r.start = get_mem_ctrl_hub_base_addr();
			if (!r.start)
				return -ENODEV;
			r.end = r.start + DNV_MCHBAR_SIZE - 1;
		} else {
			/* MMIO via sideband register base address */
			ret = p2sb_bar(NULL, 0, &r);
			if (ret)
				return ret;

			r.start += (port << 16);
			r.end = r.start + DNV_SB_PORT_SIZE - 1;
		}

		/* Map only for the duration of this one read */
		base = ioremap(r.start, resource_size(&r));
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u64 *)data = readq(base + off);
		else
			*(u32 *)data = readl(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}
  242. #define RD_REGP(regp, regname, port) \
  243. ops->rd_reg(port, \
  244. regname##_offset, \
  245. regname##_r_opcode, \
  246. regp, sizeof(struct regname), \
  247. #regname)
  248. #define RD_REG(regp, regname) \
  249. ops->rd_reg(regname ## _port, \
  250. regname##_offset, \
  251. regname##_r_opcode, \
  252. regp, sizeof(struct regname), \
  253. #regname)
/* Interleave configuration, captured once by get_registers() */
static u64 top_lm, top_hm;	/* top of low / top of high memory */
static bool two_slices;
static bool two_channels;	/* Both PMI channels in one slice enabled */
static u8 sym_chan_mask;	/* channels carrying symmetric memory */
static u8 asym_chan_mask;	/* channels carrying asymmetric memory */
static u8 chan_mask;		/* union of the two masks above */
static int slice_selector = -1;	/* address bit picking the slice (SELECTOR_DISABLED) */
static int chan_selector = -1;	/* address bit picking the channel (SELECTOR_DISABLED) */
static u64 slice_hash_mask;	/* address bits XOR-hashed for slice select */
static u64 chan_hash_mask;	/* address bits XOR-hashed for channel select */
  264. static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
  265. {
  266. rp->enabled = 1;
  267. rp->base = base;
  268. rp->limit = limit;
  269. edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
  270. }
/*
 * Describe a region from a base/mask pair (used for the MOT region).
 * The mask must be a contiguous run of 1s reaching the top physical
 * address bit, and the base must be aligned to it; otherwise complain
 * (FW_BUG) and leave the region disabled.
 */
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		/* Not a solid block of 1s up to bit PND_MAX_PHYS_BIT */
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	/* Limit = base with all the don't-care bits set, clamped to phys range */
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
  290. static bool in_region(struct region *rp, u64 addr)
  291. {
  292. if (!rp->enabled)
  293. return false;
  294. return rp->base <= addr && addr <= rp->limit;
  295. }
  296. static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
  297. {
  298. int mask = 0;
  299. if (!p->slice_0_mem_disabled)
  300. mask |= p->sym_slice0_channel_enabled;
  301. if (!p->slice_1_disabled)
  302. mask |= p->sym_slice1_channel_enabled << 2;
  303. if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
  304. mask &= 0x5;
  305. return mask;
  306. }
  307. static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
  308. struct b_cr_asym_mem_region0_mchbar *as0,
  309. struct b_cr_asym_mem_region1_mchbar *as1,
  310. struct b_cr_asym_2way_mem_region_mchbar *as2way)
  311. {
  312. const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
  313. int mask = 0;
  314. if (as2way->asym_2way_interleave_enable)
  315. mask = intlv[as2way->asym_2way_intlv_mode];
  316. if (as0->slice0_asym_enable)
  317. mask |= (1 << as0->slice0_asym_channel_select);
  318. if (as1->slice1_asym_enable)
  319. mask |= (4 << as1->slice1_asym_channel_select);
  320. if (p->slice_0_mem_disabled)
  321. mask &= 0xc;
  322. if (p->slice_1_disabled)
  323. mask &= 0x3;
  324. if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
  325. mask &= 0x5;
  326. return mask;
  327. }
/* Shadow copies of the interleave configuration registers (read once) */
static struct b_cr_tolud_pci tolud;		/* top of low usable DRAM */
static struct b_cr_touud_lo_pci touud_lo;	/* top of upper usable DRAM, low word */
static struct b_cr_touud_hi_pci touud_hi;	/* top of upper usable DRAM, high word */
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;
/* Apollo Lake dunit */
/*
 * Validated on board with just two DIMMs in the [0] and [2] positions
 * in this array. Other port number matches documentation, but caution
 * advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };	/* sideband port per channel */
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };	/* sideband port per channel */
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
  356. static void apl_mk_region(char *name, struct region *rp, void *asym)
  357. {
  358. struct b_cr_asym_mem_region0_mchbar *a = asym;
  359. mk_region(name, rp,
  360. U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
  361. U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
  362. GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
  363. }
  364. static void dnv_mk_region(char *name, struct region *rp, void *asym)
  365. {
  366. struct b_cr_asym_mem_region_denverton *a = asym;
  367. mk_region(name, rp,
  368. U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
  369. U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
  370. GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
  371. }
  372. static int apl_get_registers(void)
  373. {
  374. int ret = -ENODEV;
  375. int i;
  376. if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
  377. return -ENODEV;
  378. /*
  379. * RD_REGP() will fail for unpopulated or non-existent
  380. * DIMM slots. Return success if we find at least one DIMM.
  381. */
  382. for (i = 0; i < APL_NUM_CHANNELS; i++)
  383. if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
  384. ret = 0;
  385. return ret;
  386. }
  387. static int dnv_get_registers(void)
  388. {
  389. int i;
  390. if (RD_REG(&dsch, d_cr_dsch))
  391. return -ENODEV;
  392. for (i = 0; i < DNV_NUM_CHANNELS; i++)
  393. if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
  394. RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
  395. RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
  396. RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
  397. RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
  398. RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
  399. RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
  400. RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
  401. return -ENODEV;
  402. return 0;
  403. }
/*
 * Read all the h/w config registers once here (they don't
 * change at run time. Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	/* Address bit used for slice/channel selection per interleave mode */
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	/* Platform specific registers (drp0[] on APL, dsch/dmap* on DNV) */
	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	/* Describe the enabled asymmetric regions */
	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	/* A slice counts only if it exists, has memory, and has channels */
	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
			(chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	/* Pick the address bit(s) used to select slice and/or channel */
	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	/* Build the XOR hash masks used when hashing (non-hvm) is active */
	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;

		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;

		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}
  492. /* Get a contiguous memory address (remove the MMIO gap) */
  493. static u64 remove_mmio_gap(u64 sys)
  494. {
  495. return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
  496. }
  497. /* Squeeze out one address bit, shift upper part down to fill gap */
  498. static void remove_addr_bit(u64 *addr, int bitidx)
  499. {
  500. u64 mask;
  501. if (bitidx == -1)
  502. return;
  503. mask = (1ull << bitidx) - 1;
  504. *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
  505. }
  506. /* XOR all the bits from addr specified in mask */
  507. static int hash_by_mask(u64 addr, u64 mask)
  508. {
  509. u64 result = addr & mask;
  510. result = (result >> 32) ^ result;
  511. result = (result >> 16) ^ result;
  512. result = (result >> 8) ^ result;
  513. result = (result >> 4) ^ result;
  514. result = (result >> 2) ^ result;
  515. result = (result >> 1) ^ result;
  516. return (int)result & 1;
  517. }
/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
				MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		/* Asymmetric region on slice 0: fixed channel, no interleave */
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		/* Asymmetric region on slice 1: fixed channel, no interleave */
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		/* 2-way interleaved asymmetric region */
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
							MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}
  609. /* Translate PMI address to memory (rank, row, bank, column) */
  610. #define C(n) (0x10 | (n)) /* column */
  611. #define B(n) (0x20 | (n)) /* bank */
  612. #define R(n) (0x40 | (n)) /* row */
  613. #define RS (0x80) /* rank */
  614. /* addrdec values */
  615. #define AMAP_1KB 0
  616. #define AMAP_2KB 1
  617. #define AMAP_4KB 2
  618. #define AMAP_RSVD 3
  619. /* dden values */
  620. #define DEN_4Gb 0
  621. #define DEN_8Gb 2
  622. /* dwid values */
  623. #define X8 0
  624. #define X16 1
/*
 * Table of supported DIMM geometries. Each entry describes, for one
 * addrdec/dden/dwid combination, where the column (C), bank (B),
 * row (R) and rank (RS) bits live within a PMI address. A zero entry
 * in .bits means that PMI address bit is unused for this geometry.
 */
static struct dimm_geometry {
	u8 addrdec;			/* AMAP_* address decode / page size */
	u8 dden;			/* DEN_* device density */
	u8 dwid;			/* X8 / X16 device width */
	u8 rowbits, colbits;		/* number of row/column address bits */
	u16 bits[PMI_ADDRESS_WIDTH];	/* role of each PMI address bit */
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};
  753. static int bank_hash(u64 pmiaddr, int idx, int shft)
  754. {
  755. int bhash = 0;
  756. switch (idx) {
  757. case 0:
  758. bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
  759. break;
  760. case 1:
  761. bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
  762. bhash ^= ((pmiaddr >> 22) & 1) << 1;
  763. break;
  764. case 2:
  765. bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
  766. break;
  767. }
  768. return bhash;
  769. }
  770. static int rank_hash(u64 pmiaddr)
  771. {
  772. return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
  773. }
/*
 * Second stage decode. Compute rank, bank, row & column.
 *
 * Walks the pmi address bit by bit. Each entry in dimm_geometry::bits[]
 * encodes what that address bit contributes: a type tag in the high nibble
 * (column/bank/row/rank-select, matched via the C()/B()/R()/RS macros) and
 * the destination bit index in the low nibble (& 0xf).
 *
 * Returns 0 on success with @daddr filled in, -EINVAL (with @msg set) on a
 * bad geometry table or an address bit that maps to nothing.
 */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];	/* geometry index chosen at probe time */
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		/* "skiprs" (below) may push the lookup past the table end. */
		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			/* Optionally XOR in the bank-address hash. */
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			/* Optionally XOR in the rank-select hash. */
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			/* Zero table entry: past the last mapped bit. */
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}
done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;	/* APL: one DIMM per channel (see apl_ops) */
	return 0;
}
/*
 * Pluck bit "in" from pmiaddr and return value shifted to bit "out".
 * Building block for assembling rank/bank/row/column fields whose source
 * bit positions come from the programmable dmap* registers.
 */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
/*
 * Denverton second-stage decode: extract rank, dimm, bank, row and column
 * from a pmi address using the per-channel dmap* bit-position registers.
 * Source bit positions are stored relative to a base offset (hence the
 * "+ 6" / "+ 13" adjustments below — presumably the low address bits cover
 * the burst/chunk; TODO confirm against the register spec). A row field of
 * 31 or a ca11 field of 0x3f marks a bit as not mapped (see also
 * dnv_get_dimm_config()). Always returns 0.
 */
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	/* Bank address bits; bank group bit 1 exists only on DDR4. */
	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		/* Bank XOR hashing enabled: fold row/column bits into the bank. */
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	/* Row bits 0-13 are always mapped; 14-17 only when != 31. */
	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	/* Column bits 3-9; column bit 11 only on DDR3 when ca11 is mapped. */
	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
  906. static int check_channel(int ch)
  907. {
  908. if (drp0[ch].dramtype != 0) {
  909. pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
  910. return 1;
  911. } else if (drp0[ch].eccen == 0) {
  912. pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
  913. return 1;
  914. }
  915. return 0;
  916. }
  917. static int apl_check_ecc_active(void)
  918. {
  919. int i, ret = 0;
  920. /* Check dramtype and ECC mode for each present DIMM */
  921. for (i = 0; i < APL_NUM_CHANNELS; i++)
  922. if (chan_mask & BIT(i))
  923. ret += check_channel(i);
  924. return ret ? -EINVAL : 0;
  925. }
  926. #define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
  927. static int check_unit(int ch)
  928. {
  929. struct d_cr_drp *d = &drp[ch];
  930. if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
  931. pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
  932. return 1;
  933. }
  934. return 0;
  935. }
  936. static int dnv_check_ecc_active(void)
  937. {
  938. int i, ret = 0;
  939. for (i = 0; i < DNV_NUM_CHANNELS; i++)
  940. ret += check_unit(i);
  941. return ret ? -EINVAL : 0;
  942. }
  943. static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
  944. struct dram_addr *daddr, char *msg)
  945. {
  946. u64 pmiaddr;
  947. u32 pmiidx;
  948. int ret;
  949. ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
  950. if (ret)
  951. return ret;
  952. pmiaddr >>= ops->pmiaddr_shift;
  953. /* pmi channel idx to dimm channel idx */
  954. pmiidx >>= ops->pmiidx_shift;
  955. daddr->chan = pmiidx;
  956. ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
  957. if (ret)
  958. return ret;
  959. edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
  960. addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
  961. return 0;
  962. }
/*
 * Decode one machine check that hit the memory controller and report it
 * through the EDAC core. Fills @daddr with the decoded DRAM location.
 * Errors without a valid address (ADDRV clear) are silently dropped.
 */
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	/* Severity: uncorrected with no valid return IP is fatal. */
	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
			     HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 * 000f 0000 1mmm cccc (binary)
	 * where:
	 * f = Correction Report Filtering Bit. If 1, subsequent errors
	 * won't be shown
	 * mmm = error type
	 * cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
			     m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	/* Decode failed: report with the translation error text in @msg. */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
/*
 * Probe-time setup for Apollo Lake: for each populated channel, match the
 * channel's DRAM parameters against the dimm_geometry[] table and fill in
 * the EDAC dimm_info (size, device width, label).
 */
static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		/* Skip channels not marked present in chan_mask. */
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = edac_get_dimm(mci, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		/* Find the geometry entry matching this channel's DRAM config. */
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		/* Remembered for second-stage decode in apl_pmi2mem(). */
		pvt->dimm_geom[i] = g;

		/*
		 * ranks * 8 * rows * cols; ">> (20 - 3)" converts this to
		 * MiB (presumably 8 bytes per decoded address -- TODO
		 * confirm the units against the hardware spec).
		 */
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;	/* dwid 0 => x8 */
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}
/* Device-width lookup, indexed by the dimmdwid register field. */
static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};
  1072. static void dnv_get_dimm_config(struct mem_ctl_info *mci)
  1073. {
  1074. int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
  1075. struct dimm_info *dimm;
  1076. struct d_cr_drp *d;
  1077. u64 capacity;
  1078. if (dsch.ddr4en) {
  1079. memtype = MEM_DDR4;
  1080. banks = 16;
  1081. colbits = 10;
  1082. } else {
  1083. memtype = MEM_DDR3;
  1084. banks = 8;
  1085. }
  1086. for (i = 0; i < DNV_NUM_CHANNELS; i++) {
  1087. if (dmap4[i].row14 == 31)
  1088. rowbits = 14;
  1089. else if (dmap4[i].row15 == 31)
  1090. rowbits = 15;
  1091. else if (dmap4[i].row16 == 31)
  1092. rowbits = 16;
  1093. else if (dmap4[i].row17 == 31)
  1094. rowbits = 17;
  1095. else
  1096. rowbits = 18;
  1097. if (memtype == MEM_DDR3) {
  1098. if (dmap1[i].ca11 != 0x3f)
  1099. colbits = 12;
  1100. else
  1101. colbits = 10;
  1102. }
  1103. d = &drp[i];
  1104. /* DIMM0 is present if rank0 and/or rank1 is enabled */
  1105. ranks_of_dimm[0] = d->rken0 + d->rken1;
  1106. /* DIMM1 is present if rank2 and/or rank3 is enabled */
  1107. ranks_of_dimm[1] = d->rken2 + d->rken3;
  1108. for (j = 0; j < DNV_MAX_DIMMS; j++) {
  1109. if (!ranks_of_dimm[j])
  1110. continue;
  1111. dimm = edac_get_dimm(mci, i, j, 0);
  1112. if (!dimm) {
  1113. edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
  1114. continue;
  1115. }
  1116. capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
  1117. edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
  1118. dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
  1119. dimm->grain = 32;
  1120. dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
  1121. dimm->mtype = memtype;
  1122. dimm->edac_mode = EDAC_SECDED;
  1123. snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
  1124. }
  1125. }
  1126. }
  1127. static int pnd2_register_mci(struct mem_ctl_info **ppmci)
  1128. {
  1129. struct edac_mc_layer layers[2];
  1130. struct mem_ctl_info *mci;
  1131. struct pnd2_pvt *pvt;
  1132. int rc;
  1133. rc = ops->check_ecc();
  1134. if (rc < 0)
  1135. return rc;
  1136. /* Allocate a new MC control structure */
  1137. layers[0].type = EDAC_MC_LAYER_CHANNEL;
  1138. layers[0].size = ops->channels;
  1139. layers[0].is_virt_csrow = false;
  1140. layers[1].type = EDAC_MC_LAYER_SLOT;
  1141. layers[1].size = ops->dimms_per_channel;
  1142. layers[1].is_virt_csrow = true;
  1143. mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
  1144. if (!mci)
  1145. return -ENOMEM;
  1146. pvt = mci->pvt_info;
  1147. memset(pvt, 0, sizeof(*pvt));
  1148. mci->mod_name = EDAC_MOD_STR;
  1149. mci->dev_name = ops->name;
  1150. mci->ctl_name = "Pondicherry2";
  1151. /* Get dimm basic config and the memory layout */
  1152. ops->get_dimm_config(mci);
  1153. if (edac_mc_add_mc(mci)) {
  1154. edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
  1155. edac_mc_free(mci);
  1156. return -EINVAL;
  1157. }
  1158. *ppmci = mci;
  1159. return 0;
  1160. }
  1161. static void pnd2_unregister_mci(struct mem_ctl_info *mci)
  1162. {
  1163. if (unlikely(!mci || !mci->pvt_info)) {
  1164. pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
  1165. return;
  1166. }
  1167. /* Remove MC sysfs nodes */
  1168. edac_mc_del_mc(NULL);
  1169. edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
  1170. edac_mc_free(mci);
  1171. }
  1172. /*
  1173. * Callback function registered with core kernel mce code.
  1174. * Called once for each logged error.
  1175. */
  1176. static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
  1177. {
  1178. struct mce *mce = (struct mce *)data;
  1179. struct mem_ctl_info *mci;
  1180. struct dram_addr daddr;
  1181. char *type;
  1182. mci = pnd2_mci;
  1183. if (!mci || (mce->kflags & MCE_HANDLED_CEC))
  1184. return NOTIFY_DONE;
  1185. /*
  1186. * Just let mcelog handle it if the error is
  1187. * outside the memory controller. A memory error
  1188. * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
  1189. * bit 12 has an special meaning.
  1190. */
  1191. if ((mce->status & 0xefff) >> 7 != 1)
  1192. return NOTIFY_DONE;
  1193. if (mce->mcgstatus & MCG_STATUS_MCIP)
  1194. type = "Exception";
  1195. else
  1196. type = "Event";
  1197. pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
  1198. pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
  1199. mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
  1200. pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
  1201. pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
  1202. pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
  1203. pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
  1204. mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
  1205. pnd2_mce_output_error(mci, mce, &daddr);
  1206. /* Advice mcelog that the error were handled */
  1207. mce->kflags |= MCE_HANDLED_EDAC;
  1208. return NOTIFY_OK;
  1209. }
/* Notifier block hooked into the MCE decode chain at EDAC priority. */
static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
	.priority = MCE_PRIO_EDAC,
};
  1214. #ifdef CONFIG_EDAC_DEBUG
  1215. /*
  1216. * Write an address to this file to exercise the address decode
  1217. * logic in this driver.
  1218. */
/* Last address written through the debugfs control file. */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
/* Human-readable decode result, exported read-only via debugfs. */
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;	/* debugfs directory handle */
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};
  1227. static int debugfs_u64_set(void *data, u64 val)
  1228. {
  1229. struct dram_addr daddr;
  1230. struct mce m;
  1231. *(u64 *)data = val;
  1232. m.mcgstatus = 0;
  1233. /* ADDRV + MemRd + Unknown channel */
  1234. m.status = MCI_STATUS_ADDRV + 0x9f;
  1235. m.addr = val;
  1236. pnd2_mce_output_error(pnd2_mci, &m, &daddr);
  1237. snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
  1238. "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
  1239. m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
  1240. pnd2_blob.size = strlen(pnd2_blob.data);
  1241. return 0;
  1242. }
  1243. DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
/* Create the debugfs directory and files for exercising address decode. */
static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	/* Write an address here to run it through the decoder... */
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	/* ...then read the decoded result back from here. */
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}
static void teardown_pnd2_debug(void)
{
	/* Removes the whole pnd2_test directory and its files. */
	debugfs_remove_recursive(pnd2_test);
}
  1255. #else
/* No-op stubs when CONFIG_EDAC_DEBUG is disabled. */
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
  1258. #endif /* CONFIG_EDAC_DEBUG */
  1259. static int pnd2_probe(void)
  1260. {
  1261. int rc;
  1262. edac_dbg(2, "\n");
  1263. rc = get_registers();
  1264. if (rc)
  1265. return rc;
  1266. return pnd2_register_mci(&pnd2_mci);
  1267. }
/* Unregister the single mci created in pnd2_probe(). */
static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}
/* Platform hooks and parameters: Apollo Lake (4 channels, 1 DIMM each). */
static struct dunit_ops apl_ops = {
	.name = "pnd2/apl",
	.type = APL,
	.pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift = 0,
	.channels = APL_NUM_CHANNELS,
	.dimms_per_channel = 1,
	.rd_reg = apl_rd_reg,
	.get_registers = apl_get_registers,
	.check_ecc = apl_check_ecc_active,
	.mk_region = apl_mk_region,
	.get_dimm_config = apl_get_dimm_config,
	.pmi2mem = apl_pmi2mem,
};
/* Platform hooks and parameters: Denverton (2 DIMM slots per channel). */
static struct dunit_ops dnv_ops = {
	.name = "pnd2/dnv",
	.type = DNV,
	.pmiaddr_shift = 0,
	.pmiidx_shift = 1,
	.channels = DNV_NUM_CHANNELS,
	.dimms_per_channel = 2,
	.rd_reg = dnv_rd_reg,
	.get_registers = dnv_get_registers,
	.check_ecc = dnv_check_ecc_active,
	.mk_region = dnv_mk_region,
	.get_dimm_config = dnv_get_dimm_config,
	.pmi2mem = dnv_pmi2mem,
};
/* CPU models this driver binds to; driver_data selects the ops flavor. */
static const struct x86_cpu_id pnd2_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &apl_ops),	/* Apollo Lake */
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &dnv_ops),	/* Denverton */
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
/*
 * Module load: verify no other EDAC driver owns the platform, match the
 * CPU model, probe the hardware and hook into the MCE decode chain.
 */
static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	const char *owner;
	int rc;

	edac_dbg(2, "\n");

	/* Only one EDAC platform driver may be active at a time. */
	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	/* Platform registers presumably unavailable in a guest -- skip VMs. */
	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	/* ops selects between the Apollo Lake and Denverton flavors. */
	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		/* APL register access needs the P2SB on PCI bus 0. */
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}
/* Module unload: undo pnd2_init() in reverse order. */
static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}
  1347. module_init(pnd2_init);
  1348. module_exit(pnd2_exit);
  1349. module_param(edac_op_state, int, 0444);
  1350. MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
  1351. MODULE_LICENSE("GPL v2");
  1352. MODULE_AUTHOR("Tony Luck");
  1353. MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");