// SPDX-License-Identifier: GPL-2.0-only
/*
 * EMIF driver
 *
 * Copyright (C) 2012 Texas Instruments, Inc.
 *
 * Aneesh V <[email protected]>
 * Santosh Shilimkar <[email protected]>
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/platform_data/emif_plat.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pm.h>

#include "emif.h"
#include "jedec_ddr.h"
#include "of_memory.h"

/**
 * struct emif_data - Per device static data for driver's use
 * @duplicate:			Whether the DDR devices attached to this EMIF
 *				instance are exactly the same as those on
 *				EMIF1. In this case we can save some memory
 *				and processing
 * @temperature_level:		Maximum temperature of LPDDR2 devices attached
 *				to this EMIF - read from MR4 register. If there
 *				are two devices attached to this EMIF, this
 *				value is the maximum of the two temperature
 *				levels.
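 * @lpmode:			Low power mode (REG_LP_MODE) chosen at init
 *				time; consulted by the i728 workaround in
 *				do_freq_update()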
 * @node:			node in the device list
 * @base:			base address of memory-mapped IO registers.
 * @dev:			device pointer.
 * @regs_cache:			An array of 'struct emif_regs' that stores
 *				calculated register values for different
 *				frequencies, to avoid re-calculating them on
 *				each DVFS transition.
 * @curr_regs:			The set of register values used in the last
 *				frequency change (i.e. corresponding to the
 *				frequency in effect at the moment)
 * @plat_data:			Pointer to saved platform data.
 * @debugfs_root:		dentry to the root folder for EMIF in debugfs
 * @np_ddr:			Pointer to ddr device tree node
 */
struct emif_data {
	u8				duplicate;
	u8				temperature_level;
	u8				lpmode;
	struct list_head		node;
	unsigned long			irq_state;
	void __iomem			*base;
	struct device			*dev;
	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
	struct emif_regs		*curr_regs;
	struct emif_platform_data	*plat_data;
	struct dentry			*debugfs_root;
	struct device_node		*np_ddr;
};

static struct emif_data *emif1;
static DEFINE_SPINLOCK(emif_lock);
static unsigned long	irq_state;
static LIST_HEAD(device_list);

#ifdef CONFIG_DEBUG_FS
static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
				 struct emif_regs *regs)
{
	u32 type = emif->plat_data->device_info->type;
	u32 ip_rev = emif->plat_data->ip_rev;

	seq_printf(s, "EMIF register cache dump for %dMHz\n",
		regs->freq/1000000);

	seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
	seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
	seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
	seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);

	if (ip_rev == EMIF_4D) {
		seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_normal);
		seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_volt_ramp);
	} else if (ip_rev == EMIF_4D5) {
		seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_normal);
		seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_volt_ramp);
	}

	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
			regs->ref_ctrl_shdw_derated);
		seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim1_shdw_derated);
		seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim3_shdw_derated);
	}
}
static int emif_regdump_show(struct seq_file *s, void *unused)
{
	struct emif_data	*emif = s->private;
	struct emif_regs	**regs_cache;
	int			i;

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		do_emif_regdump_show(s, emif, regs_cache[i]);
		seq_putc(s, '\n');
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(emif_regdump);

static int emif_mr4_show(struct seq_file *s, void *unused)
{
	struct emif_data *emif = s->private;

	seq_printf(s, "MR4=%d\n", emif->temperature_level);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(emif_mr4);

static int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
	debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
			    &emif_regdump_fops);
	debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
			    &emif_mr4_fops);
	return 0;
}

static void __exit emif_debugfs_exit(struct emif_data *emif)
{
	debugfs_remove_recursive(emif->debugfs_root);
	emif->debugfs_root = NULL;
}
#else
static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	return 0;
}

static inline void __exit emif_debugfs_exit(struct emif_data *emif)
{
}
#endif

/*
 * Get bus width used by EMIF. Note that this may be different from the
 * bus width of the DDR devices used. For instance two 16-bit DDR devices
 * may be connected to a given CS of EMIF. In this case bus width as far
 * as EMIF is concerned is 32, whereas the DDR bus width is 16 bits.
 */
static u32 get_emif_bus_width(struct emif_data *emif)
{
	u32		width;
	void __iomem	*base = emif->base;

	width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
			>> NARROW_MODE_SHIFT;
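	/* A narrow-mode field of 0 means the full 32-bit data path is used */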
	width = width == 0 ? 32 : 16;

	return width;
}

static void set_lpmode(struct emif_data *emif, u8 lpmode)
{
	u32 temp;
	void __iomem *base = emif->base;

	/*
	 * Workaround for errata i743 - LPDDR2 Power-Down State is Not
	 * Efficient
	 *
	 * i743 DESCRIPTION:
	 * The EMIF supports power-down state for low power. The EMIF
	 * automatically puts the SDRAM into power-down after the memory is
	 * not accessed for a defined number of cycles and the
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
	 * As the EMIF supports automatic output impedance calibration, a ZQ
	 * calibration long command is issued every time it exits active
	 * power-down and precharge power-down modes. The EMIF waits and
	 * blocks any other command during this calibration.
	 * The EMIF does not allow selective disabling of ZQ calibration upon
	 * exit of power-down mode. Due to very short periods of power-down
	 * cycles, ZQ calibration overhead creates bandwidth issues and
	 * increases overall system power consumption. On the other hand,
	 * issuing ZQ calibration long commands when exiting self-refresh is
	 * still required.
	 *
	 * WORKAROUND
	 * Because there is no power consumption benefit of the power-down due
	 * to the calibration and there is a performance risk, the guideline
	 * is to not allow the power-down state and, therefore, not to set
	 * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
	 */
	if ((emif->plat_data->ip_rev == EMIF_4D) &&
	    (lpmode == EMIF_LP_MODE_PWR_DN)) {
		WARN_ONCE(1,
			  "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743; switching to LP_MODE_SELF_REFRESH(2)\n");
		/* rollback LP_MODE to Self-refresh mode */
		lpmode = EMIF_LP_MODE_SELF_REFRESH;
	}

	temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
	temp &= ~LP_MODE_MASK;
	temp |= (lpmode << LP_MODE_SHIFT);
	writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
}

static void do_freq_update(void)
{
	struct emif_data *emif;

	/*
	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
	 *
	 * i728 DESCRIPTION:
	 * The EMIF automatically puts the SDRAM into self-refresh mode
	 * after the EMIF has not performed accesses during
	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
	 * to 0x2. If during a small window the following three events
	 * occur:
	 * - The SR_TIMING counter expires
	 * - A frequency change is requested
	 * - An OCP access is requested
	 * then it causes an unstable clock on the DDR interface.
	 *
	 * WORKAROUND
	 * To avoid the occurrence of the three events, the workaround
	 * is to disable the self-refresh when requesting a frequency
	 * change. Before requesting a frequency change the software must
	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
	 * frequency change has been done, the software can reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
	 */
	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
	}

	/*
	 * TODO: Do FREQ_UPDATE here when an API
	 * is available for this as part of the new
	 * clock framework
	 */

	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
	}
}

/* Find addressing table entry based on the device's type and density */
static const struct lpddr2_addressing *get_addressing_table(
	const struct ddr_device_info *device_info)
{
	u32		index, type, density;

	type = device_info->type;
	density = device_info->density;

	switch (type) {
	case DDR_TYPE_LPDDR2_S4:
		index = density - 1;
		break;
	case DDR_TYPE_LPDDR2_S2:
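		/*
		 * 1Gb and 2Gb S2 parts are addressed differently from their
		 * S4 counterparts and appear to use dedicated entries past
		 * the common ones, hence the 'density + 3' index below
		 */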
		switch (density) {
		case DDR_DENSITY_1Gb:
		case DDR_DENSITY_2Gb:
			index = density + 3;
			break;
		default:
			index = density - 1;
		}
		break;
	default:
		return NULL;
	}

	return &lpddr2_jedec_addressing_table[index];
}

static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
		bool cs1_used, bool cal_resistors_per_cs)
{
	u32 zq = 0, val = 0;
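
	/* ZQCS interval is programmed as a number of refresh periods (tREFI) */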
	val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
	zq |= val << ZQ_REFINTERVAL_SHIFT;

	val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQCL_MULT_SHIFT;

	val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQINIT_MULT_SHIFT;

	zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;

	if (cal_resistors_per_cs)
		zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
	else
		zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;

	zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */

	val = cs1_used ? 1 : 0;
	zq |= val << ZQ_CS1EN_SHIFT;

	return zq;
}

static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
		const struct emif_custom_configs *custom_configs, bool cs1_used,
		u32 sdram_io_width, u32 emif_bus_width)
{
	u32 alert = 0, interval, devcnt;

	if (custom_configs && (custom_configs->mask &
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
		interval = custom_configs->temp_alert_poll_interval_ms;
	else
		interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;

	interval *= 1000000;			/* Convert to ns */
	interval /= addressing->tREFI_ns;	/* Convert to refresh cycles */
	alert |= (interval << TA_REFINTERVAL_SHIFT);

	/*
	 * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
	 * also to this form and subtract to get TA_DEVCNT, which is
	 * in log2(x) form.
	 */
	emif_bus_width = __fls(emif_bus_width) - 1;
	devcnt = emif_bus_width - sdram_io_width;
	alert |= devcnt << TA_DEVCNT_SHIFT;

	/* DEVWDT is in 'log2(x) - 3' form */
	alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;

	alert |= 1 << TA_SFEXITEN_SHIFT;
	alert |= 1 << TA_CS0EN_SHIFT;
	alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;

	return alert;
}

static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
{
	u32 pwr_mgmt_ctrl	= 0, timeout;
	u32 lpmode		= EMIF_LP_MODE_SELF_REFRESH;
	u32 timeout_perf	= EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
	u32 timeout_pwr		= EMIF_LP_MODE_TIMEOUT_POWER;
	u32 freq_threshold	= EMIF_LP_MODE_FREQ_THRESHOLD;
	u32 mask;
	u8 shift;

	struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;

	if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
		lpmode = cust_cfgs->lpmode;
		timeout_perf = cust_cfgs->lpmode_timeout_performance;
		timeout_pwr = cust_cfgs->lpmode_timeout_power;
		freq_threshold = cust_cfgs->lpmode_freq_threshold;
	}

	/* Timeout based on DDR frequency */
	timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;

	/*
	 * The value to be set in register is "log2(timeout) - 3"
	 * if timeout < 16 load 0 in register
	 * if timeout is not a power of 2, round to next highest power of 2
	 */
	if (timeout < 16) {
		timeout = 0;
	} else {
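		/*
		 * If timeout is not a power of 2, shifting it left one bit
		 * makes __fls() land on the next higher power of 2
		 */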
		if (timeout & (timeout - 1))
			timeout <<= 1;
		timeout = __fls(timeout) - 3;
	}

	switch (lpmode) {
	case EMIF_LP_MODE_CLOCK_STOP:
		shift = CS_TIM_SHIFT;
		mask = CS_TIM_MASK;
		break;
	case EMIF_LP_MODE_SELF_REFRESH:
		/* Workaround for errata i735 */
		if (timeout < 6)
			timeout = 6;

		shift = SR_TIM_SHIFT;
		mask = SR_TIM_MASK;
		break;
	case EMIF_LP_MODE_PWR_DN:
		shift = PD_TIM_SHIFT;
		mask = PD_TIM_MASK;
		break;
	case EMIF_LP_MODE_DISABLE:
	default:
		mask = 0;
		shift = 0;
		break;
	}
	/* Round to maximum in case of overflow, BUT warn! */
	if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
		pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
		       lpmode,
		       timeout_perf,
		       timeout_pwr,
		       freq_threshold);
		WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
		     timeout, mask >> shift);
		timeout = mask >> shift;
	}

	/* Setup required timing */
	pwr_mgmt_ctrl = (timeout << shift) & mask;
	/* setup a default mask for rest of the modes */
	pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
			  ~mask;

	/* No CS_TIM in EMIF_4D5 */
	if (ip_rev == EMIF_4D5)
		pwr_mgmt_ctrl &= ~CS_TIM_MASK;

	pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;

	return pwr_mgmt_ctrl;
}

/*
 * Get the temperature level of the EMIF instance:
 * Reads the MR4 register of attached SDRAM parts to find out the temperature
 * level. If there are two parts attached (one on each CS), then the
 * temperature level for the EMIF instance is the higher of the two
 * temperatures.
 */
static void get_temperature_level(struct emif_data *emif)
{
	u32		temp, temperature_level;
	void __iomem	*base;

	base = emif->base;

	/* Read mode register 4 */
	writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
	temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
	temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
				MR4_SDRAM_REF_RATE_SHIFT;

	if (emif->plat_data->device_info->cs1_used) {
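		/* Repeat the MR4 read for the device on CS1 */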
		writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
		temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
		temp = (temp & MR4_SDRAM_REF_RATE_MASK)
				>> MR4_SDRAM_REF_RATE_SHIFT;
		temperature_level = max(temp, temperature_level);
	}

	/* treat everything less than nominal (3) in MR4 as nominal */
	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
		temperature_level = SDRAM_TEMP_NOMINAL;

	/* if we get a reserved value in MR4, persist with the existing value */
	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
		emif->temperature_level = temperature_level;
}

/*
 * setup_temperature_sensitive_regs() - set the timings for temperature
 * sensitive registers. This happens once at initialisation time based
 * on the temperature at boot time and subsequently based on the temperature
 * alert interrupt. Temperature alert can happen when the temperature
 * increases or drops. So this function can have the effect of either
 * derating the timings or going back to nominal values.
 */
static void setup_temperature_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs)
{
	u32		tim1, tim3, ref_ctrl, type;
	void __iomem	*base = emif->base;
	u32		temperature;

	type = emif->plat_data->device_info->type;

	tim1 = regs->sdram_tim1_shdw;
	tim3 = regs->sdram_tim3_shdw;
	ref_ctrl = regs->ref_ctrl_shdw;

	/* No de-rating for non-lpddr2 devices */
	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
		goto out;

	temperature = emif->temperature_level;
	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
		tim1 = regs->sdram_tim1_shdw_derated;
		tim3 = regs->sdram_tim3_shdw_derated;
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	}

out:
	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
}

static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
{
	u32		old_temp_level;
	irqreturn_t	ret = IRQ_HANDLED;
	struct emif_custom_configs *custom_configs;

	spin_lock_irqsave(&emif_lock, irq_state);
	old_temp_level = emif->temperature_level;
	get_temperature_level(emif);

	if (unlikely(emif->temperature_level == old_temp_level)) {
		goto out;
	} else if (!emif->curr_regs) {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
		goto out;
	}

	custom_configs = emif->plat_data->custom_configs;

	/*
	 * If we detect higher than "nominal rating" from the DDR sensor
	 * on an unsupported DDR part, shut down the system
	 */
	if (custom_configs && !(custom_configs->mask &
				EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
		if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
			dev_err(emif->dev,
				"%s: NOT extended temperature capable memory. Treating MR4=0x%02x as a shutdown event\n",
				__func__, emif->temperature_level);
			/*
			 * Temperature far too high - do kernel_power_off()
			 * from thread context
			 */
			emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
			ret = IRQ_WAKE_THREAD;
			goto out;
		}
	}

	if (emif->temperature_level < old_temp_level ||
		emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		/*
		 * Temperature coming down - defer handling to thread OR
		 * Temperature far too high - do kernel_power_off() from
		 * thread context
		 */
		ret = IRQ_WAKE_THREAD;
	} else {
		/* Temperature is going up - handle immediately */
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	}

out:
	spin_unlock_irqrestore(&emif_lock, irq_state);
	return ret;
}

static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
{
	u32			interrupts;
	struct emif_data	*emif = dev_id;
	void __iomem		*base = emif->base;
	struct device		*dev = emif->dev;
	irqreturn_t		ret = IRQ_HANDLED;

	/* Save the status and clear it */
	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);

	/*
	 * Handle temperature alert
	 * Temperature alert should be the same for all ports
	 * So, it's enough to process it only for one of the ports
	 */
	if (interrupts & TA_SYS_MASK)
		ret = handle_temp_alert(base, emif);

	if (interrupts & ERR_SYS_MASK)
		dev_err(dev, "Access error from SYS port - %x\n", interrupts);

	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* Save the status and clear it */
		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);

		if (interrupts & ERR_LL_MASK)
			dev_err(dev, "Access error from LL port - %x\n",
				interrupts);
	}

	return ret;
}

static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
{
	struct emif_data	*emif = dev_id;

	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit. Needs shutdown!\n");

		/* If we have power-off ability, use it, else try restarting */
		if (kernel_can_power_off()) {
			kernel_power_off();
		} else {
			WARN(1, "FIXME: no pm_power_off; trying restart\n");
			kernel_restart("SDRAM Over-temp Emergency restart");
		}
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&emif_lock, irq_state);

	if (emif->curr_regs) {
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	} else {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
	}

	spin_unlock_irqrestore(&emif_lock, irq_state);

	return IRQ_HANDLED;
}

static void clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
	       base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
		       base + EMIF_LL_OCP_INTERRUPT_STATUS);
}

static void disable_and_clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	/* Disable all interrupts */
	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
	       base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
		       base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);

	/* Clear all interrupts */
	clear_all_interrupts(emif);
}

static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
{
	u32		interrupts, type;
	void __iomem	*base = emif->base;

	type = emif->plat_data->device_info->type;

	clear_all_interrupts(emif);

	/* Enable interrupts for SYS interface */
	interrupts = EN_ERR_SYS_MASK;
	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
		interrupts |= EN_TA_SYS_MASK;
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);

	/* Enable interrupts for LL interface */
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* TA need not be enabled for LL */
		interrupts = EN_ERR_LL_MASK;
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
	}

	/* setup IRQ handlers */
	return devm_request_threaded_irq(emif->dev, irq,
					 emif_interrupt_handler,
					 emif_threaded_isr,
					 0, dev_name(emif->dev),
					 emif);
}

static void __init_or_module emif_onetime_settings(struct emif_data *emif)
{
	u32				pwr_mgmt_ctrl, zq, temp_alert_cfg;
	void __iomem			*base = emif->base;
	const struct lpddr2_addressing	*addressing;
	const struct ddr_device_info	*device_info;

	device_info = emif->plat_data->device_info;
	addressing = get_addressing_table(device_info);

	/*
	 * Init power management settings
	 * We don't know the frequency yet. Use a high frequency
	 * value for a conservative timeout setting
	 */
	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
			emif->plat_data->ip_rev);
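	/*
	 * Remember the LP mode actually programmed; the i728 workaround
	 * in do_freq_update() relies on it
	 */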
	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);

	/* Init ZQ calibration settings */
	zq = get_zq_config_reg(addressing, device_info->cs1_used,
		device_info->cal_resistors_per_cs);
	writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);

	/* Check temperature level */
	get_temperature_level(emif);
	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit. Needs shutdown!\n");

	/* Init temperature polling */
	temp_alert_cfg = get_temp_alert_config(addressing,
		emif->plat_data->custom_configs, device_info->cs1_used,
		device_info->io_width, get_emif_bus_width(emif));
	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);

	/*
	 * Program external PHY control registers that are not frequency
	 * dependent
	 */
	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
		return;
	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
}

static void get_default_timings(struct emif_data *emif)
{
	struct emif_platform_data *pd = emif->plat_data;

	pd->timings		= lpddr2_jedec_timings;
	pd->timings_arr_size	= ARRAY_SIZE(lpddr2_jedec_timings);

	dev_warn(emif->dev, "%s: using default timings\n", __func__);
}

static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
		u32 ip_rev, struct device *dev)
{
	int valid;

	valid = (type == DDR_TYPE_LPDDR2_S4 ||
			type == DDR_TYPE_LPDDR2_S2)
		&& (density >= DDR_DENSITY_64Mb
			&& density <= DDR_DENSITY_8Gb)
		&& (io_width >= DDR_IO_WIDTH_8
			&& io_width <= DDR_IO_WIDTH_32);

	/* Combinations of EMIF and PHY revisions that we support today */
	switch (ip_rev) {
	case EMIF_4D:
		valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
		break;
	case EMIF_4D5:
		valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
		break;
	default:
		valid = 0;
	}

	if (!valid)
		dev_err(dev, "%s: invalid DDR details\n", __func__);
	return valid;
}

static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
		struct device *dev)
{
	int valid = 1;

	if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
		(cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
		valid = cust_cfgs->lpmode_freq_threshold &&
			cust_cfgs->lpmode_timeout_performance &&
			cust_cfgs->lpmode_timeout_power;

	if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
		valid = valid && cust_cfgs->temp_alert_poll_interval_ms;

	if (!valid)
		dev_warn(dev, "%s: invalid custom configs\n", __func__);

	return valid;
}

#if defined(CONFIG_OF)
static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
		struct emif_data *emif)
{
	struct emif_custom_configs	*cust_cfgs = NULL;
	int				len;
	const __be32			*lpmode, *poll_intvl;

	lpmode = of_get_property(np_emif, "low-power-mode", &len);
	poll_intvl = of_get_property(np_emif, "temp-alert-poll-interval", &len);

	if (lpmode || poll_intvl)
		cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
			GFP_KERNEL);
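
	/* Nothing to do if neither property is present or allocation failed */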
	if (!cust_cfgs)
		return;

	if (lpmode) {
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
		cust_cfgs->lpmode = be32_to_cpup(lpmode);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-performance",
				&cust_cfgs->lpmode_timeout_performance);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-power",
				&cust_cfgs->lpmode_timeout_power);
		of_property_read_u32(np_emif,
				"low-power-mode-freq-threshold",
				&cust_cfgs->lpmode_freq_threshold);
	}

	if (poll_intvl) {
		cust_cfgs->mask |=
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
		cust_cfgs->temp_alert_poll_interval_ms =
				be32_to_cpup(poll_intvl);
	}

	if (of_find_property(np_emif, "extended-temp-part", &len))
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;

	if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
		devm_kfree(emif->dev, cust_cfgs);
		return;
	}

	emif->plat_data->custom_configs = cust_cfgs;
}

static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
		struct device_node *np_ddr,
		struct ddr_device_info *dev_info)
{
	u32 density = 0, io_width = 0;
	int len;

	if (of_find_property(np_emif, "cs1-used", &len))
		dev_info->cs1_used = true;

	if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
		dev_info->cal_resistors_per_cs = true;

	if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
		dev_info->type = DDR_TYPE_LPDDR2_S4;
	else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
		dev_info->type = DDR_TYPE_LPDDR2_S2;

	of_property_read_u32(np_ddr, "density", &density);
	of_property_read_u32(np_ddr, "io-width", &io_width);

	/* Convert from density in Mb to the density encoding in jedec_ddr.h */
	if (density & (density - 1))
		dev_info->density = 0;
	else
		dev_info->density = __fls(density) - 5;

	/* Convert from io_width in bits to the encoding in jedec_ddr.h */
	if (io_width & (io_width - 1))
		dev_info->io_width = 0;
	else
		dev_info->io_width = __fls(io_width) - 1;
}

static struct emif_data * __init_or_module of_get_memory_device_details(
		struct device_node *np_emif, struct device *dev)
{
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info = NULL;
	struct emif_platform_data	*pd = NULL;
	struct device_node		*np_ddr;
	int				len;

	np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
	if (!np_ddr)
		goto error;
	emif	= devm_kzalloc(dev, sizeof(struct emif_data), GFP_KERNEL);
	pd	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

	if (!emif || !pd || !dev_info) {
		dev_err(dev, "%s: Out of memory!\n", __func__);
		goto error;
	}

	emif->plat_data		= pd;
	pd->device_info		= dev_info;
	emif->dev		= dev;
	emif->np_ddr		= np_ddr;
	emif->temperature_level	= SDRAM_TEMP_NOMINAL;

	if (of_device_is_compatible(np_emif, "ti,emif-4d"))
		emif->plat_data->ip_rev = EMIF_4D;
	else if (of_device_is_compatible(np_emif, "ti,emif-4d5"))
		emif->plat_data->ip_rev = EMIF_4D5;

	of_property_read_u32(np_emif, "phy-type", &pd->phy_type);

	if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
		pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;

	of_get_ddr_info(np_emif, np_ddr, dev_info);
	if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
			pd->device_info->io_width, pd->phy_type, pd->ip_rev,
			emif->dev)) {
		dev_err(dev, "%s: invalid device data!\n", __func__);
		goto error;
	}
	/*
	 * For EMIF instances other than EMIF1 see if the devices connected
	 * are exactly the same as on EMIF1 (which is typically the case).
	 * If so, mark it as a duplicate of EMIF1. This will save some memory
	 * and computation.
	 */
	if (emif1 && emif1->np_ddr == np_ddr) {
		emif->duplicate = true;
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	of_get_custom_configs(np_emif, emif);
	emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev,
					emif->plat_data->device_info->type,
					&emif->plat_data->timings_arr_size);

	emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev);
	goto out;

error:
	return NULL;
out:
	return emif;
}

#else
static struct emif_data * __init_or_module of_get_memory_device_details(
		struct device_node *np_emif, struct device *dev)
{
	return NULL;
}
#endif

static struct emif_data *__init_or_module get_device_details(
		struct platform_device *pdev)
{
	u32				size;
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info;
	struct emif_custom_configs	*cust_cfgs;
	struct emif_platform_data	*pd;
	struct device			*dev;
	void				*temp;

	pd = pdev->dev.platform_data;
	dev = &pdev->dev;

	if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
			pd->device_info->density, pd->device_info->io_width,
			pd->phy_type, pd->ip_rev, dev))) {
		dev_err(dev, "%s: invalid device data\n", __func__);
		goto error;
	}

	emif	= devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
	temp	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

	if (!emif || !temp || !dev_info)
		goto error;

	memcpy(temp, pd, sizeof(*pd));
	pd = temp;
	memcpy(dev_info, pd->device_info, sizeof(*dev_info));

	pd->device_info		= dev_info;
	emif->plat_data		= pd;
	emif->dev		= dev;
	emif->temperature_level	= SDRAM_TEMP_NOMINAL;

	/*
	 * For EMIF instances other than EMIF1 see if the devices connected
	 * are exactly the same as on EMIF1 (which is typically the case).
	 * If so, mark it as a duplicate of EMIF1 and skip copying timings
	 * data. This will save some memory and some computation later.
	 */
	emif->duplicate = emif1 && (memcmp(dev_info,
		emif1->plat_data->device_info,
		sizeof(struct ddr_device_info)) == 0);

	if (emif->duplicate) {
		pd->timings = NULL;
		pd->min_tck = NULL;
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	/*
	 * Copy custom configs - ignore allocation error, if any, as
	 * custom_configs is not very critical
	 */
	cust_cfgs = pd->custom_configs;
	if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
		temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
		if (temp)
			memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
		pd->custom_configs = temp;
	}

	/*
	 * Copy timings and min-tck values from platform data. If they are
	 * not available or if memory allocation fails, use JEDEC defaults
	 */
	size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
	if (pd->timings) {
		temp = devm_kzalloc(dev, size, GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->timings, size);
			pd->timings = temp;
		} else {
			get_default_timings(emif);
		}
	} else {
		get_default_timings(emif);
	}

	if (pd->min_tck) {
		temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
			pd->min_tck = temp;
		} else {
			pd->min_tck = &lpddr2_jedec_min_tck;
		}
	} else {
		pd->min_tck = &lpddr2_jedec_min_tck;
	}

out:
	return emif;
error:
	return NULL;
}

static int __init_or_module emif_probe(struct platform_device *pdev)
{
	struct emif_data	*emif;
	int			irq, ret;

	if (pdev->dev.of_node)
		emif = of_get_memory_device_details(pdev->dev.of_node,
			&pdev->dev);
	else
		emif = get_device_details(pdev);

	if (!emif) {
		pr_err("%s: error getting device data\n", __func__);
		goto error;
	}

	list_add(&emif->node, &device_list);

	/* Save pointers to each other in emif and device structures */
	emif->dev = &pdev->dev;
	platform_set_drvdata(pdev, emif);

	emif->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(emif->base))
		goto error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto error;

	emif_onetime_settings(emif);
	emif_debugfs_init(emif);
	disable_and_clear_all_interrupts(emif);
	ret = setup_interrupts(emif, irq);
	if (ret)
		goto error;

	/* One-time actions taken on probing the first device */
	if (!emif1) {
		emif1 = emif;

		/*
		 * TODO: register notifiers for frequency and voltage
		 * change here once the respective frameworks are
		 * available
		 */
	}

	dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
		__func__, emif->base, irq);

	return 0;
error:
	return -ENODEV;
}

static int __exit emif_remove(struct platform_device *pdev)
{
	struct emif_data *emif = platform_get_drvdata(pdev);

	emif_debugfs_exit(emif);

	return 0;
}

static void emif_shutdown(struct platform_device *pdev)
{
	struct emif_data	*emif = platform_get_drvdata(pdev);

	disable_and_clear_all_interrupts(emif);
}

#if defined(CONFIG_OF)
static const struct of_device_id emif_of_match[] = {
		{ .compatible	= "ti,emif-4d" },
		{ .compatible	= "ti,emif-4d5" },
		{},
};
MODULE_DEVICE_TABLE(of, emif_of_match);
#endif

static struct platform_driver emif_driver = {
	.remove		= __exit_p(emif_remove),
	.shutdown	= emif_shutdown,
	.driver = {
		.name = "emif",
		.of_match_table = of_match_ptr(emif_of_match),
	},
};

module_platform_driver_probe(emif_driver, emif_probe);

MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:emif");
MODULE_AUTHOR("Texas Instruments Inc");