mmdc.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2017 NXP
 * Copyright 2011,2016 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include "common.h"

#define MMDC_MAPSR              0x404
#define BP_MMDC_MAPSR_PSD       0
#define BP_MMDC_MAPSR_PSS       4

#define MMDC_MDMISC             0x18
#define BM_MMDC_MDMISC_DDR_TYPE 0x18
#define BP_MMDC_MDMISC_DDR_TYPE 0x3

#define TOTAL_CYCLES            0x0
#define BUSY_CYCLES             0x1
#define READ_ACCESSES           0x2
#define WRITE_ACCESSES          0x3
#define READ_BYTES              0x4
#define WRITE_BYTES             0x5

/* Enables, resets, freezes, overflow profiling */
#define DBG_DIS                 0x0
#define DBG_EN                  0x1
#define DBG_RST                 0x2
#define PRF_FRZ                 0x4
#define CYC_OVF                 0x8
#define PROFILE_SEL             0x10

#define MMDC_MADPCR0            0x410
#define MMDC_MADPCR1            0x414
#define MMDC_MADPSR0            0x418
#define MMDC_MADPSR1            0x41C
#define MMDC_MADPSR2            0x420
#define MMDC_MADPSR3            0x424
#define MMDC_MADPSR4            0x428
#define MMDC_MADPSR5            0x42C

#define MMDC_NUM_COUNTERS       6

#define MMDC_FLAG_PROFILE_SEL   0x1
#define MMDC_PRF_AXI_ID_CLEAR   0x0
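
/*
 * Profiling register layout, as used below: MADPCR0 carries the debug
 * control bits (DBG_EN, DBG_RST, PRF_FRZ, ...), MADPCR1 holds the AXI ID
 * filter, and MADPSR0..MADPSR5 are the six profiling result registers.
 * Each result register is hard-wired to one event, so the event numbers
 * 0..5 double as counter indices throughout this file.
 */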

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

static int ddr_type;

struct fsl_mmdc_devtype_data {
        unsigned int flags;
};

static const struct fsl_mmdc_devtype_data imx6q_data = {
};

static const struct fsl_mmdc_devtype_data imx6qp_data = {
        .flags = MMDC_FLAG_PROFILE_SEL,
};

static const struct of_device_id imx_mmdc_dt_ids[] = {
        { .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
        { .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
        { /* sentinel */ }
};
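
/*
 * Illustrative device-tree node this table matches (an example in the
 * spirit of imx6qdl.dtsi; consult the board dts for the real values):
 *
 *      mmdc0: memory-controller@21b0000 {
 *              compatible = "fsl,imx6q-mmdc";
 *              reg = <0x021b0000 0x4000>;
 *      };
 */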

#ifdef CONFIG_PERF_EVENTS

static enum cpuhp_state cpuhp_mmdc_state;
static DEFINE_IDA(mmdc_ida);

PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");
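
/*
 * These attributes surface under
 * /sys/bus/event_source/devices/mmdc<N>/events/ and make the counters
 * usable from the perf tool. A typical invocation (illustrative; the
 * instance number depends on probe order):
 *
 *      perf stat -a -e mmdc0/read-bytes/,mmdc0/write-bytes/ sleep 1
 *
 * The .unit/.scale pairs let perf report the byte counters in MB.
 */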

struct mmdc_pmu {
        struct pmu pmu;
        void __iomem *mmdc_base;
        cpumask_t cpu;
        struct hrtimer hrtimer;
        unsigned int active_events;
        int id;
        struct device *dev;
        struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
        struct hlist_node node;
        struct fsl_mmdc_devtype_data *devtype_data;
        struct clk *mmdc_ipg_clk;
};

/*
 * The polling period is set to one second; overflow of total-cycles (the
 * fastest-increasing counter) takes ten seconds, so one second is safe.
 */
static unsigned int mmdc_pmu_poll_period_us = 1000000;

module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
                   S_IRUGO | S_IWUSR);

static ktime_t mmdc_pmu_timer_period(void)
{
        return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
}

static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);

        return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
}

static struct device_attribute mmdc_pmu_cpumask_attr =
        __ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);

static struct attribute *mmdc_pmu_cpumask_attrs[] = {
        &mmdc_pmu_cpumask_attr.attr,
        NULL,
};

static struct attribute_group mmdc_pmu_cpumask_attr_group = {
        .attrs = mmdc_pmu_cpumask_attrs,
};

static struct attribute *mmdc_pmu_events_attrs[] = {
        &mmdc_pmu_total_cycles.attr.attr,
        &mmdc_pmu_busy_cycles.attr.attr,
        &mmdc_pmu_read_accesses.attr.attr,
        &mmdc_pmu_write_accesses.attr.attr,
        &mmdc_pmu_read_bytes.attr.attr,
        &mmdc_pmu_read_bytes_unit.attr.attr,
        &mmdc_pmu_read_bytes_scale.attr.attr,
        &mmdc_pmu_write_bytes.attr.attr,
        &mmdc_pmu_write_bytes_unit.attr.attr,
        &mmdc_pmu_write_bytes_scale.attr.attr,
        NULL,
};

static struct attribute_group mmdc_pmu_events_attr_group = {
        .name = "events",
        .attrs = mmdc_pmu_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
PMU_FORMAT_ATTR(axi_id, "config1:0-63");

static struct attribute *mmdc_pmu_format_attrs[] = {
        &format_attr_event.attr,
        &format_attr_axi_id.attr,
        NULL,
};

static struct attribute_group mmdc_pmu_format_attr_group = {
        .name = "format",
        .attrs = mmdc_pmu_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
        &mmdc_pmu_events_attr_group,
        &mmdc_pmu_format_attr_group,
        &mmdc_pmu_cpumask_attr_group,
        NULL,
};
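
/*
 * Map an event number onto its dedicated MADPSRn result register and read
 * the current 32-bit count. The mapping mirrors the
 * TOTAL_CYCLES..WRITE_BYTES definitions above.
 */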
static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
{
        void __iomem *mmdc_base, *reg;

        mmdc_base = pmu_mmdc->mmdc_base;

        switch (cfg) {
        case TOTAL_CYCLES:
                reg = mmdc_base + MMDC_MADPSR0;
                break;
        case BUSY_CYCLES:
                reg = mmdc_base + MMDC_MADPSR1;
                break;
        case READ_ACCESSES:
                reg = mmdc_base + MMDC_MADPSR2;
                break;
        case WRITE_ACCESSES:
                reg = mmdc_base + MMDC_MADPSR3;
                break;
        case READ_BYTES:
                reg = mmdc_base + MMDC_MADPSR4;
                break;
        case WRITE_BYTES:
                reg = mmdc_base + MMDC_MADPSR5;
                break;
        default:
                return WARN_ONCE(1,
                        "invalid configuration %d for mmdc counter", cfg);
        }
        return readl(reg);
}
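
/*
 * CPU hotplug callback: if the CPU going offline is the one this PMU
 * instance reads from, migrate the perf context to any other online CPU
 * and follow it.
 */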
static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
        int target;

        if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
        cpumask_set_cpu(target, &pmu_mmdc->cpu);

        return 0;
}

static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
                                          struct pmu *pmu,
                                          unsigned long *used_counters)
{
        int cfg = event->attr.config;

        if (is_software_event(event))
                return true;

        if (event->pmu != pmu)
                return false;

        return !test_and_set_bit(cfg, used_counters);
}

/*
 * Each event has a single fixed-purpose counter, so we can only have a
 * single active event for each at any point in time. Here we just check
 * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
 * event numbers are valid.
 */
static bool mmdc_pmu_group_is_valid(struct perf_event *event)
{
        struct pmu *pmu = event->pmu;
        struct perf_event *leader = event->group_leader;
        struct perf_event *sibling;
        unsigned long counter_mask = 0;

        set_bit(leader->attr.config, &counter_mask);

        if (event != leader) {
                if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
                        return false;
        }

        for_each_sibling_event(sibling, leader) {
                if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
                        return false;
        }

        return true;
}
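
/*
 * Validate a new event. This is an uncore PMU, so sampling, per-task
 * counting and a negative CPU are all rejected; a valid event is then
 * bound to the single CPU this instance counts on.
 */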
static int mmdc_pmu_event_init(struct perf_event *event)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        int cfg = event->attr.config;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EOPNOTSUPP;

        if (event->cpu < 0) {
                dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
                return -EOPNOTSUPP;
        }

        if (event->attr.sample_period)
                return -EINVAL;

        if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
                return -EINVAL;

        if (!mmdc_pmu_group_is_valid(event))
                return -EINVAL;

        event->cpu = cpumask_first(&pmu_mmdc->cpu);
        return 0;
}
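
/*
 * Fold the current hardware count into event->count. The cmpxchg loop
 * makes the read-modify-write of prev_count safe against concurrent
 * updates, and masking the delta to 32 bits accounts for exactly one wrap
 * of the 32-bit hardware counter between two updates.
 */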
static void mmdc_pmu_event_update(struct perf_event *event)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

        do {
                prev_raw_count = local64_read(&hwc->prev_count);
                new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
                                                      event->attr.config);
        } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                new_raw_count) != prev_raw_count);

        delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
        local64_add(delta, &event->count);
}

static void mmdc_pmu_event_start(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        void __iomem *mmdc_base, *reg;
        u32 val;

        mmdc_base = pmu_mmdc->mmdc_base;
        reg = mmdc_base + MMDC_MADPCR0;

        /*
         * hrtimer is required because mmdc does not provide an interrupt so
         * polling is necessary
         */
        hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
                      HRTIMER_MODE_REL_PINNED);

        local64_set(&hwc->prev_count, 0);

        writel(DBG_RST, reg);

        /*
         * Write the AXI id parameter to MADPCR1.
         */
        val = event->attr.config1;
        reg = mmdc_base + MMDC_MADPCR1;
        writel(val, reg);

        reg = mmdc_base + MMDC_MADPCR0;
        val = DBG_EN;
        if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
                val |= PROFILE_SEL;

        writel(val, reg);
}

static int mmdc_pmu_event_add(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int cfg = event->attr.config;

        if (flags & PERF_EF_START)
                mmdc_pmu_event_start(event, flags);

        if (pmu_mmdc->mmdc_events[cfg] != NULL)
                return -EAGAIN;

        pmu_mmdc->mmdc_events[cfg] = event;
        pmu_mmdc->active_events++;

        local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));

        return 0;
}

static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        void __iomem *mmdc_base, *reg;

        mmdc_base = pmu_mmdc->mmdc_base;
        reg = mmdc_base + MMDC_MADPCR0;

        /* Freeze the profiling counters, then clear the AXI ID filter. */
        writel(PRF_FRZ, reg);

        reg = mmdc_base + MMDC_MADPCR1;
        writel(MMDC_PRF_AXI_ID_CLEAR, reg);

        mmdc_pmu_event_update(event);
}

static void mmdc_pmu_event_del(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        int cfg = event->attr.config;

        pmu_mmdc->mmdc_events[cfg] = NULL;
        pmu_mmdc->active_events--;

        if (pmu_mmdc->active_events == 0)
                hrtimer_cancel(&pmu_mmdc->hrtimer);

        mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
}
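
/*
 * Called from the hrtimer: update every active event so that no counter
 * wraps more than once between two polls (total-cycles wraps in roughly
 * ten seconds; see the mmdc_pmu_poll_period_us comment above).
 */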
static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
{
        int i;

        for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
                struct perf_event *event = pmu_mmdc->mmdc_events[i];

                if (event)
                        mmdc_pmu_event_update(event);
        }
}

static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
{
        struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
                                                 hrtimer);

        mmdc_pmu_overflow_handler(pmu_mmdc);
        hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());

        return HRTIMER_RESTART;
}

static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
                         void __iomem *mmdc_base, struct device *dev)
{
        *pmu_mmdc = (struct mmdc_pmu) {
                .pmu = (struct pmu) {
                        .task_ctx_nr = perf_invalid_context,
                        .attr_groups = attr_groups,
                        .event_init = mmdc_pmu_event_init,
                        .add = mmdc_pmu_event_add,
                        .del = mmdc_pmu_event_del,
                        .start = mmdc_pmu_event_start,
                        .stop = mmdc_pmu_event_stop,
                        .read = mmdc_pmu_event_update,
                        .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
                },
                .mmdc_base = mmdc_base,
                .dev = dev,
                .active_events = 0,
        };

        pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);

        return pmu_mmdc->id;
}

static int imx_mmdc_remove(struct platform_device *pdev)
{
        struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);

        ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
        cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
        perf_pmu_unregister(&pmu_mmdc->pmu);
        iounmap(pmu_mmdc->mmdc_base);
        clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
        kfree(pmu_mmdc);
        return 0;
}

static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
                              struct clk *mmdc_ipg_clk)
{
        struct mmdc_pmu *pmu_mmdc;
        char *name;
        int ret;
        const struct of_device_id *of_id =
                of_match_device(imx_mmdc_dt_ids, &pdev->dev);

        pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
        if (!pmu_mmdc) {
                pr_err("failed to allocate PMU device!\n");
                return -ENOMEM;
        }

        /* The first instance registers the hotplug state */
        if (!cpuhp_mmdc_state) {
                ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                              "perf/arm/mmdc:online", NULL,
                                              mmdc_pmu_offline_cpu);
                if (ret < 0) {
                        pr_err("cpuhp_setup_state_multi failed\n");
                        goto pmu_free;
                }
                cpuhp_mmdc_state = ret;
        }

        ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
        if (ret < 0)
                goto pmu_free;

        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mmdc%d", ret);
        if (!name) {
                ret = -ENOMEM;
                goto pmu_release_id;
        }

        pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
        pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;

        hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;

        cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);

        /* Register the pmu instance for cpu hotplug */
        cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);

        ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
        if (ret)
                goto pmu_register_err;

        platform_set_drvdata(pdev, pmu_mmdc);
        return 0;

pmu_register_err:
        pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
        cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
        hrtimer_cancel(&pmu_mmdc->hrtimer);
pmu_release_id:
        ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
pmu_free:
        kfree(pmu_mmdc);
        return ret;
}

#else
#define imx_mmdc_remove NULL
#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
#endif

static int imx_mmdc_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        void __iomem *mmdc_base, *reg;
        struct clk *mmdc_ipg_clk;
        u32 val;
        int err;

        /* the ipg clock is optional */
        mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(mmdc_ipg_clk))
                mmdc_ipg_clk = NULL;

        err = clk_prepare_enable(mmdc_ipg_clk);
        if (err) {
                dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n");
                return err;
        }

        mmdc_base = of_iomap(np, 0);
        WARN_ON(!mmdc_base);

        reg = mmdc_base + MMDC_MDMISC;
        /* Get ddr type */
        val = readl_relaxed(reg);
        ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >> BP_MMDC_MDMISC_DDR_TYPE;

        reg = mmdc_base + MMDC_MAPSR;

        /* Enable automatic power saving */
        val = readl_relaxed(reg);
        val &= ~(1 << BP_MMDC_MAPSR_PSD);
        writel_relaxed(val, reg);

        err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
        if (err) {
                iounmap(mmdc_base);
                clk_disable_unprepare(mmdc_ipg_clk);
        }

        return err;
}

int imx_mmdc_get_ddr_type(void)
{
        return ddr_type;
}

static struct platform_driver imx_mmdc_driver = {
        .driver = {
                .name = "imx-mmdc",
                .of_match_table = imx_mmdc_dt_ids,
        },
        .probe = imx_mmdc_probe,
        .remove = imx_mmdc_remove,
};

static int __init imx_mmdc_init(void)
{
        return platform_driver_register(&imx_mmdc_driver);
}
postcore_initcall(imx_mmdc_init);