exynos5422-dmc.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2019 Samsung Electronics Co., Ltd.
  4. * Author: Lukasz Luba <[email protected]>
  5. */
  6. #include <linux/clk.h>
  7. #include <linux/devfreq.h>
  8. #include <linux/devfreq-event.h>
  9. #include <linux/device.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/io.h>
  12. #include <linux/mfd/syscon.h>
  13. #include <linux/module.h>
  14. #include <linux/moduleparam.h>
  15. #include <linux/of_device.h>
  16. #include <linux/pm_opp.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/regmap.h>
  19. #include <linux/regulator/consumer.h>
  20. #include <linux/slab.h>
  21. #include "../jedec_ddr.h"
  22. #include "../of_memory.h"
/*
 * Module parameter: selects the driver's event source. 0 (default) uses
 * devfreq polling; 1 enables the performance-counter IRQ mode.
 */
static int irqmode;
module_param(irqmode, int, 0644);
MODULE_PARM_DESC(irqmode, "Enable IRQ mode (0=off [default], 1=on)");
/* DREX controller timing registers (offsets from each DREX base). */
#define EXYNOS5_DREXI_TIMINGAREF		(0x0030)
#define EXYNOS5_DREXI_TIMINGROW0		(0x0034)
#define EXYNOS5_DREXI_TIMINGDATA0		(0x0038)
#define EXYNOS5_DREXI_TIMINGPOWER0		(0x003C)
#define EXYNOS5_DREXI_TIMINGROW1		(0x00E4)
#define EXYNOS5_DREXI_TIMINGDATA1		(0x00E8)
#define EXYNOS5_DREXI_TIMINGPOWER1		(0x00EC)

/* Clock-controller (CDREX) register offsets, accessed through the regmap. */
#define CDREX_PAUSE				(0x2091c)
#define CDREX_LPDDR3PHY_CON3			(0x20a20)
#define CDREX_LPDDR3PHY_CLKM_SRC		(0x20700)

/* Selects between timing-register bank set 0 and set 1. */
#define EXYNOS5_TIMING_SET_SWI			BIT(28)
#define USE_MX_MSPLL_TIMINGS			(1)
#define USE_BPLL_TIMINGS			(0)
#define EXYNOS5_AREF_NORMAL			(0x2e)

/* Performance-counter (PPC) register offsets within each DREX. */
#define DREX_PPCCLKCON		(0x0130)
#define DREX_PEREV2CONFIG	(0x013c)
#define DREX_PMNC_PPC		(0xE000)
#define DREX_CNTENS_PPC		(0xE010)
#define DREX_CNTENC_PPC		(0xE020)
#define DREX_INTENS_PPC		(0xE030)
#define DREX_INTENC_PPC		(0xE040)
#define DREX_FLAG_PPC		(0xE050)
#define DREX_PMCNT2_PPC		(0xE130)

/*
 * A value for register DREX_PMNC_PPC which should be written to reset
 * the cycle counter CCNT (a reference wall clock). It sets zero to the
 * CCNT counter.
 */
#define CC_RESET		BIT(2)

/*
 * A value for register DREX_PMNC_PPC which does the reset of all performance
 * counters to zero.
 */
#define PPC_COUNTER_RESET	BIT(1)

/*
 * Enables all configured counters (including cycle counter). The value should
 * be written to the register DREX_PMNC_PPC.
 */
#define PPC_ENABLE		BIT(0)

/*
 * A value for register DREX_PPCCLKCON which enables performance events clock.
 * Must be written before first access to the performance counters register
 * set, otherwise it could crash.
 */
#define PEREV_CLK_EN		BIT(0)

/*
 * Values which are used to enable counters, interrupts or configure flags of
 * the performance counters. They configure counter 2 and cycle counter.
 */
#define PERF_CNT2		BIT(2)
#define PERF_CCNT		BIT(31)

/*
 * Performance event types which are used for setting the preferred event
 * to track in the counters.
 * There is a set of different types, the values are from range 0 to 0x6f.
 * These settings should be written to the configuration register which manages
 * the type of the event (register DREX_PEREV2CONFIG).
 */
#define READ_TRANSFER_CH0	(0x6d)
#define READ_TRANSFER_CH1	(0x6f)

/* Initial counter value; samples gathered = 0xffffffff - start value. */
#define PERF_COUNTER_START_VALUE	0xff000000
/* Overflow period (ns) below/above which load is ramped up or decayed. */
#define PERF_EVENT_UP_DOWN_THRESHOLD	900000000ULL
/**
 * struct dmc_opp_table - Operating level description
 * @freq_hz: target frequency in Hz
 * @volt_uv: target voltage in uV
 *
 * Covers frequency and voltage settings of the DMC operating mode.
 */
struct dmc_opp_table {
	u32 freq_hz;
	u32 volt_uv;
};
/**
 * struct exynos5_dmc - main structure describing DMC device
 * @dev: DMC device
 * @df: devfreq device structure returned by devfreq framework
 * @gov_data: configuration of devfreq governor
 * @base_drexi0: DREX0 registers mapping
 * @base_drexi1: DREX1 registers mapping
 * @clk_regmap: regmap for clock controller registers
 * @lock: protects curr_rate and frequency/voltage setting section
 * @curr_rate: current frequency
 * @curr_volt: current voltage
 * @opp: OPP table
 * @opp_count: number of 'opp' elements
 * @timings_arr_size: number of 'timings' elements
 * @timing_row: values for timing row register, for each OPP
 * @timing_data: values for timing data register, for each OPP
 * @timing_power: values for timing power register, for each OPP
 * @timings: DDR memory timings, from device tree
 * @min_tck: DDR memory minimum timing values, from device tree
 * @bypass_timing_row: value for timing row register for bypass timings
 * @bypass_timing_data: value for timing data register for bypass timings
 * @bypass_timing_power: value for timing power register for bypass
 *			timings
 * @vdd_mif: Memory interface regulator
 * @fout_spll: clock: SPLL
 * @fout_bpll: clock: BPLL
 * @mout_spll: clock: mux SPLL
 * @mout_bpll: clock: mux BPLL
 * @mout_mclk_cdrex: clock: mux mclk_cdrex
 * @mout_mx_mspll_ccore: clock: mux mx_mspll_ccore
 * @counter: devfreq events
 * @num_counters: number of 'counter' elements
 * @last_overflow_ts: time (in ns) of last overflow of each DREX
 * @load: utilization in percents
 * @total: total time between devfreq events
 * @in_irq_mode: whether running in interrupt mode (true)
 *		 or polling (false)
 *
 * The main structure for the Dynamic Memory Controller which covers clocks,
 * memory regions, HW information, parameters and current operating mode.
 */
struct exynos5_dmc {
	struct device *dev;
	struct devfreq *df;
	struct devfreq_simple_ondemand_data gov_data;
	void __iomem *base_drexi0;
	void __iomem *base_drexi1;
	struct regmap *clk_regmap;
	/* Protects curr_rate and frequency/voltage setting section */
	struct mutex lock;
	unsigned long curr_rate;
	unsigned long curr_volt;
	struct dmc_opp_table *opp;
	int opp_count;
	u32 timings_arr_size;
	u32 *timing_row;
	u32 *timing_data;
	u32 *timing_power;
	const struct lpddr3_timings *timings;
	const struct lpddr3_min_tck *min_tck;
	u32 bypass_timing_row;
	u32 bypass_timing_data;
	u32 bypass_timing_power;
	struct regulator *vdd_mif;
	struct clk *fout_spll;
	struct clk *fout_bpll;
	struct clk *mout_spll;
	struct clk *mout_bpll;
	struct clk *mout_mclk_cdrex;
	struct clk *mout_mx_mspll_ccore;
	struct devfreq_event_dev **counter;
	int num_counters;
	u64 last_overflow_ts[2];
	unsigned long load;
	unsigned long total;
	bool in_irq_mode;
};
/* Describes one bitfield of a DREX timing register. */
#define TIMING_FIELD(t_name, t_bit_beg, t_bit_end) \
	{ .name = t_name, .bit_beg = t_bit_beg, .bit_end = t_bit_end }

/*
 * Shift a timing value into its register position. Note: only bit_beg is
 * used here; bit_end documents the field width but is not masked against.
 */
#define TIMING_VAL2REG(timing, t_val)		\
({						\
		u32 __val;			\
		__val = (t_val) << (timing)->bit_beg;	\
		__val;				\
})

struct timing_reg {
	char *name;	/* timing parameter name as found in device tree */
	int bit_beg;	/* first (lowest) bit of the field */
	int bit_end;	/* last (highest) bit of the field */
	unsigned int val;
};
/* Bitfield layout of the TIMINGROW registers. */
static const struct timing_reg timing_row_reg_fields[] = {
	TIMING_FIELD("tRFC", 24, 31),
	TIMING_FIELD("tRRD", 20, 23),
	TIMING_FIELD("tRP", 16, 19),
	TIMING_FIELD("tRCD", 12, 15),
	TIMING_FIELD("tRC", 6, 11),
	TIMING_FIELD("tRAS", 0, 5),
};

/* Bitfield layout of the TIMINGDATA registers. */
static const struct timing_reg timing_data_reg_fields[] = {
	TIMING_FIELD("tWTR", 28, 31),
	TIMING_FIELD("tWR", 24, 27),
	TIMING_FIELD("tRTP", 20, 23),
	TIMING_FIELD("tW2W-C2C", 14, 14),
	TIMING_FIELD("tR2R-C2C", 12, 12),
	TIMING_FIELD("WL", 8, 11),
	TIMING_FIELD("tDQSCK", 4, 7),
	TIMING_FIELD("RL", 0, 3),
};

/* Bitfield layout of the TIMINGPOWER registers. */
static const struct timing_reg timing_power_reg_fields[] = {
	TIMING_FIELD("tFAW", 26, 31),
	TIMING_FIELD("tXSR", 16, 25),
	TIMING_FIELD("tXP", 8, 15),
	TIMING_FIELD("tCKE", 4, 7),
	TIMING_FIELD("tMRD", 0, 3),
};

/* Total number of timing fields across all three register layouts. */
#define TIMING_COUNT (ARRAY_SIZE(timing_row_reg_fields) + \
		      ARRAY_SIZE(timing_data_reg_fields) + \
		      ARRAY_SIZE(timing_power_reg_fields))
  217. static int exynos5_counters_set_event(struct exynos5_dmc *dmc)
  218. {
  219. int i, ret;
  220. for (i = 0; i < dmc->num_counters; i++) {
  221. if (!dmc->counter[i])
  222. continue;
  223. ret = devfreq_event_set_event(dmc->counter[i]);
  224. if (ret < 0)
  225. return ret;
  226. }
  227. return 0;
  228. }
  229. static int exynos5_counters_enable_edev(struct exynos5_dmc *dmc)
  230. {
  231. int i, ret;
  232. for (i = 0; i < dmc->num_counters; i++) {
  233. if (!dmc->counter[i])
  234. continue;
  235. ret = devfreq_event_enable_edev(dmc->counter[i]);
  236. if (ret < 0)
  237. return ret;
  238. }
  239. return 0;
  240. }
  241. static int exynos5_counters_disable_edev(struct exynos5_dmc *dmc)
  242. {
  243. int i, ret;
  244. for (i = 0; i < dmc->num_counters; i++) {
  245. if (!dmc->counter[i])
  246. continue;
  247. ret = devfreq_event_disable_edev(dmc->counter[i]);
  248. if (ret < 0)
  249. return ret;
  250. }
  251. return 0;
  252. }
  253. /**
  254. * find_target_freq_idx() - Finds requested frequency in local DMC configuration
  255. * @dmc: device for which the information is checked
  256. * @target_rate: requested frequency in KHz
  257. *
  258. * Seeks in the local DMC driver structure for the requested frequency value
  259. * and returns index or error value.
  260. */
  261. static int find_target_freq_idx(struct exynos5_dmc *dmc,
  262. unsigned long target_rate)
  263. {
  264. int i;
  265. for (i = dmc->opp_count - 1; i >= 0; i--)
  266. if (dmc->opp[i].freq_hz <= target_rate)
  267. return i;
  268. return -EINVAL;
  269. }
  270. /**
  271. * exynos5_switch_timing_regs() - Changes bank register set for DRAM timings
  272. * @dmc: device for which the new settings is going to be applied
  273. * @set: boolean variable passing set value
  274. *
  275. * Changes the register set, which holds timing parameters.
  276. * There is two register sets: 0 and 1. The register set 0
  277. * is used in normal operation when the clock is provided from main PLL.
  278. * The bank register set 1 is used when the main PLL frequency is going to be
  279. * changed and the clock is taken from alternative, stable source.
  280. * This function switches between these banks according to the
  281. * currently used clock source.
  282. */
  283. static int exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
  284. {
  285. unsigned int reg;
  286. int ret;
  287. ret = regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, &reg);
  288. if (ret)
  289. return ret;
  290. if (set)
  291. reg |= EXYNOS5_TIMING_SET_SWI;
  292. else
  293. reg &= ~EXYNOS5_TIMING_SET_SWI;
  294. regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, reg);
  295. return 0;
  296. }
  297. /**
  298. * exynos5_init_freq_table() - Initialized PM OPP framework
  299. * @dmc: DMC device for which the frequencies are used for OPP init
  300. * @profile: devfreq device's profile
  301. *
  302. * Populate the devfreq device's OPP table based on current frequency, voltage.
  303. */
  304. static int exynos5_init_freq_table(struct exynos5_dmc *dmc,
  305. struct devfreq_dev_profile *profile)
  306. {
  307. int i, ret;
  308. int idx;
  309. unsigned long freq;
  310. ret = devm_pm_opp_of_add_table(dmc->dev);
  311. if (ret < 0) {
  312. dev_err(dmc->dev, "Failed to get OPP table\n");
  313. return ret;
  314. }
  315. dmc->opp_count = dev_pm_opp_get_opp_count(dmc->dev);
  316. dmc->opp = devm_kmalloc_array(dmc->dev, dmc->opp_count,
  317. sizeof(struct dmc_opp_table), GFP_KERNEL);
  318. if (!dmc->opp)
  319. return -ENOMEM;
  320. idx = dmc->opp_count - 1;
  321. for (i = 0, freq = ULONG_MAX; i < dmc->opp_count; i++, freq--) {
  322. struct dev_pm_opp *opp;
  323. opp = dev_pm_opp_find_freq_floor(dmc->dev, &freq);
  324. if (IS_ERR(opp))
  325. return PTR_ERR(opp);
  326. dmc->opp[idx - i].freq_hz = freq;
  327. dmc->opp[idx - i].volt_uv = dev_pm_opp_get_voltage(opp);
  328. dev_pm_opp_put(opp);
  329. }
  330. return 0;
  331. }
  332. /**
  333. * exynos5_set_bypass_dram_timings() - Low-level changes of the DRAM timings
  334. * @dmc: device for which the new settings is going to be applied
  335. *
  336. * Low-level function for changing timings for DRAM memory clocking from
  337. * 'bypass' clock source (fixed frequency @400MHz).
  338. * It uses timing bank registers set 1.
  339. */
  340. static void exynos5_set_bypass_dram_timings(struct exynos5_dmc *dmc)
  341. {
  342. writel(EXYNOS5_AREF_NORMAL,
  343. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
  344. writel(dmc->bypass_timing_row,
  345. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW1);
  346. writel(dmc->bypass_timing_row,
  347. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW1);
  348. writel(dmc->bypass_timing_data,
  349. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA1);
  350. writel(dmc->bypass_timing_data,
  351. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA1);
  352. writel(dmc->bypass_timing_power,
  353. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER1);
  354. writel(dmc->bypass_timing_power,
  355. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER1);
  356. }
  357. /**
  358. * exynos5_dram_change_timings() - Low-level changes of the DRAM final timings
  359. * @dmc: device for which the new settings is going to be applied
  360. * @target_rate: target frequency of the DMC
  361. *
  362. * Low-level function for changing timings for DRAM memory operating from main
  363. * clock source (BPLL), which can have different frequencies. Thus, each
  364. * frequency must have corresponding timings register values in order to keep
  365. * the needed delays.
  366. * It uses timing bank registers set 0.
  367. */
  368. static int exynos5_dram_change_timings(struct exynos5_dmc *dmc,
  369. unsigned long target_rate)
  370. {
  371. int idx;
  372. for (idx = dmc->opp_count - 1; idx >= 0; idx--)
  373. if (dmc->opp[idx].freq_hz <= target_rate)
  374. break;
  375. if (idx < 0)
  376. return -EINVAL;
  377. writel(EXYNOS5_AREF_NORMAL,
  378. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
  379. writel(dmc->timing_row[idx],
  380. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW0);
  381. writel(dmc->timing_row[idx],
  382. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW0);
  383. writel(dmc->timing_data[idx],
  384. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA0);
  385. writel(dmc->timing_data[idx],
  386. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA0);
  387. writel(dmc->timing_power[idx],
  388. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER0);
  389. writel(dmc->timing_power[idx],
  390. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER0);
  391. return 0;
  392. }
  393. /**
  394. * exynos5_dmc_align_target_voltage() - Sets the final voltage for the DMC
  395. * @dmc: device for which it is going to be set
  396. * @target_volt: new voltage which is chosen to be final
  397. *
  398. * Function tries to align voltage to the safe level for 'normal' mode.
  399. * It checks the need of higher voltage and changes the value. The target
  400. * voltage might be lower that currently set and still the system will be
  401. * stable.
  402. */
  403. static int exynos5_dmc_align_target_voltage(struct exynos5_dmc *dmc,
  404. unsigned long target_volt)
  405. {
  406. int ret = 0;
  407. if (dmc->curr_volt <= target_volt)
  408. return 0;
  409. ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
  410. target_volt);
  411. if (!ret)
  412. dmc->curr_volt = target_volt;
  413. return ret;
  414. }
  415. /**
  416. * exynos5_dmc_align_bypass_voltage() - Sets the voltage for the DMC
  417. * @dmc: device for which it is going to be set
  418. * @target_volt: new voltage which is chosen to be final
  419. *
  420. * Function tries to align voltage to the safe level for the 'bypass' mode.
  421. * It checks the need of higher voltage and changes the value.
  422. * The target voltage must not be less than currently needed, because
  423. * for current frequency the device might become unstable.
  424. */
  425. static int exynos5_dmc_align_bypass_voltage(struct exynos5_dmc *dmc,
  426. unsigned long target_volt)
  427. {
  428. int ret = 0;
  429. if (dmc->curr_volt >= target_volt)
  430. return 0;
  431. ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
  432. target_volt);
  433. if (!ret)
  434. dmc->curr_volt = target_volt;
  435. return ret;
  436. }
  437. /**
  438. * exynos5_dmc_align_bypass_dram_timings() - Chooses and sets DRAM timings
  439. * @dmc: device for which it is going to be set
  440. * @target_rate: new frequency which is chosen to be final
  441. *
  442. * Function changes the DRAM timings for the temporary 'bypass' mode.
  443. */
  444. static int exynos5_dmc_align_bypass_dram_timings(struct exynos5_dmc *dmc,
  445. unsigned long target_rate)
  446. {
  447. int idx = find_target_freq_idx(dmc, target_rate);
  448. if (idx < 0)
  449. return -EINVAL;
  450. exynos5_set_bypass_dram_timings(dmc);
  451. return 0;
  452. }
  453. /**
  454. * exynos5_dmc_switch_to_bypass_configuration() - Switching to temporary clock
  455. * @dmc: DMC device for which the switching is going to happen
  456. * @target_rate: new frequency which is going to be set as a final
  457. * @target_volt: new voltage which is going to be set as a final
  458. *
  459. * Function configures DMC and clocks for operating in temporary 'bypass' mode.
  460. * This mode is used only temporary but if required, changes voltage and timings
  461. * for DRAM chips. It switches the main clock to stable clock source for the
  462. * period of the main PLL reconfiguration.
  463. */
  464. static int
  465. exynos5_dmc_switch_to_bypass_configuration(struct exynos5_dmc *dmc,
  466. unsigned long target_rate,
  467. unsigned long target_volt)
  468. {
  469. int ret;
  470. /*
  471. * Having higher voltage for a particular frequency does not harm
  472. * the chip. Use it for the temporary frequency change when one
  473. * voltage manipulation might be avoided.
  474. */
  475. ret = exynos5_dmc_align_bypass_voltage(dmc, target_volt);
  476. if (ret)
  477. return ret;
  478. /*
  479. * Longer delays for DRAM does not cause crash, the opposite does.
  480. */
  481. ret = exynos5_dmc_align_bypass_dram_timings(dmc, target_rate);
  482. if (ret)
  483. return ret;
  484. /*
  485. * Delays are long enough, so use them for the new coming clock.
  486. */
  487. ret = exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
  488. return ret;
  489. }
/**
 * exynos5_dmc_change_freq_and_volt() - Changes voltage and frequency of the DMC
 * using safe procedure
 * @dmc: device for which the frequency is going to be changed
 * @target_rate: requested new frequency
 * @target_volt: requested voltage which corresponds to the new frequency
 *
 * The DMC frequency change procedure requires a few steps.
 * The main requirement is to change the clock source in the clk mux
 * for the time of main clock PLL locking. The assumption is that the
 * alternative clock source set as parent is stable.
 * The second parent's clock frequency is fixed to 400MHz, it is named 'bypass'
 * clock. This requires alignment in DRAM timing parameters for the new
 * T-period. There is two bank sets for keeping DRAM
 * timings: set 0 and set 1. The set 0 is used when main clock source is
 * chosen. The 2nd set of regs is used for 'bypass' clock. Switching between
 * the two bank sets is part of the process.
 * The voltage must also be aligned to the minimum required level. There is
 * this intermediate step with switching to 'bypass' parent clock source.
 * if the old voltage is lower, it requires an increase of the voltage level.
 * The complexity of the voltage manipulation is hidden in low level function.
 * In this function there is last alignment of the voltage level at the end.
 *
 * NOTE: the statement order below is a hardware-mandated sequence; do not
 * reorder. The bypass clocks stay enabled for the whole reconfiguration and
 * are released at 'disable_clocks' on both success and failure paths.
 */
static int
exynos5_dmc_change_freq_and_volt(struct exynos5_dmc *dmc,
				 unsigned long target_rate,
				 unsigned long target_volt)
{
	int ret;

	/* Raise voltage/relax timings and select timing bank set 1. */
	ret = exynos5_dmc_switch_to_bypass_configuration(dmc, target_rate,
							 target_volt);
	if (ret)
		return ret;

	/*
	 * Voltage is set at least to a level needed for this frequency,
	 * so switching clock source is safe now.
	 */
	/* NOTE(review): clk_prepare_enable() results are ignored here -
	 * presumably these clocks cannot fail on this SoC; confirm. */
	clk_prepare_enable(dmc->fout_spll);
	clk_prepare_enable(dmc->mout_spll);
	clk_prepare_enable(dmc->mout_mx_mspll_ccore);

	/* Re-parent CDREX onto the stable 400MHz bypass source. */
	ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_mx_mspll_ccore);
	if (ret)
		goto disable_clocks;

	/*
	 * We are safe to increase the timings for current bypass frequency.
	 * Thanks to this the settings will be ready for the upcoming clock
	 * source change.
	 */
	exynos5_dram_change_timings(dmc, target_rate);

	/* Reconfigure the main PLL while running from the bypass clock. */
	clk_set_rate(dmc->fout_bpll, target_rate);

	/* Switch back to timing bank set 0 (BPLL timings). */
	ret = exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
	if (ret)
		goto disable_clocks;

	/* Re-parent CDREX back onto the (now re-locked) main PLL. */
	ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_bpll);
	if (ret)
		goto disable_clocks;

	/*
	 * Make sure if the voltage is not from 'bypass' settings and align to
	 * the right level for power efficiency.
	 */
	ret = exynos5_dmc_align_target_voltage(dmc, target_volt);

disable_clocks:
	clk_disable_unprepare(dmc->mout_mx_mspll_ccore);
	clk_disable_unprepare(dmc->mout_spll);
	clk_disable_unprepare(dmc->fout_spll);

	return ret;
}
  557. /**
  558. * exynos5_dmc_get_volt_freq() - Gets the frequency and voltage from the OPP
  559. * table.
  560. * @dmc: device for which the frequency is going to be changed
  561. * @freq: requested frequency in KHz
  562. * @target_rate: returned frequency which is the same or lower than
  563. * requested
  564. * @target_volt: returned voltage which corresponds to the returned
  565. * frequency
  566. * @flags: devfreq flags provided for this frequency change request
  567. *
  568. * Function gets requested frequency and checks OPP framework for needed
  569. * frequency and voltage. It populates the values 'target_rate' and
  570. * 'target_volt' or returns error value when OPP framework fails.
  571. */
  572. static int exynos5_dmc_get_volt_freq(struct exynos5_dmc *dmc,
  573. unsigned long *freq,
  574. unsigned long *target_rate,
  575. unsigned long *target_volt, u32 flags)
  576. {
  577. struct dev_pm_opp *opp;
  578. opp = devfreq_recommended_opp(dmc->dev, freq, flags);
  579. if (IS_ERR(opp))
  580. return PTR_ERR(opp);
  581. *target_rate = dev_pm_opp_get_freq(opp);
  582. *target_volt = dev_pm_opp_get_voltage(opp);
  583. dev_pm_opp_put(opp);
  584. return 0;
  585. }
  586. /**
  587. * exynos5_dmc_target() - Function responsible for changing frequency of DMC
  588. * @dev: device for which the frequency is going to be changed
  589. * @freq: requested frequency in KHz
  590. * @flags: flags provided for this frequency change request
  591. *
  592. * An entry function provided to the devfreq framework which provides frequency
  593. * change of the DMC. The function gets the possible rate from OPP table based
  594. * on requested frequency. It calls the next function responsible for the
  595. * frequency and voltage change. In case of failure, does not set 'curr_rate'
  596. * and returns error value to the framework.
  597. */
  598. static int exynos5_dmc_target(struct device *dev, unsigned long *freq,
  599. u32 flags)
  600. {
  601. struct exynos5_dmc *dmc = dev_get_drvdata(dev);
  602. unsigned long target_rate = 0;
  603. unsigned long target_volt = 0;
  604. int ret;
  605. ret = exynos5_dmc_get_volt_freq(dmc, freq, &target_rate, &target_volt,
  606. flags);
  607. if (ret)
  608. return ret;
  609. if (target_rate == dmc->curr_rate)
  610. return 0;
  611. mutex_lock(&dmc->lock);
  612. ret = exynos5_dmc_change_freq_and_volt(dmc, target_rate, target_volt);
  613. if (ret) {
  614. mutex_unlock(&dmc->lock);
  615. return ret;
  616. }
  617. dmc->curr_rate = target_rate;
  618. mutex_unlock(&dmc->lock);
  619. return 0;
  620. }
  621. /**
  622. * exynos5_counters_get() - Gets the performance counters values.
  623. * @dmc: device for which the counters are going to be checked
  624. * @load_count: variable which is populated with counter value
  625. * @total_count: variable which is used as 'wall clock' reference
  626. *
  627. * Function which provides performance counters values. It sums up counters for
  628. * two DMC channels. The 'total_count' is used as a reference and max value.
  629. * The ratio 'load_count/total_count' shows the busy percentage [0%, 100%].
  630. */
  631. static int exynos5_counters_get(struct exynos5_dmc *dmc,
  632. unsigned long *load_count,
  633. unsigned long *total_count)
  634. {
  635. unsigned long total = 0;
  636. struct devfreq_event_data event;
  637. int ret, i;
  638. *load_count = 0;
  639. /* Take into account only read+write counters, but stop all */
  640. for (i = 0; i < dmc->num_counters; i++) {
  641. if (!dmc->counter[i])
  642. continue;
  643. ret = devfreq_event_get_event(dmc->counter[i], &event);
  644. if (ret < 0)
  645. return ret;
  646. *load_count += event.load_count;
  647. if (total < event.total_count)
  648. total = event.total_count;
  649. }
  650. *total_count = total;
  651. return 0;
  652. }
/**
 * exynos5_dmc_start_perf_events() - Setup and start performance event counters
 * @dmc: device for which the counters are going to be checked
 * @beg_value: initial value for the counter
 *
 * Enables the counter-2 overflow interrupt and the counter-2/CCNT counters on
 * both DREX channels, clears stale overflow flags, resets the counters,
 * programs the start value and finally starts counting. The write order below
 * is deliberate; do not reorder.
 */
static void exynos5_dmc_start_perf_events(struct exynos5_dmc *dmc,
					  u32 beg_value)
{
	/* Enable interrupts for counter 2 */
	writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENS_PPC);
	writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENS_PPC);

	/* Enable counter 2 and CCNT */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENS_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENS_PPC);

	/* Clear overflow flag for all counters */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);

	/* Reset all counters */
	writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi1 + DREX_PMNC_PPC);

	/*
	 * Set start value for the counters, the number of samples that
	 * will be gathered is calculated as: 0xffffffff - beg_value
	 */
	writel(beg_value, dmc->base_drexi0 + DREX_PMCNT2_PPC);
	writel(beg_value, dmc->base_drexi1 + DREX_PMCNT2_PPC);

	/* Start all counters */
	writel(PPC_ENABLE, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(PPC_ENABLE, dmc->base_drexi1 + DREX_PMNC_PPC);
}
  686. /**
  687. * exynos5_dmc_perf_events_calc() - Calculate utilization
  688. * @dmc: device for which the counters are going to be checked
  689. * @diff_ts: time between last interrupt and current one
  690. *
  691. * Function which calculates needed utilization for the devfreq governor.
  692. * It prepares values for 'busy_time' and 'total_time' based on elapsed time
  693. * between interrupts, which approximates utilization.
  694. */
  695. static void exynos5_dmc_perf_events_calc(struct exynos5_dmc *dmc, u64 diff_ts)
  696. {
  697. /*
  698. * This is a simple algorithm for managing traffic on DMC.
  699. * When there is almost no load the counters overflow every 4s,
  700. * no mater the DMC frequency.
  701. * The high load might be approximated using linear function.
  702. * Knowing that, simple calculation can provide 'busy_time' and
  703. * 'total_time' to the devfreq governor which picks up target
  704. * frequency.
  705. * We want a fast ramp up and slow decay in frequency change function.
  706. */
  707. if (diff_ts < PERF_EVENT_UP_DOWN_THRESHOLD) {
  708. /*
  709. * Set higher utilization for the simple_ondemand governor.
  710. * The governor should increase the frequency of the DMC.
  711. */
  712. dmc->load = 70;
  713. dmc->total = 100;
  714. } else {
  715. /*
  716. * Set low utilization for the simple_ondemand governor.
  717. * The governor should decrease the frequency of the DMC.
  718. */
  719. dmc->load = 35;
  720. dmc->total = 100;
  721. }
  722. dev_dbg(dmc->dev, "diff_ts=%llu\n", diff_ts);
  723. }
/**
 * exynos5_dmc_perf_events_check() - Checks the status of the counters
 * @dmc: device for which the counters are going to be checked
 *
 * Called from the threaded IRQ handler. Stops the counters, determines which
 * DREX channel overflowed, computes the time since that channel's previous
 * overflow, feeds it to the utilization approximation and re-arms the
 * counters.
 */
static void exynos5_dmc_perf_events_check(struct exynos5_dmc *dmc)
{
	u32 val;
	u64 diff_ts, ts;

	ts = ktime_get_ns();

	/* Stop all counters */
	writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);

	/* Check the source in interrupt flag registers (which channel) */
	val = readl(dmc->base_drexi0 + DREX_FLAG_PPC);
	if (val) {
		diff_ts = ts - dmc->last_overflow_ts[0];
		dmc->last_overflow_ts[0] = ts;
		dev_dbg(dmc->dev, "drex0 0xE050 val= 0x%08x\n", val);
	} else {
		/*
		 * NOTE(review): channel 1 is assumed to be the source
		 * whenever channel 0 shows no flag; its FLAG value is only
		 * read for the debug print, not verified — confirm intended.
		 */
		val = readl(dmc->base_drexi1 + DREX_FLAG_PPC);
		diff_ts = ts - dmc->last_overflow_ts[1];
		dmc->last_overflow_ts[1] = ts;
		dev_dbg(dmc->dev, "drex1 0xE050 val= 0x%08x\n", val);
	}

	exynos5_dmc_perf_events_calc(dmc, diff_ts);

	/* Restart counting for the next interval */
	exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
}
/**
 * exynos5_dmc_enable_perf_events() - Enable performance events
 * @dmc: device for which the counters are going to be checked
 *
 * Function which sets up the needed environment: enables the performance
 * event clock, selects read transfers as event 2 on both channels and
 * initializes the overflow timestamps and a safe initial utilization.
 */
static void exynos5_dmc_enable_perf_events(struct exynos5_dmc *dmc)
{
	u64 ts;

	/* Enable Performance Event Clock */
	writel(PEREV_CLK_EN, dmc->base_drexi0 + DREX_PPCCLKCON);
	writel(PEREV_CLK_EN, dmc->base_drexi1 + DREX_PPCCLKCON);

	/* Select read transfers as performance event2 */
	writel(READ_TRANSFER_CH0, dmc->base_drexi0 + DREX_PEREV2CONFIG);
	writel(READ_TRANSFER_CH1, dmc->base_drexi1 + DREX_PEREV2CONFIG);

	/* Baseline timestamps for the first overflow-interval calculation */
	ts = ktime_get_ns();
	dmc->last_overflow_ts[0] = ts;
	dmc->last_overflow_ts[1] = ts;

	/* Devfreq shouldn't be faster than initialization, play safe though. */
	dmc->load = 99;
	dmc->total = 100;
}
/**
 * exynos5_dmc_disable_perf_events() - Disable performance events
 * @dmc: device for which the counters are going to be checked
 *
 * Function which stops, disables performance event counters and interrupts.
 * Mirrors exynos5_dmc_start_perf_events() using the clear ('C') registers.
 */
static void exynos5_dmc_disable_perf_events(struct exynos5_dmc *dmc)
{
	/* Stop all counters */
	writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);

	/* Disable interrupts for counter 2 */
	writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENC_PPC);
	writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENC_PPC);

	/* Disable counter 2 and CCNT */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENC_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENC_PPC);

	/* Clear overflow flag for all counters */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);
}
  797. /**
  798. * exynos5_dmc_get_status() - Read current DMC performance statistics.
  799. * @dev: device for which the statistics are requested
  800. * @stat: structure which has statistic fields
  801. *
  802. * Function reads the DMC performance counters and calculates 'busy_time'
  803. * and 'total_time'. To protect from overflow, the values are shifted right
  804. * by 10. After read out the counters are setup to count again.
  805. */
  806. static int exynos5_dmc_get_status(struct device *dev,
  807. struct devfreq_dev_status *stat)
  808. {
  809. struct exynos5_dmc *dmc = dev_get_drvdata(dev);
  810. unsigned long load, total;
  811. int ret;
  812. if (dmc->in_irq_mode) {
  813. mutex_lock(&dmc->lock);
  814. stat->current_frequency = dmc->curr_rate;
  815. mutex_unlock(&dmc->lock);
  816. stat->busy_time = dmc->load;
  817. stat->total_time = dmc->total;
  818. } else {
  819. ret = exynos5_counters_get(dmc, &load, &total);
  820. if (ret < 0)
  821. return -EINVAL;
  822. /* To protect from overflow, divide by 1024 */
  823. stat->busy_time = load >> 10;
  824. stat->total_time = total >> 10;
  825. ret = exynos5_counters_set_event(dmc);
  826. if (ret < 0) {
  827. dev_err(dev, "could not set event counter\n");
  828. return ret;
  829. }
  830. }
  831. return 0;
  832. }
  833. /**
  834. * exynos5_dmc_get_cur_freq() - Function returns current DMC frequency
  835. * @dev: device for which the framework checks operating frequency
  836. * @freq: returned frequency value
  837. *
  838. * It returns the currently used frequency of the DMC. The real operating
  839. * frequency might be lower when the clock source value could not be divided
  840. * to the requested value.
  841. */
  842. static int exynos5_dmc_get_cur_freq(struct device *dev, unsigned long *freq)
  843. {
  844. struct exynos5_dmc *dmc = dev_get_drvdata(dev);
  845. mutex_lock(&dmc->lock);
  846. *freq = dmc->curr_rate;
  847. mutex_unlock(&dmc->lock);
  848. return 0;
  849. }
/*
 * exynos5_dmc_df_profile - Devfreq governor's profile structure
 *
 * It provides to the devfreq framework needed functions and polling period.
 * 'initial_freq' is filled in by exynos5_dmc_init_clks() and 'polling_ms'
 * is set in exynos5_dmc_probe() for the polling (non-IRQ) mode.
 */
static struct devfreq_dev_profile exynos5_dmc_df_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.target = exynos5_dmc_target,
	.get_dev_status = exynos5_dmc_get_status,
	.get_cur_freq = exynos5_dmc_get_cur_freq,
};
  861. /**
  862. * exynos5_dmc_align_init_freq() - Align initial frequency value
  863. * @dmc: device for which the frequency is going to be set
  864. * @bootloader_init_freq: initial frequency set by the bootloader in KHz
  865. *
  866. * The initial bootloader frequency, which is present during boot, might be
  867. * different that supported frequency values in the driver. It is possible
  868. * due to different PLL settings or used PLL as a source.
  869. * This function provides the 'initial_freq' for the devfreq framework
  870. * statistics engine which supports only registered values. Thus, some alignment
  871. * must be made.
  872. */
  873. static unsigned long
  874. exynos5_dmc_align_init_freq(struct exynos5_dmc *dmc,
  875. unsigned long bootloader_init_freq)
  876. {
  877. unsigned long aligned_freq;
  878. int idx;
  879. idx = find_target_freq_idx(dmc, bootloader_init_freq);
  880. if (idx >= 0)
  881. aligned_freq = dmc->opp[idx].freq_hz;
  882. else
  883. aligned_freq = dmc->opp[dmc->opp_count - 1].freq_hz;
  884. return aligned_freq;
  885. }
  886. /**
  887. * create_timings_aligned() - Create register values and align with standard
  888. * @dmc: device for which the frequency is going to be set
  889. * @reg_timing_row: array to fill with values for timing row register
  890. * @reg_timing_data: array to fill with values for timing data register
  891. * @reg_timing_power: array to fill with values for timing power register
  892. * @clk_period_ps: the period of the clock, known as tCK
  893. *
  894. * The function calculates timings and creates a register value ready for
  895. * a frequency transition. The register contains a few timings. They are
  896. * shifted by a known offset. The timing value is calculated based on memory
  897. * specyfication: minimal time required and minimal cycles required.
  898. */
  899. static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row,
  900. u32 *reg_timing_data, u32 *reg_timing_power,
  901. u32 clk_period_ps)
  902. {
  903. u32 val;
  904. const struct timing_reg *reg;
  905. if (clk_period_ps == 0)
  906. return -EINVAL;
  907. *reg_timing_row = 0;
  908. *reg_timing_data = 0;
  909. *reg_timing_power = 0;
  910. val = dmc->timings->tRFC / clk_period_ps;
  911. val += dmc->timings->tRFC % clk_period_ps ? 1 : 0;
  912. val = max(val, dmc->min_tck->tRFC);
  913. reg = &timing_row_reg_fields[0];
  914. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  915. val = dmc->timings->tRRD / clk_period_ps;
  916. val += dmc->timings->tRRD % clk_period_ps ? 1 : 0;
  917. val = max(val, dmc->min_tck->tRRD);
  918. reg = &timing_row_reg_fields[1];
  919. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  920. val = dmc->timings->tRPab / clk_period_ps;
  921. val += dmc->timings->tRPab % clk_period_ps ? 1 : 0;
  922. val = max(val, dmc->min_tck->tRPab);
  923. reg = &timing_row_reg_fields[2];
  924. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  925. val = dmc->timings->tRCD / clk_period_ps;
  926. val += dmc->timings->tRCD % clk_period_ps ? 1 : 0;
  927. val = max(val, dmc->min_tck->tRCD);
  928. reg = &timing_row_reg_fields[3];
  929. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  930. val = dmc->timings->tRC / clk_period_ps;
  931. val += dmc->timings->tRC % clk_period_ps ? 1 : 0;
  932. val = max(val, dmc->min_tck->tRC);
  933. reg = &timing_row_reg_fields[4];
  934. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  935. val = dmc->timings->tRAS / clk_period_ps;
  936. val += dmc->timings->tRAS % clk_period_ps ? 1 : 0;
  937. val = max(val, dmc->min_tck->tRAS);
  938. reg = &timing_row_reg_fields[5];
  939. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  940. /* data related timings */
  941. val = dmc->timings->tWTR / clk_period_ps;
  942. val += dmc->timings->tWTR % clk_period_ps ? 1 : 0;
  943. val = max(val, dmc->min_tck->tWTR);
  944. reg = &timing_data_reg_fields[0];
  945. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  946. val = dmc->timings->tWR / clk_period_ps;
  947. val += dmc->timings->tWR % clk_period_ps ? 1 : 0;
  948. val = max(val, dmc->min_tck->tWR);
  949. reg = &timing_data_reg_fields[1];
  950. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  951. val = dmc->timings->tRTP / clk_period_ps;
  952. val += dmc->timings->tRTP % clk_period_ps ? 1 : 0;
  953. val = max(val, dmc->min_tck->tRTP);
  954. reg = &timing_data_reg_fields[2];
  955. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  956. val = dmc->timings->tW2W_C2C / clk_period_ps;
  957. val += dmc->timings->tW2W_C2C % clk_period_ps ? 1 : 0;
  958. val = max(val, dmc->min_tck->tW2W_C2C);
  959. reg = &timing_data_reg_fields[3];
  960. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  961. val = dmc->timings->tR2R_C2C / clk_period_ps;
  962. val += dmc->timings->tR2R_C2C % clk_period_ps ? 1 : 0;
  963. val = max(val, dmc->min_tck->tR2R_C2C);
  964. reg = &timing_data_reg_fields[4];
  965. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  966. val = dmc->timings->tWL / clk_period_ps;
  967. val += dmc->timings->tWL % clk_period_ps ? 1 : 0;
  968. val = max(val, dmc->min_tck->tWL);
  969. reg = &timing_data_reg_fields[5];
  970. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  971. val = dmc->timings->tDQSCK / clk_period_ps;
  972. val += dmc->timings->tDQSCK % clk_period_ps ? 1 : 0;
  973. val = max(val, dmc->min_tck->tDQSCK);
  974. reg = &timing_data_reg_fields[6];
  975. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  976. val = dmc->timings->tRL / clk_period_ps;
  977. val += dmc->timings->tRL % clk_period_ps ? 1 : 0;
  978. val = max(val, dmc->min_tck->tRL);
  979. reg = &timing_data_reg_fields[7];
  980. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  981. /* power related timings */
  982. val = dmc->timings->tFAW / clk_period_ps;
  983. val += dmc->timings->tFAW % clk_period_ps ? 1 : 0;
  984. val = max(val, dmc->min_tck->tFAW);
  985. reg = &timing_power_reg_fields[0];
  986. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  987. val = dmc->timings->tXSR / clk_period_ps;
  988. val += dmc->timings->tXSR % clk_period_ps ? 1 : 0;
  989. val = max(val, dmc->min_tck->tXSR);
  990. reg = &timing_power_reg_fields[1];
  991. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  992. val = dmc->timings->tXP / clk_period_ps;
  993. val += dmc->timings->tXP % clk_period_ps ? 1 : 0;
  994. val = max(val, dmc->min_tck->tXP);
  995. reg = &timing_power_reg_fields[2];
  996. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  997. val = dmc->timings->tCKE / clk_period_ps;
  998. val += dmc->timings->tCKE % clk_period_ps ? 1 : 0;
  999. val = max(val, dmc->min_tck->tCKE);
  1000. reg = &timing_power_reg_fields[3];
  1001. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  1002. val = dmc->timings->tMRD / clk_period_ps;
  1003. val += dmc->timings->tMRD % clk_period_ps ? 1 : 0;
  1004. val = max(val, dmc->min_tck->tMRD);
  1005. reg = &timing_power_reg_fields[4];
  1006. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  1007. return 0;
  1008. }
  1009. /**
  1010. * of_get_dram_timings() - helper function for parsing DT settings for DRAM
  1011. * @dmc: device for which the frequency is going to be set
  1012. *
  1013. * The function parses DT entries with DRAM information.
  1014. */
  1015. static int of_get_dram_timings(struct exynos5_dmc *dmc)
  1016. {
  1017. int ret = 0;
  1018. int idx;
  1019. struct device_node *np_ddr;
  1020. u32 freq_mhz, clk_period_ps;
  1021. np_ddr = of_parse_phandle(dmc->dev->of_node, "device-handle", 0);
  1022. if (!np_ddr) {
  1023. dev_warn(dmc->dev, "could not find 'device-handle' in DT\n");
  1024. return -EINVAL;
  1025. }
  1026. dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
  1027. sizeof(u32), GFP_KERNEL);
  1028. if (!dmc->timing_row) {
  1029. ret = -ENOMEM;
  1030. goto put_node;
  1031. }
  1032. dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
  1033. sizeof(u32), GFP_KERNEL);
  1034. if (!dmc->timing_data) {
  1035. ret = -ENOMEM;
  1036. goto put_node;
  1037. }
  1038. dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
  1039. sizeof(u32), GFP_KERNEL);
  1040. if (!dmc->timing_power) {
  1041. ret = -ENOMEM;
  1042. goto put_node;
  1043. }
  1044. dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
  1045. DDR_TYPE_LPDDR3,
  1046. &dmc->timings_arr_size);
  1047. if (!dmc->timings) {
  1048. dev_warn(dmc->dev, "could not get timings from DT\n");
  1049. ret = -EINVAL;
  1050. goto put_node;
  1051. }
  1052. dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
  1053. if (!dmc->min_tck) {
  1054. dev_warn(dmc->dev, "could not get tck from DT\n");
  1055. ret = -EINVAL;
  1056. goto put_node;
  1057. }
  1058. /* Sorted array of OPPs with frequency ascending */
  1059. for (idx = 0; idx < dmc->opp_count; idx++) {
  1060. freq_mhz = dmc->opp[idx].freq_hz / 1000000;
  1061. clk_period_ps = 1000000 / freq_mhz;
  1062. ret = create_timings_aligned(dmc, &dmc->timing_row[idx],
  1063. &dmc->timing_data[idx],
  1064. &dmc->timing_power[idx],
  1065. clk_period_ps);
  1066. }
  1067. /* Take the highest frequency's timings as 'bypass' */
  1068. dmc->bypass_timing_row = dmc->timing_row[idx - 1];
  1069. dmc->bypass_timing_data = dmc->timing_data[idx - 1];
  1070. dmc->bypass_timing_power = dmc->timing_power[idx - 1];
  1071. put_node:
  1072. of_node_put(np_ddr);
  1073. return ret;
  1074. }
  1075. /**
  1076. * exynos5_dmc_init_clks() - Initialize clocks needed for DMC operation.
  1077. * @dmc: DMC structure containing needed fields
  1078. *
  1079. * Get the needed clocks defined in DT device, enable and set the right parents.
  1080. * Read current frequency and initialize the initial rate for governor.
  1081. */
  1082. static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
  1083. {
  1084. int ret;
  1085. unsigned long target_volt = 0;
  1086. unsigned long target_rate = 0;
  1087. unsigned int tmp;
  1088. dmc->fout_spll = devm_clk_get(dmc->dev, "fout_spll");
  1089. if (IS_ERR(dmc->fout_spll))
  1090. return PTR_ERR(dmc->fout_spll);
  1091. dmc->fout_bpll = devm_clk_get(dmc->dev, "fout_bpll");
  1092. if (IS_ERR(dmc->fout_bpll))
  1093. return PTR_ERR(dmc->fout_bpll);
  1094. dmc->mout_mclk_cdrex = devm_clk_get(dmc->dev, "mout_mclk_cdrex");
  1095. if (IS_ERR(dmc->mout_mclk_cdrex))
  1096. return PTR_ERR(dmc->mout_mclk_cdrex);
  1097. dmc->mout_bpll = devm_clk_get(dmc->dev, "mout_bpll");
  1098. if (IS_ERR(dmc->mout_bpll))
  1099. return PTR_ERR(dmc->mout_bpll);
  1100. dmc->mout_mx_mspll_ccore = devm_clk_get(dmc->dev,
  1101. "mout_mx_mspll_ccore");
  1102. if (IS_ERR(dmc->mout_mx_mspll_ccore))
  1103. return PTR_ERR(dmc->mout_mx_mspll_ccore);
  1104. dmc->mout_spll = devm_clk_get(dmc->dev, "ff_dout_spll2");
  1105. if (IS_ERR(dmc->mout_spll)) {
  1106. dmc->mout_spll = devm_clk_get(dmc->dev, "mout_sclk_spll");
  1107. if (IS_ERR(dmc->mout_spll))
  1108. return PTR_ERR(dmc->mout_spll);
  1109. }
  1110. /*
  1111. * Convert frequency to KHz values and set it for the governor.
  1112. */
  1113. dmc->curr_rate = clk_get_rate(dmc->mout_mclk_cdrex);
  1114. dmc->curr_rate = exynos5_dmc_align_init_freq(dmc, dmc->curr_rate);
  1115. exynos5_dmc_df_profile.initial_freq = dmc->curr_rate;
  1116. ret = exynos5_dmc_get_volt_freq(dmc, &dmc->curr_rate, &target_rate,
  1117. &target_volt, 0);
  1118. if (ret)
  1119. return ret;
  1120. dmc->curr_volt = target_volt;
  1121. ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
  1122. if (ret)
  1123. return ret;
  1124. clk_prepare_enable(dmc->fout_bpll);
  1125. clk_prepare_enable(dmc->mout_bpll);
  1126. /*
  1127. * Some bootloaders do not set clock routes correctly.
  1128. * Stop one path in clocks to PHY.
  1129. */
  1130. regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, &tmp);
  1131. tmp &= ~(BIT(1) | BIT(0));
  1132. regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, tmp);
  1133. return 0;
  1134. }
  1135. /**
  1136. * exynos5_performance_counters_init() - Initializes performance DMC's counters
  1137. * @dmc: DMC for which it does the setup
  1138. *
  1139. * Initialization of performance counters in DMC for estimating usage.
  1140. * The counter's values are used for calculation of a memory bandwidth and based
  1141. * on that the governor changes the frequency.
  1142. * The counters are not used when the governor is GOVERNOR_USERSPACE.
  1143. */
  1144. static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
  1145. {
  1146. int ret, i;
  1147. dmc->num_counters = devfreq_event_get_edev_count(dmc->dev,
  1148. "devfreq-events");
  1149. if (dmc->num_counters < 0) {
  1150. dev_err(dmc->dev, "could not get devfreq-event counters\n");
  1151. return dmc->num_counters;
  1152. }
  1153. dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters,
  1154. sizeof(*dmc->counter), GFP_KERNEL);
  1155. if (!dmc->counter)
  1156. return -ENOMEM;
  1157. for (i = 0; i < dmc->num_counters; i++) {
  1158. dmc->counter[i] =
  1159. devfreq_event_get_edev_by_phandle(dmc->dev,
  1160. "devfreq-events", i);
  1161. if (IS_ERR_OR_NULL(dmc->counter[i]))
  1162. return -EPROBE_DEFER;
  1163. }
  1164. ret = exynos5_counters_enable_edev(dmc);
  1165. if (ret < 0) {
  1166. dev_err(dmc->dev, "could not enable event counter\n");
  1167. return ret;
  1168. }
  1169. ret = exynos5_counters_set_event(dmc);
  1170. if (ret < 0) {
  1171. exynos5_counters_disable_edev(dmc);
  1172. dev_err(dmc->dev, "could not set event counter\n");
  1173. return ret;
  1174. }
  1175. return 0;
  1176. }
  1177. /**
  1178. * exynos5_dmc_set_pause_on_switching() - Controls a pause feature in DMC
  1179. * @dmc: device which is used for changing this feature
  1180. *
  1181. * There is a need of pausing DREX DMC when divider or MUX in clock tree
  1182. * changes its configuration. In such situation access to the memory is blocked
  1183. * in DMC automatically. This feature is used when clock frequency change
  1184. * request appears and touches clock tree.
  1185. */
  1186. static inline int exynos5_dmc_set_pause_on_switching(struct exynos5_dmc *dmc)
  1187. {
  1188. unsigned int val;
  1189. int ret;
  1190. ret = regmap_read(dmc->clk_regmap, CDREX_PAUSE, &val);
  1191. if (ret)
  1192. return ret;
  1193. val |= 1UL;
  1194. regmap_write(dmc->clk_regmap, CDREX_PAUSE, val);
  1195. return 0;
  1196. }
  1197. static irqreturn_t dmc_irq_thread(int irq, void *priv)
  1198. {
  1199. int res;
  1200. struct exynos5_dmc *dmc = priv;
  1201. mutex_lock(&dmc->df->lock);
  1202. exynos5_dmc_perf_events_check(dmc);
  1203. res = update_devfreq(dmc->df);
  1204. mutex_unlock(&dmc->df->lock);
  1205. if (res)
  1206. dev_warn(dmc->dev, "devfreq failed with %d\n", res);
  1207. return IRQ_HANDLED;
  1208. }
/**
 * exynos5_dmc_probe() - Probe function for the DMC driver
 * @pdev: platform device for which the driver is going to be initialized
 *
 * Initialize basic components: clocks, regulators, performance counters, etc.
 * Read out product version and based on the information setup
 * internal structures for the controller (frequency and voltage) and for DRAM
 * memory parameters: timings for each operating frequency.
 * Register new devfreq device for controlling DVFS of the DMC.
 */
static int exynos5_dmc_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct exynos5_dmc *dmc;
	int irq[2];

	dmc = devm_kzalloc(dev, sizeof(*dmc), GFP_KERNEL);
	if (!dmc)
		return -ENOMEM;

	mutex_init(&dmc->lock);

	dmc->dev = dev;
	platform_set_drvdata(pdev, dmc);

	/* Two register windows, one per DREX memory channel */
	dmc->base_drexi0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmc->base_drexi0))
		return PTR_ERR(dmc->base_drexi0);
	dmc->base_drexi1 = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(dmc->base_drexi1))
		return PTR_ERR(dmc->base_drexi1);

	dmc->clk_regmap = syscon_regmap_lookup_by_phandle(np,
							  "samsung,syscon-clk");
	if (IS_ERR(dmc->clk_regmap))
		return PTR_ERR(dmc->clk_regmap);

	ret = exynos5_init_freq_table(dmc, &exynos5_dmc_df_profile);
	if (ret) {
		dev_warn(dev, "couldn't initialize frequency settings\n");
		return ret;
	}

	dmc->vdd_mif = devm_regulator_get(dev, "vdd");
	if (IS_ERR(dmc->vdd_mif)) {
		ret = PTR_ERR(dmc->vdd_mif);
		return ret;
	}

	ret = exynos5_dmc_init_clks(dmc);
	if (ret)
		return ret;

	ret = of_get_dram_timings(dmc);
	if (ret) {
		dev_warn(dev, "couldn't initialize timings settings\n");
		goto remove_clocks;
	}

	ret = exynos5_dmc_set_pause_on_switching(dmc);
	if (ret) {
		dev_warn(dev, "couldn't get access to PAUSE register\n");
		goto remove_clocks;
	}

	/*
	 * There are two modes in which the driver works: polling or IRQ.
	 * IRQ mode is used only when both DREX interrupts are available and
	 * the 'irqmode' module parameter is set.
	 */
	irq[0] = platform_get_irq_byname(pdev, "drex_0");
	irq[1] = platform_get_irq_byname(pdev, "drex_1");
	if (irq[0] > 0 && irq[1] > 0 && irqmode) {
		ret = devm_request_threaded_irq(dev, irq[0], NULL,
						dmc_irq_thread, IRQF_ONESHOT,
						dev_name(dev), dmc);
		if (ret) {
			dev_err(dev, "couldn't grab IRQ\n");
			goto remove_clocks;
		}

		ret = devm_request_threaded_irq(dev, irq[1], NULL,
						dmc_irq_thread, IRQF_ONESHOT,
						dev_name(dev), dmc);
		if (ret) {
			dev_err(dev, "couldn't grab IRQ\n");
			goto remove_clocks;
		}

		/*
		 * Setup default thresholds for the devfreq governor.
		 * The values are chosen based on experiments.
		 */
		dmc->gov_data.upthreshold = 55;
		dmc->gov_data.downdifferential = 5;

		exynos5_dmc_enable_perf_events(dmc);

		dmc->in_irq_mode = 1;
	} else {
		ret = exynos5_performance_counters_init(dmc);
		if (ret) {
			dev_warn(dev, "couldn't probe performance counters\n");
			goto remove_clocks;
		}

		/*
		 * Setup default thresholds for the devfreq governor.
		 * The values are chosen based on experiments.
		 */
		dmc->gov_data.upthreshold = 10;
		dmc->gov_data.downdifferential = 5;

		exynos5_dmc_df_profile.polling_ms = 100;
	}

	dmc->df = devm_devfreq_add_device(dev, &exynos5_dmc_df_profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
					  &dmc->gov_data);
	if (IS_ERR(dmc->df)) {
		ret = PTR_ERR(dmc->df);
		goto err_devfreq_add;
	}

	/* Arm the counters only now, once devfreq is ready to consume them */
	if (dmc->in_irq_mode)
		exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);

	dev_info(dev, "DMC initialized, in irq mode: %d\n", dmc->in_irq_mode);

	return 0;

err_devfreq_add:
	if (dmc->in_irq_mode)
		exynos5_dmc_disable_perf_events(dmc);
	else
		exynos5_counters_disable_edev(dmc);
remove_clocks:
	clk_disable_unprepare(dmc->mout_bpll);
	clk_disable_unprepare(dmc->fout_bpll);

	return ret;
}
  1326. /**
  1327. * exynos5_dmc_remove() - Remove function for the platform device
  1328. * @pdev: platform device which is going to be removed
  1329. *
  1330. * The function relies on 'devm' framework function which automatically
  1331. * clean the device's resources. It just calls explicitly disable function for
  1332. * the performance counters.
  1333. */
  1334. static int exynos5_dmc_remove(struct platform_device *pdev)
  1335. {
  1336. struct exynos5_dmc *dmc = dev_get_drvdata(&pdev->dev);
  1337. if (dmc->in_irq_mode)
  1338. exynos5_dmc_disable_perf_events(dmc);
  1339. else
  1340. exynos5_counters_disable_edev(dmc);
  1341. clk_disable_unprepare(dmc->mout_bpll);
  1342. clk_disable_unprepare(dmc->fout_bpll);
  1343. return 0;
  1344. }
/* Match table: bind only to the Exynos5422 DMC node in the device tree */
static const struct of_device_id exynos5_dmc_of_match[] = {
	{ .compatible = "samsung,exynos5422-dmc", },
	{ },
};
MODULE_DEVICE_TABLE(of, exynos5_dmc_of_match);

/* Platform driver glue; probe/remove are defined above */
static struct platform_driver exynos5_dmc_platdrv = {
	.probe = exynos5_dmc_probe,
	.remove = exynos5_dmc_remove,
	.driver = {
		.name = "exynos5-dmc",
		.of_match_table = exynos5_dmc_of_match,
	},
};
module_platform_driver(exynos5_dmc_platdrv);
MODULE_DESCRIPTION("Driver for Exynos5422 Dynamic Memory Controller dynamic frequency and voltage change");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lukasz Luba");