rzg2l-cpg.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * RZ/G2L Clock Pulse Generator
  4. *
  5. * Copyright (C) 2021 Renesas Electronics Corp.
  6. *
  7. * Based on renesas-cpg-mssr.c
  8. *
  9. * Copyright (C) 2015 Glider bvba
  10. * Copyright (C) 2013 Ideas On Board SPRL
  11. * Copyright (C) 2015 Renesas Electronics Corp.
  12. */
  13. #include <linux/bitfield.h>
  14. #include <linux/clk.h>
  15. #include <linux/clk-provider.h>
  16. #include <linux/clk/renesas.h>
  17. #include <linux/delay.h>
  18. #include <linux/device.h>
  19. #include <linux/init.h>
  20. #include <linux/iopoll.h>
  21. #include <linux/mod_devicetable.h>
  22. #include <linux/module.h>
  23. #include <linux/of_address.h>
  24. #include <linux/of_device.h>
  25. #include <linux/platform_device.h>
  26. #include <linux/pm_clock.h>
  27. #include <linux/pm_domain.h>
  28. #include <linux/reset-controller.h>
  29. #include <linux/slab.h>
  30. #include <linux/units.h>
  31. #include <dt-bindings/clock/renesas-cpg-mssr.h>
  32. #include "rzg2l-cpg.h"
  33. #ifdef DEBUG
  34. #define WARN_DEBUG(x) WARN_ON(x)
  35. #else
  36. #define WARN_DEBUG(x) do { } while (0)
  37. #endif
  38. #define GET_SHIFT(val) ((val >> 12) & 0xff)
  39. #define GET_WIDTH(val) ((val >> 8) & 0xf)
  40. #define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), val))
  41. #define MDIV(val) FIELD_GET(GENMASK(15, 6), val)
  42. #define PDIV(val) FIELD_GET(GENMASK(5, 0), val)
  43. #define SDIV(val) FIELD_GET(GENMASK(2, 0), val)
  44. #define CLK_ON_R(reg) (reg)
  45. #define CLK_MON_R(reg) (0x180 + (reg))
  46. #define CLK_RST_R(reg) (reg)
  47. #define CLK_MRST_R(reg) (0x180 + (reg))
  48. #define GET_REG_OFFSET(val) ((val >> 20) & 0xfff)
  49. #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
  50. #define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
  51. #define MAX_VCLK_FREQ (148500000)
/**
 * struct sd_hw_data - SDHI clock source selector hardware data
 * @hw: clock hardware handle
 * @conf: packed register offset/shift/width configuration
 * @priv: CPG private data
 */
struct sd_hw_data {
	struct clk_hw hw;
	u32 conf;
	struct rzg2l_cpg_priv *priv;
};

#define to_sd_hw_data(_hw)	container_of(_hw, struct sd_hw_data, hw)
/**
 * struct rzg2l_pll5_param - PLL5 rate-generation parameters
 * @pl5_fracin: fractional part of the multiplier (24-bit fixed point)
 * @pl5_refdiv: reference clock divider
 * @pl5_intin: integer part of the multiplier
 * @pl5_postdiv1: first post divider
 * @pl5_postdiv2: second post divider
 * @pl5_spread: SSCG modulation spread setting
 */
struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};
/**
 * struct rzg2l_pll5_mux_dsi_div_param - PLL5_4 mux and DSI divider settings
 * @clksrc: clock source selection for the PLL5_4 mux
 * @dsi_div_a: DSI divider A; divides by 1 << dsi_div_a
 * @dsi_div_b: DSI divider B; divides by dsi_div_b + 1
 */
struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};
/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;
	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
/* devm action callback: remove the OF clock provider on driver teardown. */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
/*
 * Register a core divider clock described by @core.  Register offset, field
 * shift and field width are packed into core->conf (see GET_REG_OFFSET,
 * GET_SHIFT, GET_WIDTH).  A table-based divider is used when core->dtable is
 * provided, otherwise the generic register divider.  Returns the clock or an
 * ERR_PTR on failure.
 */
static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	/* The parent clock index lives in the low 16 bits of core->parent. */
	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}
/*
 * Register a core mux clock through the generic devm mux helper; the
 * register offset/shift/width come packed in core->conf.
 */
static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}
/* Delegate rate selection to the generic mux implementation (no flags). */
static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw,
					       struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
/*
 * Switch the SDHI clock source selector to @index, honouring the
 * intermediate-step requirement of the hardware (see comment below).
 * Returns 0 on success or a negative error if the status poll times out.
 */
static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 off = GET_REG_OFFSET(hwdata->conf);
	u32 shift = GET_SHIFT(hwdata->conf);
	const u32 clk_src_266 = 2;
	u32 msk, val, bitmask;
	unsigned long flags;
	int ret;

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, Switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
	 * the index to value mapping is done by adding 1 to the index.
	 */

	/* Upper 16 bits act as the write-enable mask for the selector field. */
	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
	/* The SDHI0 selector is at register offset 0; otherwise it is SDHI1. */
	msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
	spin_lock_irqsave(&priv->rmw_lock, flags);
	if (index != clk_src_266) {
		/* Step via 266 MHz first and wait for the switch to settle. */
		writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);

		ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
						!(val & msk), 10,
						CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
		if (ret)
			goto unlock;
	}

	writel(bitmask | ((index + 1) << shift), priv->base + off);

	ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
					!(val & msk), 10,
					CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
unlock:
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch clk source\n");

	return ret;
}
  201. static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
  202. {
  203. struct sd_hw_data *hwdata = to_sd_hw_data(hw);
  204. struct rzg2l_cpg_priv *priv = hwdata->priv;
  205. u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));
  206. val >>= GET_SHIFT(hwdata->conf);
  207. val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
  208. return val ? val - 1 : 0;
  209. }
/* clk_ops for the SDHI clock source selectors (sd0/sd1). */
static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_sd_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
};
/*
 * Register an SDHI clock source selector with the custom ops that implement
 * the HW-mandated switching sequence.  The clock name is derived from the
 * field shift: the selector at shift 0 is "sd0", any other is "sd1".
 */
static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      void __iomem *base,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = 0;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}
/*
 * Compute the PLL5 FOUTPOSTDIV rate for the requested @rate and fill @params
 * with the register values that produce it.  The multiplier is expressed as
 * intin + fracin / 2^24 (24-bit fractional fixed point); refdiv, the post
 * dividers and the SSCG spread are fixed constants here.  The evaluation
 * order of the final expression is deliberate to avoid losing precision in
 * the integer divisions.
 */
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	foutpostdiv_rate =
		EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
		((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
		(params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}
/**
 * struct dsi_div_hw_data - DSI divider clock hardware data
 * @hw: clock hardware handle
 * @conf: packed register configuration
 * @rate: last rate set through set_rate (0 until first set_rate)
 * @priv: CPG private data
 */
struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)
  265. static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
  266. unsigned long parent_rate)
  267. {
  268. struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
  269. unsigned long rate = dsi_div->rate;
  270. if (!rate)
  271. rate = parent_rate;
  272. return rate;
  273. }
/*
 * Compute the parent (FOUTPOSTDIV-derived) rate needed to produce VCLK
 * @rate.  When clk source 1 is selected the mux taps FOUT1PH0, which runs at
 * half of FOUTPOSTDIV, hence the divide by two.  @params is only used as
 * scratch space here; the computed register values are discarded.
 */
static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}
/* Clamp the request to MAX_VCLK_FREQ and derive the required parent rate. */
static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}
/*
 * Program the DSI A/B dividers for @rate.  Returns -EINVAL for a zero or
 * out-of-range rate; otherwise caches @rate and writes the divider fields
 * (with their write-enable bits) to CPG_PL5_SDIV.
 */
static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}
/* clk_ops for the DSI divider clock. */
static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};
/*
 * Register the DSI divider clock.  CLK_SET_RATE_PARENT lets rate requests
 * propagate up through the PLL5_4 mux to SIPLL5.
 */
static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct clk **clks,
			       struct rzg2l_cpg_priv *priv)
{
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	/* The parent clock index lives in the low 16 bits of core->parent. */
	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}
/**
 * struct pll5_mux_hw_data - PLL5_4 clock source mux hardware data
 * @hw: clock hardware handle
 * @conf: packed register configuration
 * @rate: cached rate
 * @priv: CPG private data
 */
struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)
/*
 * Force the best parent to the clock source previously chosen by the DSI
 * divider (priv->mux_dsi_div_params.clksrc) and pass the rate through.
 */
static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}
/* Select mux input @index by writing CPG_OTHERFUNC1_REG with write-enable. */
static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */
	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}
/*
 * Read back the mux selection.
 * NOTE(review): the register value is returned unmasked; this assumes only
 * the selection bit(s) of the register read back as non-zero — confirm
 * against the CPG_OTHERFUNC1_REG layout.
 */
static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}
/* clk_ops for the PLL5_4 clock source mux. */
static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};
/*
 * Register the PLL5_4 clock source mux with its custom ops.
 * CLK_SET_RATE_PARENT lets rate requests propagate up to SIPLL5.
 */
static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
{
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}
/**
 * struct sipll5 - SIPLL5 clock hardware data
 * @hw: clock hardware handle
 * @conf: packed register configuration
 * @foutpostdiv_rate: FOUTPOSTDIV rate programmed by set_rate (0 until set)
 * @priv: CPG private data
 */
struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)
/*
 * Derive the VCLK rate from FOUTPOSTDIV rate @rate using the cached
 * mux/divider settings: DIV_DSI_A divides by 1 << dsi_div_a, DIV_DSI_B by
 * dsi_div_b + 1, and clk source 1 (FOUT1PH0) halves the rate once more.
 */
static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
					     unsigned long rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	unsigned long vclk;

	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)
		vclk /= 2;

	return vclk;
}
  442. static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
  443. unsigned long parent_rate)
  444. {
  445. struct sipll5 *sipll5 = to_sipll5(hw);
  446. unsigned long pll5_rate = sipll5->foutpostdiv_rate;
  447. if (!pll5_rate)
  448. pll5_rate = parent_rate;
  449. return pll5_rate;
  450. }
/* PLL5 can synthesize any requested rate; accept the request unmodified. */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
/*
 * Reprogram SIPLL5 so that the downstream dividers produce VCLK rate @rate.
 * Sequence: drop the PLL into standby, wait for lock release, write the
 * divider/fraction registers, return to normal mode, wait for re-lock.
 */
static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 * OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *  |                             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0------------------>|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	/* @rate is the VCLK rate; work back to the FOUTPOSTDIV rate. */
	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5");
		return ret;
	}

	return 0;
}
/* clk_ops for SIPLL5. */
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};
  519. static struct clk * __init
  520. rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
  521. struct clk **clks,
  522. struct rzg2l_cpg_priv *priv)
  523. {
  524. const struct clk *parent;
  525. struct clk_init_data init;
  526. const char *parent_name;
  527. struct sipll5 *sipll5;
  528. struct clk_hw *clk_hw;
  529. int ret;
  530. parent = clks[core->parent & 0xffff];
  531. if (IS_ERR(parent))
  532. return ERR_CAST(parent);
  533. sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
  534. if (!sipll5)
  535. return ERR_PTR(-ENOMEM);
  536. init.name = core->name;
  537. parent_name = __clk_get_name(parent);
  538. init.ops = &rzg2l_cpg_sipll5_ops;
  539. init.flags = 0;
  540. init.parent_names = &parent_name;
  541. init.num_parents = 1;
  542. sipll5->hw.init = &init;
  543. sipll5->conf = core->conf;
  544. sipll5->priv = priv;
  545. writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
  546. CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
  547. clk_hw = &sipll5->hw;
  548. clk_hw->init = &init;
  549. ret = devm_clk_hw_register(priv->dev, clk_hw);
  550. if (ret)
  551. return ERR_PTR(ret);
  552. priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
  553. priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
  554. priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
  555. return clk_hw->clk;
  556. }
/**
 * struct pll_clk - SAM PLL clock hardware data
 * @hw: clock hardware handle
 * @conf: packed register configuration (see GET_REG_SAMPLL_CLK1/CLK2)
 * @type: clock type (only CLK_TYPE_SAM_PLL is rate-decoded)
 * @base: CPG register block base address
 * @priv: CPG private data
 */
struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
/*
 * Decode the SAM PLL rate from the CLK1/CLK2 registers:
 *
 *   rate = parent_rate * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV)
 *
 * computed as a 64-bit multiply-shift to preserve the fractional part.
 */
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}
/* clk_ops for SAM PLLs: read-only, rate decoded from hardware registers. */
static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
/*
 * Register a SAM PLL core clock.  Only recalc_rate is provided, so the PLL
 * rate is reported from the hardware registers and never reprogrammed here.
 */
static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;

	/* The parent clock index lives in the low 16 bits of core->parent. */
	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_pll_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	return clk_register(NULL, &pll_clk->hw);
}
  613. static struct clk
  614. *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
  615. void *data)
  616. {
  617. unsigned int clkidx = clkspec->args[1];
  618. struct rzg2l_cpg_priv *priv = data;
  619. struct device *dev = priv->dev;
  620. const char *type;
  621. struct clk *clk;
  622. switch (clkspec->args[0]) {
  623. case CPG_CORE:
  624. type = "core";
  625. if (clkidx > priv->last_dt_core_clk) {
  626. dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
  627. return ERR_PTR(-EINVAL);
  628. }
  629. clk = priv->clks[clkidx];
  630. break;
  631. case CPG_MOD:
  632. type = "module";
  633. if (clkidx >= priv->num_mod_clks) {
  634. dev_err(dev, "Invalid %s clock index %u\n", type,
  635. clkidx);
  636. return ERR_PTR(-EINVAL);
  637. }
  638. clk = priv->clks[priv->num_core_clks + clkidx];
  639. break;
  640. default:
  641. dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
  642. return ERR_PTR(-EINVAL);
  643. }
  644. if (IS_ERR(clk))
  645. dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
  646. PTR_ERR(clk));
  647. else
  648. dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
  649. clkspec->args[0], clkspec->args[1], clk,
  650. clk_get_rate(clk));
  651. return clk;
  652. }
/*
 * Register a single core clock described by @core and store it in
 * priv->clks[core->id].  On failure the error is logged and the slot keeps
 * its previous (error pointer) value.
 */
static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		/* External input clock looked up by name in the DT node. */
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}
/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mstp_clock, hw)
/*
 * Set or clear the ON bit of a module clock.  The upper 16 bits of the
 * CLK_ON register act as a write-enable mask for the lower bits.  When
 * enabling and the SoC has monitor registers, poll CLK_MON until the clock
 * is reported running.
 */
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	unsigned long flags;
	unsigned int i;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (enable)
		value = (bitmask << 16) | bitmask;
	else
		value = bitmask << 16;	/* write-enable only: the bit clears */
	writel(value, priv->base + CLK_ON_R(reg));

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	/* Bounded busy-wait for the monitor bit to confirm the clock is on. */
	for (i = 1000; i > 0; --i) {
		if (((readl(priv->base + CLK_MON_R(reg))) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));
		return -ETIMEDOUT;
	}

	return 0;
}
  777. static int rzg2l_mod_clock_enable(struct clk_hw *hw)
  778. {
  779. struct mstp_clock *clock = to_mod_clock(hw);
  780. if (clock->sibling) {
  781. struct rzg2l_cpg_priv *priv = clock->priv;
  782. unsigned long flags;
  783. bool enabled;
  784. spin_lock_irqsave(&priv->rmw_lock, flags);
  785. enabled = clock->sibling->enabled;
  786. clock->enabled = true;
  787. spin_unlock_irqrestore(&priv->rmw_lock, flags);
  788. if (enabled)
  789. return 0;
  790. }
  791. return rzg2l_mod_clock_endisable(hw, true);
  792. }
  793. static void rzg2l_mod_clock_disable(struct clk_hw *hw)
  794. {
  795. struct mstp_clock *clock = to_mod_clock(hw);
  796. if (clock->sibling) {
  797. struct rzg2l_cpg_priv *priv = clock->priv;
  798. unsigned long flags;
  799. bool enabled;
  800. spin_lock_irqsave(&priv->rmw_lock, flags);
  801. enabled = clock->sibling->enabled;
  802. clock->enabled = false;
  803. spin_unlock_irqrestore(&priv->rmw_lock, flags);
  804. if (enabled)
  805. return;
  806. }
  807. rzg2l_mod_clock_endisable(hw, false);
  808. }
  809. static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
  810. {
  811. struct mstp_clock *clock = to_mod_clock(hw);
  812. struct rzg2l_cpg_priv *priv = clock->priv;
  813. u32 bitmask = BIT(clock->bit);
  814. u32 value;
  815. if (!clock->off) {
  816. dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
  817. return 1;
  818. }
  819. if (clock->sibling)
  820. return clock->enabled;
  821. if (priv->info->has_clk_mon_regs)
  822. value = readl(priv->base + CLK_MON_R(clock->off));
  823. else
  824. value = readl(priv->base + clock->off);
  825. return value & bitmask;
  826. }
/* clk_ops for MSTP gate clocks: gate control only, no rate operations. */
static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};
  832. static struct mstp_clock
  833. *rzg2l_mod_clock__get_sibling(struct mstp_clock *clock,
  834. struct rzg2l_cpg_priv *priv)
  835. {
  836. struct clk_hw *hw;
  837. unsigned int i;
  838. for (i = 0; i < priv->num_mod_clks; i++) {
  839. struct mstp_clock *clk;
  840. if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
  841. continue;
  842. hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
  843. clk = to_mod_clock(hw);
  844. if (clock->off == clk->off && clock->bit == clk->bit)
  845. return clk;
  846. }
  847. return NULL;
  848. }
/*
 * rzg2l_cpg_register_mod_clk() - Register a single module (MSTP) clock.
 * @mod: description of the module clock to register
 * @info: SoC-specific CPG description (critical-clock list, etc.)
 * @priv: CPG private data; the result is stored in priv->clks[mod->id]
 *
 * On failure only an error is logged; priv->clks[id] keeps its ERR_PTR
 * so lookups of this clock keep failing cleanly.
 */
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	/* Sanity-check the static clock tables (debug builds only). */
	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	/* Clocks the system cannot run without must never be gated. */
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	/*
	 * Coupled clocks share one hardware gate: link the two soft states
	 * so enable/disable can be refcounted across the pair.
	 */
	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock__get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}
/* Recover the CPG private data from the embedded reset controller. */
#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

/*
 * rzg2l_cpg_reset() - Pulse the reset line of a module.
 * @rcdev: reset controller
 * @id: index into info->resets[]
 *
 * Asserts the reset (write-enable bit only, value bit clear), waits at
 * least one RCLK cycle, then deasserts it again.
 *
 * Return: 0 on success.
 */
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 dis = BIT(info->resets[id].bit);
	u32 we = dis << 16;	/* write-enable mask in the upper half */

	dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	/* Reset module */
	writel(we, priv->base + CLK_RST_R(reg));

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(we | dis, priv->base + CLK_RST_R(reg));

	return 0;
}
  933. static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
  934. unsigned long id)
  935. {
  936. struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
  937. const struct rzg2l_cpg_info *info = priv->info;
  938. unsigned int reg = info->resets[id].off;
  939. u32 value = BIT(info->resets[id].bit) << 16;
  940. dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
  941. writel(value, priv->base + CLK_RST_R(reg));
  942. return 0;
  943. }
  944. static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
  945. unsigned long id)
  946. {
  947. struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
  948. const struct rzg2l_cpg_info *info = priv->info;
  949. unsigned int reg = info->resets[id].off;
  950. u32 dis = BIT(info->resets[id].bit);
  951. u32 value = (dis << 16) | dis;
  952. dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
  953. CLK_RST_R(reg));
  954. writel(value, priv->base + CLK_RST_R(reg));
  955. return 0;
  956. }
  957. static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
  958. unsigned long id)
  959. {
  960. struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
  961. const struct rzg2l_cpg_info *info = priv->info;
  962. unsigned int reg = info->resets[id].off;
  963. u32 bitmask = BIT(info->resets[id].bit);
  964. s8 monbit = info->resets[id].monbit;
  965. if (info->has_clk_mon_regs) {
  966. return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
  967. } else if (monbit >= 0) {
  968. u32 monbitmask = BIT(monbit);
  969. return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
  970. }
  971. return -ENOTSUPP;
  972. }
/* Operations exposed through the reset controller framework. */
static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};
  979. static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
  980. const struct of_phandle_args *reset_spec)
  981. {
  982. struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
  983. const struct rzg2l_cpg_info *info = priv->info;
  984. unsigned int id = reset_spec->args[0];
  985. if (id >= rcdev->nr_resets || !info->resets[id].off) {
  986. dev_err(rcdev->dev, "Invalid reset index %u\n", id);
  987. return -EINVAL;
  988. }
  989. return id;
  990. }
  991. static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
  992. {
  993. priv->rcdev.ops = &rzg2l_cpg_reset_ops;
  994. priv->rcdev.of_node = priv->dev->of_node;
  995. priv->rcdev.dev = priv->dev;
  996. priv->rcdev.of_reset_n_cells = 1;
  997. priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
  998. priv->rcdev.nr_resets = priv->num_resets;
  999. return devm_reset_controller_register(priv->dev, &priv->rcdev);
  1000. }
  1001. static bool rzg2l_cpg_is_pm_clk(const struct of_phandle_args *clkspec)
  1002. {
  1003. if (clkspec->args_count != 2)
  1004. return false;
  1005. switch (clkspec->args[0]) {
  1006. case CPG_MOD:
  1007. return true;
  1008. default:
  1009. return false;
  1010. }
  1011. }
/*
 * rzg2l_cpg_attach_dev() - genpd attach callback.
 * @unused: PM domain (not needed here)
 * @dev: device being attached to the clock domain
 *
 * Walk the device's "clocks" DT property and add every CPG module clock
 * to the device's PM clock list, creating that list on first use.
 * Non-module clocks are skipped.
 *
 * Return: 0 on success, negative error code otherwise; on failure the
 * partially built PM clock list is destroyed again.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(&clkspec)) {
			if (once) {
				/* Create the PM clock list lazily, on the
				 * first module clock encountered. */
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			/* The node reference is consumed here either way. */
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);

err:
	return error;
}
  1056. static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
  1057. {
  1058. if (!pm_clk_no_clocks(dev))
  1059. pm_clk_destroy(dev);
  1060. }
/* devm action: undo the pm_genpd_init() done in rzg2l_cpg_add_clk_domain(). */
static void rzg2l_cpg_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}
/*
 * rzg2l_cpg_add_clk_domain() - Create and register the CPG PM domain.
 * @dev: CPG device
 *
 * The domain is always-on and manages attached devices through their PM
 * clock lists (GENPD_FLAG_PM_CLK); GENPD_FLAG_ACTIVE_WAKEUP keeps it
 * active for wakeup paths. Cleanup is chained via a devm action.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __init rzg2l_cpg_add_clk_domain(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	int ret;

	genpd = devm_kzalloc(dev, sizeof(*genpd), GFP_KERNEL);
	if (!genpd)
		return -ENOMEM;

	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = rzg2l_cpg_attach_dev;
	genpd->detach_dev = rzg2l_cpg_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, genpd);
}
/*
 * rzg2l_cpg_probe() - Probe the CPG block.
 * @pdev: CPG platform device
 *
 * Maps the CPG registers, registers every core and module clock described
 * by the matched SoC info, then installs the clock provider, the clock
 * PM domain and the reset controller.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* One slot per possible core clock plus per hardware module clock. */
	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	/* Mark every slot unused until the matching clock registers. */
	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_clk_domain(dev);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
/* Match table: each entry carries the SoC-specific CPG description. */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
/*
 * Probe-only driver (registered via platform_driver_probe() below);
 * no .remove callback is provided.
 */
static struct platform_driver rzg2l_cpg_driver = {
	.driver = {
		.name = "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};
/*
 * Register the CPG driver early (subsys_initcall) so its clocks exist
 * before later initcalls probe the devices that consume them.
 */
static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
MODULE_LICENSE("GPL v2");