// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/qcom_rpm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <dt-bindings/mfd/qcom-rpm.h>
#include <dt-bindings/clock/qcom,rpmcc.h>

#define QCOM_RPM_MISC_CLK_TYPE		0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID	0x2
#define QCOM_RPM_XO_MODE_ON		0x2

static const struct clk_parent_data gcc_pxo[] = {
	{ .fw_name = "pxo", .name = "pxo_board" },
};

static const struct clk_parent_data gcc_cxo[] = {
	{ .fw_name = "cxo", .name = "cxo_board" },
};
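
/*
 * DEFINE_CLK_RPM() instantiates a pair of peer clocks for one RPM resource:
 * a regular clock that votes in both the active and sleep sets, and an
 * "active-only" twin that votes zero in the sleep set (see to_active_sleep()).
 */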
#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
	static struct clk_rpm _platform##_##_active; \
	static struct clk_rpm _platform##_##_name = { \
		.rpm_clk_id = (r_id), \
		.peer = &_platform##_##_active, \
		.rate = INT_MAX, \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_ops, \
			.name = #_name, \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}; \
	static struct clk_rpm _platform##_##_active = { \
		.rpm_clk_id = (r_id), \
		.peer = &_platform##_##_name, \
		.active_only = true, \
		.rate = INT_MAX, \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_ops, \
			.name = #_active, \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}
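
/*
 * XO buffer clocks all share the single QCOM_RPM_CXO_BUFFERS resource; each
 * buffer is controlled through its own mode field at bit position xo_offset.
 */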
#define DEFINE_CLK_RPM_XO_BUFFER(_platform, _name, _active, offset) \
	static struct clk_rpm _platform##_##_name = { \
		.rpm_clk_id = QCOM_RPM_CXO_BUFFERS, \
		.xo_offset = (offset), \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_xo_ops, \
			.name = #_name, \
			.parent_data = gcc_cxo, \
			.num_parents = ARRAY_SIZE(gcc_cxo), \
		}, \
	}
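
/*
 * Fixed RPM clocks always report the constant rate "r"; they are only ever
 * switched on and off (see clk_rpm_fixed_prepare()/clk_rpm_fixed_unprepare()).
 */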
#define DEFINE_CLK_RPM_FIXED(_platform, _name, _active, r_id, r) \
	static struct clk_rpm _platform##_##_name = { \
		.rpm_clk_id = (r_id), \
		.rate = (r), \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_fixed_ops, \
			.name = #_name, \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}

#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)

struct rpm_cc;
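
/*
 * struct clk_rpm - one RPM-managed clock
 * @rpm_clk_id:  RPM resource identifier for this clock
 * @xo_offset:   bit shift of this buffer's mode field (XO buffer clocks only)
 * @active_only: vote only in the active set; the sleep-set vote is zero
 * @rate:        current rate vote, in Hz
 * @enabled:     true after a successful prepare
 * @branch:      collapse the rate vote to a 0/1 enable value
 * @peer:        active-only (or regular) counterpart sharing @rpm_clk_id
 * @hw:          handle registered with the common clock framework
 * @rpm:         RPM handle obtained from the parent MFD device
 * @rpm_cc:      back-pointer to the controller, holds the shared XO state
 */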
struct clk_rpm {
	const int rpm_clk_id;
	const int xo_offset;
	const bool active_only;
	unsigned long rate;
	bool enabled;
	bool branch;
	struct clk_rpm *peer;
	struct clk_hw hw;
	struct qcom_rpm *rpm;
	struct rpm_cc *rpm_cc;
};

struct rpm_cc {
	struct qcom_rpm *rpm;
	struct clk_rpm **clks;
	size_t num_clks;
	u32 xo_buffer_value;
	struct mutex xo_lock;
};

struct rpm_clk_desc {
	struct clk_rpm **clks;
	size_t num_clks;
};

static DEFINE_MUTEX(rpm_clk_lock);
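
/*
 * Hand-off at probe time: vote for INT_MAX on both the active and sleep sets
 * so that a clock which may already be in use keeps running at full rate
 * until its Linux consumers start voting. PLL4 and the XO buffers are
 * skipped; the vendor tree only reads their status.
 */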
static int clk_rpm_handoff(struct clk_rpm *r)
{
	int ret;
	u32 value = INT_MAX;

	/*
	 * The vendor tree simply reads the status for this
	 * RPM clock.
	 */
	if (r->rpm_clk_id == QCOM_RPM_PLL_4 ||
	    r->rpm_clk_id == QCOM_RPM_CXO_BUFFERS)
		return 0;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;

	return 0;
}

static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			      r->rpm_clk_id, &value, 1);
}

static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			      r->rpm_clk_id, &value, 1);
}
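
/*
 * The RPM tracks two votes per resource: the "active" set applies while the
 * application processor is running, the "sleep" set while it is in its
 * low-power state. to_active_sleep() splits one requested rate into those
 * two votes.
 */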
static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
			    unsigned long *active, unsigned long *sleep)
{
	*active = rate;

	/*
	 * Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero.
	 */
	if (r->active_only)
		*sleep = 0;
	else
		*sleep = *active;
}
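
/*
 * Preparing a clock sends the aggregated vote to the RPM: the maximum of
 * this clock's rate and its enabled peer's rate, for both the active and
 * sleep sets. Branch clocks collapse the vote to a plain 0/1 enable value.
 */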
static int clk_rpm_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (!r->rate)
		goto out;

	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);

	if (r->branch)
		active_rate = !!active_rate;

	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	if (r->branch)
		sleep_rate = !!sleep_rate;

	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		/* Undo the active set vote and restore it */
		ret = clk_rpm_set_rate_active(r, peer_rate);

out:
	if (!ret)
		r->enabled = true;

	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static void clk_rpm_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret;

	mutex_lock(&rpm_clk_lock);

	if (!r->rate)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate, &peer_rate,
				&peer_sleep_rate);

	active_rate = r->branch ? !!peer_rate : peer_rate;
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->enabled = false;

out:
	mutex_unlock(&rpm_clk_lock);
}
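
/*
 * Enabling or disabling an XO buffer read-modify-writes the cached
 * xo_buffer_value shared by all buffers (protected by xo_lock) and sends the
 * whole word to the QCOM_RPM_CXO_BUFFERS resource.
 */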
static int clk_rpm_xo_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value | (QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = true;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);

	return ret;
}

static void clk_rpm_xo_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value & ~(QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = false;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);
}

static int clk_rpm_fixed_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 1;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = true;

	return ret;
}

static void clk_rpm_fixed_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 0;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = false;
}

static int clk_rpm_set_rate(struct clk_hw *hw,
			    unsigned long rate, unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long active_rate, sleep_rate;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	if (!r->enabled)
		goto out;

	to_active_sleep(r, rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->rate = rate;

out:
	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate is requested.
	 */
	return rate;
}

static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);

	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate was set.
	 */
	return r->rate;
}
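
/*
 * XO buffer clocks are plain on/off switches; fixed clocks additionally
 * report their constant rate via round_rate/recalc_rate; regular RPM clocks
 * also accept rate requests through set_rate.
 */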
static const struct clk_ops clk_rpm_xo_ops = {
	.prepare = clk_rpm_xo_prepare,
	.unprepare = clk_rpm_xo_unprepare,
};

static const struct clk_ops clk_rpm_fixed_ops = {
	.prepare = clk_rpm_fixed_prepare,
	.unprepare = clk_rpm_fixed_unprepare,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

static const struct clk_ops clk_rpm_ops = {
	.prepare = clk_rpm_prepare,
	.unprepare = clk_rpm_unprepare,
	.set_rate = clk_rpm_set_rate,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};
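
/*
 * Per-SoC clock tables, indexed by the RPM_* identifiers from
 * dt-bindings/clock/qcom,rpmcc.h; the same index is used by consumers in
 * their "clocks" phandle arguments.
 */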
/* MSM8660/APQ8060 */
DEFINE_CLK_RPM(msm8660, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(msm8660, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(msm8660, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(msm8660, smi_clk, smi_a_clk, QCOM_RPM_SMI_CLK);
DEFINE_CLK_RPM(msm8660, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM_FIXED(msm8660, pll4_clk, pll4_a_clk, QCOM_RPM_PLL_4, 540672000);

static struct clk_rpm *msm8660_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &msm8660_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &msm8660_afab_a_clk,
	[RPM_SYS_FABRIC_CLK] = &msm8660_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &msm8660_sfab_a_clk,
	[RPM_MM_FABRIC_CLK] = &msm8660_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &msm8660_mmfab_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &msm8660_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &msm8660_daytona_a_clk,
	[RPM_SFPB_CLK] = &msm8660_sfpb_clk,
	[RPM_SFPB_A_CLK] = &msm8660_sfpb_a_clk,
	[RPM_CFPB_CLK] = &msm8660_cfpb_clk,
	[RPM_CFPB_A_CLK] = &msm8660_cfpb_a_clk,
	[RPM_MMFPB_CLK] = &msm8660_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &msm8660_mmfpb_a_clk,
	[RPM_SMI_CLK] = &msm8660_smi_clk,
	[RPM_SMI_A_CLK] = &msm8660_smi_a_clk,
	[RPM_EBI1_CLK] = &msm8660_ebi1_clk,
	[RPM_EBI1_A_CLK] = &msm8660_ebi1_a_clk,
	[RPM_PLL4_CLK] = &msm8660_pll4_clk,
};

static const struct rpm_clk_desc rpm_clk_msm8660 = {
	.clks = msm8660_clks,
	.num_clks = ARRAY_SIZE(msm8660_clks),
};

/* apq8064 */
DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d0_clk, xo_d0_a_clk, 0);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d1_clk, xo_d1_a_clk, 8);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a0_clk, xo_a0_a_clk, 16);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a1_clk, xo_a1_a_clk, 24);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a2_clk, xo_a2_a_clk, 28);

static struct clk_rpm *apq8064_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk,
	[RPM_CFPB_CLK] = &apq8064_cfpb_clk,
	[RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk,
	[RPM_EBI1_CLK] = &apq8064_ebi1_clk,
	[RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk,
	[RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk,
	[RPM_MMFPB_CLK] = &apq8064_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk,
	[RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk,
	[RPM_SFPB_CLK] = &apq8064_sfpb_clk,
	[RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
	[RPM_QDSS_CLK] = &apq8064_qdss_clk,
	[RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
	[RPM_XO_D0] = &apq8064_xo_d0_clk,
	[RPM_XO_D1] = &apq8064_xo_d1_clk,
	[RPM_XO_A0] = &apq8064_xo_a0_clk,
	[RPM_XO_A1] = &apq8064_xo_a1_clk,
	[RPM_XO_A2] = &apq8064_xo_a2_clk,
};

static const struct rpm_clk_desc rpm_clk_apq8064 = {
	.clks = apq8064_clks,
	.num_clks = ARRAY_SIZE(apq8064_clks),
};

/* ipq806x */
DEFINE_CLK_RPM(ipq806x, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(ipq806x, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(ipq806x, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(ipq806x, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM(ipq806x, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(ipq806x, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(ipq806x, nss_fabric_0_clk, nss_fabric_0_a_clk, QCOM_RPM_NSS_FABRIC_0_CLK);
DEFINE_CLK_RPM(ipq806x, nss_fabric_1_clk, nss_fabric_1_a_clk, QCOM_RPM_NSS_FABRIC_1_CLK);

static struct clk_rpm *ipq806x_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &ipq806x_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &ipq806x_afab_a_clk,
	[RPM_CFPB_CLK] = &ipq806x_cfpb_clk,
	[RPM_CFPB_A_CLK] = &ipq806x_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &ipq806x_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &ipq806x_daytona_a_clk,
	[RPM_EBI1_CLK] = &ipq806x_ebi1_clk,
	[RPM_EBI1_A_CLK] = &ipq806x_ebi1_a_clk,
	[RPM_SYS_FABRIC_CLK] = &ipq806x_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &ipq806x_sfab_a_clk,
	[RPM_SFPB_CLK] = &ipq806x_sfpb_clk,
	[RPM_SFPB_A_CLK] = &ipq806x_sfpb_a_clk,
	[RPM_NSS_FABRIC_0_CLK] = &ipq806x_nss_fabric_0_clk,
	[RPM_NSS_FABRIC_0_A_CLK] = &ipq806x_nss_fabric_0_a_clk,
	[RPM_NSS_FABRIC_1_CLK] = &ipq806x_nss_fabric_1_clk,
	[RPM_NSS_FABRIC_1_A_CLK] = &ipq806x_nss_fabric_1_a_clk,
};

static const struct rpm_clk_desc rpm_clk_ipq806x = {
	.clks = ipq806x_clks,
	.num_clks = ARRAY_SIZE(ipq806x_clks),
};

static const struct of_device_id rpm_clk_match_table[] = {
	{ .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
	{ .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x },
	{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
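
/*
 * of_clk provider callback: translate the single cell of a consumer's
 * "clocks" specifier into the matching clk_hw, or return an error if the
 * index is out of range or the clock is not provided on this SoC.
 */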
static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
					  void *data)
{
	struct rpm_cc *rcc = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= rcc->num_clks) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
}
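
/*
 * Probe: fetch the RPM handle from the parent MFD device, place the initial
 * hand-off vote for each clock in the per-SoC table, register the clocks
 * with the common clock framework, and expose them through an of_clk
 * provider.
 */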
static int rpm_clk_probe(struct platform_device *pdev)
{
	struct rpm_cc *rcc;
	int ret;
	size_t num_clks, i;
	struct qcom_rpm *rpm;
	struct clk_rpm **rpm_clks;
	const struct rpm_clk_desc *desc;

	rpm = dev_get_drvdata(pdev->dev.parent);
	if (!rpm) {
		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
		return -ENODEV;
	}

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rpm_clks = desc->clks;
	num_clks = desc->num_clks;

	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
	if (!rcc)
		return -ENOMEM;

	rcc->clks = rpm_clks;
	rcc->num_clks = num_clks;
	mutex_init(&rcc->xo_lock);

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		rpm_clks[i]->rpm = rpm;
		rpm_clks[i]->rpm_cc = rcc;

		ret = clk_rpm_handoff(rpm_clks[i]);
		if (ret)
			goto err;
	}

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
		if (ret)
			goto err;
	}

	ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get,
				     rcc);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
	return ret;
}

static int rpm_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

static struct platform_driver rpm_clk_driver = {
	.driver = {
		.name = "qcom-clk-rpm",
		.of_match_table = rpm_clk_match_table,
	},
	.probe = rpm_clk_probe,
	.remove = rpm_clk_remove,
};

static int __init rpm_clk_init(void)
{
	return platform_driver_register(&rpm_clk_driver);
}
core_initcall(rpm_clk_init);

static void __exit rpm_clk_exit(void)
{
	platform_driver_unregister(&rpm_clk_driver);
}
module_exit(rpm_clk_exit);

MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-clk-rpm");