  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <dt-bindings/clock/qcom,apsscc-sdxlemur.h>

#include "clk-alpha-pll.h"
#include "clk-debug.h"
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
#include "clk-regmap-mux-div.h"
#include "common.h"
#include "reset.h"
#include "vdd-level-cpu.h"
/* Resolve a clk_hw back to its containing clk_regmap_mux_div. */
#define to_clk_regmap_mux_div(_hw) \
	container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr)

/* PLL rail: one regulator, corner table from vdd_corner (vdd-level-cpu.h). */
static DEFINE_VDD_REGULATORS(vdd_pll, VDD_NUM, 1, vdd_corner);
/* CPU rail: one regulator; levels are filled from DT speed-bin data at probe. */
static DEFINE_VDD_REGS_INIT(vdd_cpu, 1);

/* Parent indices for the APSS CPU mux (order matches parent_map/parent_data). */
enum apcs_mux_clk_parent {
	P_BI_TCXO,
	P_GPLL0,
	P_APCS_CPU_PLL,
};
  36. static int cpucc_clk_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
  37. unsigned long prate, u8 index)
  38. {
  39. struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
  40. return mux_div_set_src_div(cpuclk, cpuclk->parent_map[index].cfg,
  41. cpuclk->div);
  42. }
static int cpucc_clk_set_parent(struct clk_hw *hw, u8 index)
{
	/*
	 * Since cpucc_clk_set_rate_and_parent() is defined, set_parent()
	 * never gets called from clk_change_rate(), so just return 0.
	 */
	return 0;
}
  51. static int cpucc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
  52. unsigned long prate)
  53. {
  54. struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
  55. /*
  56. * Parent is same as the last rate.
  57. * Here just configure new div.
  58. */
  59. return mux_div_set_src_div(cpuclk, cpuclk->src, cpuclk->div);
  60. }
  61. static unsigned long
  62. cpucc_calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
  63. {
  64. u64 tmp = rate;
  65. if (hid_div) {
  66. tmp *= 2;
  67. do_div(tmp, hid_div + 1);
  68. }
  69. if (mode) {
  70. tmp *= m;
  71. do_div(tmp, n);
  72. }
  73. return tmp;
  74. }
/*
 * Pick a parent and divider for @req->rate:
 *  - exactly the XO rate   -> run from bi_tcxo with div 1;
 *  - reachable from GPLL0  -> run from GPLL0 through a HID divider;
 *  - otherwise             -> run from the APSS PLL (div 1) and let the
 *                             PLL round the rate via __clk_determine_rate().
 * The chosen source cfg and divider are cached on the mux_div for the
 * subsequent set_rate()/set_rate_and_parent() calls.
 */
static int cpucc_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw *xo_hw, *gpll0_hw, *apcs_parent;
	struct clk_rate_request parent_req = { };
	struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
	unsigned long rate = req->rate, rrate;
	u32 div;
	int ret;

	xo_hw = clk_hw_get_parent_by_index(hw, P_BI_TCXO);
	if (!xo_hw) {
		pr_err("Can't find parent for index %u\n", P_BI_TCXO);
		return -EINVAL;
	}
	if (rate == clk_hw_get_rate(xo_hw)) {
		req->best_parent_hw = xo_hw;
		req->best_parent_rate = rate;
		cpuclk->div = 1;
		cpuclk->src = cpuclk->parent_map[P_BI_TCXO].cfg;
		return 0;
	}

	gpll0_hw = clk_hw_get_parent_by_index(hw, P_GPLL0);
	if (!gpll0_hw) {
		pr_err("Can't find parent for index %u\n", P_GPLL0);
		return -EINVAL;
	}

	/* Smallest HID divider (2*div - 1 encoding) that does not exceed rate */
	div = DIV_ROUND_UP((2 * (clk_hw_get_rate(gpll0_hw))), rate) - 1;
	rrate = cpucc_calc_rate(clk_hw_get_rate(gpll0_hw), 0, 0, 0, div);

	/* Use the GPLL0 source */
	if (rate <= rrate) {
		parent_req.best_parent_hw = gpll0_hw;
		req->best_parent_hw = gpll0_hw;
		req->best_parent_rate = clk_hw_get_rate(gpll0_hw);
		req->rate = rrate;
		cpuclk->src = cpuclk->parent_map[P_GPLL0].cfg;
	} else { /* Use the APCS PLL source */
		parent_req.rate = req->rate;
		parent_req.best_parent_hw = clk_hw_get_parent_by_index(hw,
							P_APCS_CPU_PLL);
		req->best_parent_hw = parent_req.best_parent_hw;
		/* The PLL's own reference (XO) feeds the child rate request */
		apcs_parent = clk_hw_get_parent(req->best_parent_hw);
		parent_req.best_parent_rate = clk_hw_get_rate(apcs_parent);
		ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
		if (ret)
			return ret;
		req->best_parent_rate = parent_req.rate;
		cpuclk->src = cpuclk->parent_map[P_APCS_CPU_PLL].cfg;
		div = 1;
	}
	cpuclk->div = div;
	return 0;
}
  126. static void cpucc_clk_list_registers(struct seq_file *f, struct clk_hw *hw)
  127. {
  128. struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
  129. int i = 0, size = 0, val;
  130. static struct clk_register_data data[] = {
  131. {"CMD_RCGR", 0x0},
  132. {"CFG_RCGR", 0x4},
  133. };
  134. size = ARRAY_SIZE(data);
  135. for (i = 0; i < size; i++) {
  136. regmap_read(cpuclk->clkr.regmap,
  137. cpuclk->reg_offset + data[i].offset, &val);
  138. seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
  139. }
  140. }
/* regmap-level ops attached at .init time to expose the debugfs dump. */
static struct clk_regmap_ops clk_rcg2_regmap_ops = {
	.list_registers = cpucc_clk_list_registers,
};
  144. static int clk_cpu_init(struct clk_hw *hw)
  145. {
  146. struct clk_regmap *rclk = to_clk_regmap(hw);
  147. if (!rclk->ops)
  148. rclk->ops = &clk_rcg2_regmap_ops;
  149. return 0;
  150. }
/*
 * Read back the programmed source/divider from hardware, refresh the
 * cached src/div, and compute the effective rate from the matching
 * parent's rate. Returns 0 when the programmed source matches no known
 * parent. NOTE(review): on a failed register read the negative errno is
 * returned through an unsigned long -- pre-existing quirk, left as-is.
 */
static unsigned long cpucc_clk_recalc_rate(struct clk_hw *hw,
					   unsigned long prate)
{
	struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
	struct clk_hw *parent;
	const char *name = clk_hw_get_name(hw);
	unsigned long parent_rate;
	u32 i, div, src = 0;
	u32 num_parents = clk_hw_get_num_parents(hw);
	int ret;

	ret = mux_div_get_src_div(cpuclk, &src, &div);
	if (ret)
		return ret;

	/* Cache what the hardware is actually running */
	cpuclk->src = src;
	cpuclk->div = div;

	for (i = 0; i < num_parents; i++) {
		if (src == cpuclk->parent_map[i].cfg) {
			parent = clk_hw_get_parent_by_index(hw, i);
			if (!parent) {
				pr_err("Can't find parent for index %u\n", i);
				return -EINVAL;
			}
			parent_rate = clk_hw_get_rate(parent);
			return cpucc_calc_rate(parent_rate, 0, 0, 0, div);
		}
	}
	pr_err("%s: Can't find parent %d\n", name, src);
	return 0;
}
/* Thin pass-throughs to the generic regmap mux-div implementation. */
static int cpucc_clk_enable(struct clk_hw *hw)
{
	return clk_regmap_mux_div_ops.enable(hw);
}

static void cpucc_clk_disable(struct clk_hw *hw)
{
	clk_regmap_mux_div_ops.disable(hw);
}

static u8 cpucc_clk_get_parent(struct clk_hw *hw)
{
	return clk_regmap_mux_div_ops.get_parent(hw);
}
  192. /*
  193. * We use the notifier function for switching to a temporary safe configuration
  194. * (mux and divider), while the APSS pll is reconfigured.
  195. */
  196. static int cpucc_notifier_cb(struct notifier_block *nb, unsigned long event,
  197. void *data)
  198. {
  199. struct clk_regmap_mux_div *cpuclk = container_of(nb,
  200. struct clk_regmap_mux_div, clk_nb);
  201. int ret = 0;
  202. if (event == PRE_RATE_CHANGE)
  203. /* set the mux to safe source(gpll0) & div */
  204. ret = mux_div_set_src_div(cpuclk, cpuclk->safe_src, 1);
  205. if (event == ABORT_RATE_CHANGE)
  206. pr_err("Error in configuring PLL - stay at safe src only\n");
  207. return notifier_from_errno(ret);
  208. }
/*
 * clk_ops for the CPU mux: generic mux-div enable/disable, custom
 * rate selection (determine_rate caches src/div), and regmap-based
 * prepare/rate-change hooks that handle voltage voting.
 */
static const struct clk_ops cpucc_clk_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.enable = cpucc_clk_enable,
	.disable = cpucc_clk_disable,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.get_parent = cpucc_clk_get_parent,
	.set_rate = cpucc_clk_set_rate,
	.set_parent = cpucc_clk_set_parent,
	.set_rate_and_parent = cpucc_clk_set_rate_and_parent,
	.determine_rate = cpucc_clk_determine_rate,
	.recalc_rate = cpucc_clk_recalc_rate,
	.debug_init = clk_common_debug_init,
	.init = clk_cpu_init,
};
/* VCO range for the Lucid 5LPE PLL (sdxlemur default). */
static struct pll_vco lucid_5lpe_vco[] = {
	{ 249600000, 2000000000, 0 },
};

/* VCO range for the default-type alpha PLL (mdm9607 fixup path). */
static struct pll_vco alpha_pll_vco[] = {
	{ 700000000, 1400000000, 0 },
};
/* Initial configuration for 1094.4 */
static const struct alpha_pll_config apcs_cpu_pll_config = {
	.l = 0x4E,		/* integer multiplier of the XO reference */
	.cal_l = 0x4E,
	.alpha = 0x0,
	.config_ctl_val = 0x2A9A699C,
	.config_ctl_hi_val = 0x00002261,
	.config_ctl_hi1_val = 0x20485699,
	.test_ctl_val = 0x00000000,
	.test_ctl_hi_val = 0x00000000,
	.test_ctl_hi1_val = 0x01800000,
	.user_ctl_val = 0x00000001,
	.user_ctl_hi_val = 0x00000805,
	.user_ctl_hi1_val = 0x00000000,
};

/*
 * Initial configuration for 1190.4 MHz. Non-const: the mdm9607 fixup
 * rewrites .l/.vco_val/.main_output_mask at probe time.
 */
static struct alpha_pll_config apcs_cpu_alpha_pll_config = {
	.l = 0x3E,
	.config_ctl_val = 0x4001055b,
	.test_ctl_hi_val = 0x0,
	.vco_val = BIT(20),
	.vco_mask = 0x3 << 20,
	.main_output_mask = BIT(0),
};
/*
 * mdm9607 variant init data: same clock name and XO parent, but the
 * default alpha-PLL ops instead of the Lucid 5LPE ops.
 */
static struct clk_init_data apcs_cpu_pll_mdm9607 = {
	.name = "apcs_cpu_pll",
	.parent_data = &(const struct clk_parent_data){
		.fw_name = "bi_tcxo_ao",
	},
	.num_parents = 1,
	.ops = &clk_alpha_pll_ops,
};
/*
 * The APSS CPU PLL, fed from the always-on XO. Declared with the
 * Lucid 5LPE register layout/ops; cpucc_mdm9607_fixup() re-targets it
 * for the default alpha-PLL type on mdm9607.
 */
static struct clk_alpha_pll apcs_cpu_pll = {
	.offset = 0x0,
	.vco_table = lucid_5lpe_vco,
	.num_vco = ARRAY_SIZE(lucid_5lpe_vco),
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_5LPE],
	.flags = BYPASS_LATCH,
	.clkr = {
		.hw.init = &(struct clk_init_data){
			.name = "apcs_cpu_pll",
			.parent_data = &(const struct clk_parent_data){
				.fw_name = "bi_tcxo_ao",
			},
			.num_parents = 1,
			.ops = &clk_alpha_pll_lucid_5lpe_ops,
		},
		/* Fmax per voltage corner for the vdd_pll rail */
		.vdd_data = {
			.vdd_class = &vdd_pll,
			.num_rate_max = VDD_NUM,
			.rate_max = (unsigned long[VDD_NUM]) {
				[VDD_MIN] = 615000000,
				[VDD_LOW] = 1066000000,
				[VDD_LOW_L1] = 1500000000,
				[VDD_NOMINAL] = 1750000000,
				[VDD_HIGH] = 1804800000},
		},
	},
};
/*
 * Initial configuration for 345.6MHz
 * NOTE(review): this comment predates the tables below; it appears to
 * describe the mux's boot configuration, not the parent mapping itself.
 * Each .cfg value is the hardware encoding of the mux source field.
 */
static const struct parent_map apcs_mux_clk_parent_map[] = {
	{ P_BI_TCXO, 0 },
	{ P_GPLL0, 1 },
	{ P_APCS_CPU_PLL, 5 },
};

/* Parent handles, in the same order as apcs_mux_clk_parent_map. */
static const struct clk_parent_data apss_cc_parent_data[] = {
	{ .fw_name = "bi_tcxo_ao" },
	{ .fw_name = "gpll0_out_even" },
	{ .hw = &apcs_cpu_pll.clkr.hw },
};
  301. static struct clk_regmap_mux_div apcs_mux_clk = {
  302. .reg_offset = 0x0,
  303. .hid_width = 5,
  304. .hid_shift = 0,
  305. .src_width = 3,
  306. .src_shift = 8,
  307. .parent_map = apcs_mux_clk_parent_map,
  308. .clkr.hw.init = &(struct clk_init_data) {
  309. .name = "apcs_mux_clk",
  310. .parent_data = apss_cc_parent_data,
  311. .num_parents = 3,
  312. .flags = CLK_SET_RATE_PARENT,
  313. .ops = &cpucc_clk_ops,
  314. },
  315. .clkr.vdd_data = {
  316. .vdd_class = &vdd_cpu,
  317. },
  318. };
/* NOTE(review): only mdm9607 is matched despite the sdxlemur file name. */
static const struct of_device_id match_table[] = {
	{ .compatible = "qcom,mdm9607-apsscc" },
	{}
};

/* 32-bit MMIO regmap template; .name is filled per-region at probe time. */
static struct regmap_config cpu_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
};
/* Clocks exported through the onecell provider, indexed by DT binding IDs. */
static struct clk_hw *cpu_clks_hws[] = {
	[APCS_CPU_PLL] = &apcs_cpu_pll.clkr.hw,
	[APCS_MUX_CLK] = &apcs_mux_clk.clkr.hw,
};
  333. static void cpucc_clk_get_speed_bin(struct platform_device *pdev, int *bin,
  334. int *version)
  335. {
  336. struct resource *res;
  337. u32 pte_efuse, valid;
  338. void __iomem *base;
  339. bool is_mdm9607;
  340. *bin = 0;
  341. *version = 0;
  342. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
  343. if (!res) {
  344. dev_info(&pdev->dev,
  345. "No speed/PVS binning available. Defaulting to 0!\n");
  346. return;
  347. }
  348. base = ioremap(res->start, resource_size(res));
  349. if (!base) {
  350. dev_info(&pdev->dev,
  351. "Unable to read efuse data. Defaulting to 0!\n");
  352. return;
  353. }
  354. pte_efuse = readl_relaxed(base);
  355. iounmap(base);
  356. is_mdm9607 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdm9607-apsscc");
  357. if (is_mdm9607) {
  358. *bin = (pte_efuse >> 0x2) & 0x7;
  359. dev_info(&pdev->dev, "PVS version: %d bin: %d\n", *version, *bin);
  360. return;
  361. }
  362. *bin = pte_efuse & 0x7;
  363. valid = ((pte_efuse >> 3) & 0x1) ? ((pte_efuse >> 3) & 0x1) : 0;
  364. *version = (pte_efuse >> 4) & 0x3;
  365. dev_info(&pdev->dev, "PVS version: %d bin: %d\n", *version, *bin);
  366. }
  367. static int cpucc_clk_get_fmax_vdd_class(struct platform_device *pdev,
  368. struct clk_vdd_class_data *clk_intd, char *prop_name)
  369. {
  370. struct device_node *of = pdev->dev.of_node;
  371. struct clk_vdd_class *vdd = clk_intd->vdd_class;
  372. int prop_len, i, j, ret;
  373. int num = vdd->num_regulators + 1;
  374. u32 *array;
  375. if (!of_find_property(of, prop_name, &prop_len)) {
  376. dev_err(&pdev->dev, "missing %s\n", prop_name);
  377. return -EINVAL;
  378. }
  379. prop_len /= sizeof(u32);
  380. if (prop_len % num) {
  381. dev_err(&pdev->dev, "bad length %d\n", prop_len);
  382. return -EINVAL;
  383. }
  384. prop_len /= num;
  385. vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
  386. GFP_KERNEL);
  387. if (!vdd->level_votes)
  388. return -ENOMEM;
  389. vdd->vdd_uv = devm_kzalloc(&pdev->dev,
  390. prop_len * sizeof(int) * (num - 1), GFP_KERNEL);
  391. if (!vdd->vdd_uv)
  392. return -ENOMEM;
  393. clk_intd->rate_max = devm_kzalloc(&pdev->dev,
  394. prop_len * sizeof(unsigned long), GFP_KERNEL);
  395. if (!clk_intd->rate_max)
  396. return -ENOMEM;
  397. array = kmalloc_array(prop_len * num, sizeof(u32), GFP_KERNEL);
  398. if (!array)
  399. return -ENOMEM;
  400. ret = of_property_read_u32_array(of, prop_name, array, prop_len * num);
  401. if (ret)
  402. return -ENOMEM;
  403. for (i = 0; i < prop_len; i++) {
  404. clk_intd->rate_max[i] = array[num * i];
  405. for (j = 1; j < num; j++) {
  406. vdd->vdd_uv[(num - 1) * i + (j - 1)] =
  407. array[num * i + j];
  408. }
  409. }
  410. kfree(array);
  411. vdd->num_levels = prop_len;
  412. vdd->cur_level = prop_len;
  413. clk_intd->num_rate_max = prop_len;
  414. return 0;
  415. }
  416. /*
  417. * Find the voltage level required for a given clock rate.
  418. */
  419. static int find_vdd_level(struct clk_vdd_class_data *clk_vdd_data,
  420. unsigned long rate)
  421. {
  422. int level;
  423. for (level = 0; level < clk_vdd_data->num_rate_max; level++)
  424. if (rate <= clk_vdd_data->rate_max[level])
  425. break;
  426. if (level == clk_vdd_data->num_rate_max) {
  427. pr_err("Rate %lu is greater than highest Fmax\n", rate);
  428. return -EINVAL;
  429. }
  430. return level;
  431. }
/*
 * Populate @dev's OPP table from the clock's fmax/voltage data, one
 * entry per level starting at index 1 (index 0 presumably holds a 0 Hz
 * placeholder -- TODO confirm), up to and including @max_rate.
 *
 * Returns 0 on success, -EINVAL on lookup failure, or the rate that
 * failed to be added (note: positive value; callers only test non-zero).
 */
static int cpucc_clk_add_opp(struct clk_regmap *clkr, struct device *dev,
			     unsigned long max_rate)
{
	struct clk_vdd_class_data *clk_vdd_data = &clkr->vdd_data;
	struct clk_vdd_class *vdd = clk_vdd_data->vdd_class;
	unsigned long rate = 0;
	long ret;
	int level, uv, j = 1;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	while (1) {
		rate = clk_vdd_data->rate_max[j++];
		level = find_vdd_level(clk_vdd_data, rate);
		if (level <= 0) {
			pr_warn("clock-cpu: no corner for %lu.\n", rate);
			return -EINVAL;
		}
		/* NOTE(review): indexing assumes a single regulator per level */
		uv = vdd->vdd_uv[level];
		if (uv < 0) {
			pr_warn("clock-cpu: no uv for %lu.\n", rate);
			return -EINVAL;
		}
		ret = dev_pm_opp_add(dev, rate, uv);
		if (ret) {
			pr_warn("clock-cpu: failed to add OPP for %lu\n", rate);
			return rate;
		}
		/* Stop once the top (max_rate) entry has been added */
		if (rate >= max_rate)
			break;
	}
	return 0;
}
  466. static void cpucc_clk_print_opp_table(int cpu)
  467. {
  468. struct dev_pm_opp *oppfmax, *oppfmin;
  469. unsigned long apc_fmax, apc_fmin;
  470. u32 max_cpuss_index = apcs_mux_clk.clkr.vdd_data.num_rate_max;
  471. apc_fmax = apcs_mux_clk.clkr.vdd_data.rate_max[max_cpuss_index - 1];
  472. apc_fmin = apcs_mux_clk.clkr.vdd_data.rate_max[1];
  473. oppfmax = dev_pm_opp_find_freq_exact(get_cpu_device(cpu),
  474. apc_fmax, true);
  475. oppfmin = dev_pm_opp_find_freq_exact(get_cpu_device(cpu),
  476. apc_fmin, true);
  477. pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", cpu, apc_fmin,
  478. dev_pm_opp_get_voltage(oppfmin));
  479. pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", cpu, apc_fmax,
  480. dev_pm_opp_get_voltage(oppfmax));
  481. }
  482. static void cpucc_clk_populate_opp_table(struct platform_device *pdev)
  483. {
  484. unsigned long apc_fmax;
  485. int cpu, final_cpu = 0;
  486. u32 max_cpuss_index = apcs_mux_clk.clkr.vdd_data.num_rate_max;
  487. apc_fmax = apcs_mux_clk.clkr.vdd_data.rate_max[max_cpuss_index - 1];
  488. for_each_possible_cpu(cpu) {
  489. final_cpu = cpu;
  490. WARN(cpucc_clk_add_opp(&apcs_mux_clk.clkr,
  491. get_cpu_device(cpu), apc_fmax),
  492. "Failed to add OPP levels for apcs_mux_clk\n");
  493. }
  494. cpucc_clk_print_opp_table(final_cpu);
  495. }
  496. static void cpucc_mdm9607_fixup(void)
  497. {
  498. apcs_cpu_pll.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT];
  499. apcs_cpu_pll.vco_table = alpha_pll_vco;
  500. apcs_cpu_pll.num_vco = sizeof(alpha_pll_vco);
  501. apcs_cpu_pll.clkr.hw.init = &apcs_cpu_pll_mdm9607;
  502. apcs_cpu_pll.clkr.vdd_data.rate_max[VDD_MIN] = 0;
  503. apcs_cpu_pll.clkr.vdd_data.rate_max[VDD_LOW] = 1400000000;
  504. apcs_cpu_pll.clkr.vdd_data.rate_max[VDD_LOW_L1] = 0;
  505. apcs_cpu_pll.clkr.vdd_data.rate_max[VDD_NOMINAL] = 0;
  506. /* GPLL0 as safe source Index */
  507. apcs_mux_clk.safe_src = 1;
  508. apcs_mux_clk.safe_div = 1;
  509. apcs_mux_clk.clk_nb.notifier_call = cpucc_notifier_cb;
  510. apcs_cpu_alpha_pll_config.l = 0x34;
  511. apcs_cpu_alpha_pll_config.vco_val = 0;
  512. apcs_cpu_alpha_pll_config.main_output_mask = 0;
  513. }
/*
 * Acquire all DT-provided resources: the XO and GPLL0 reference clocks
 * (deferring probe until they are registered), the two voltage rails,
 * the PLL and APCS-command MMIO regions (wrapped in regmaps), and the
 * speed-bin fmax/voltage table for the CPU mux.
 *
 * On success, *xo_rate holds the XO rate (19.2 MHz fallback) used for
 * the boot-frequency computation in probe.
 */
static int cpucc_get_and_parse_dt_resource(struct platform_device *pdev,
					   unsigned long *xo_rate)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct clk *clk;
	int ret, speed_bin, version;
	char prop_name[] = "qcom,speedX-bin-vX";
	void __iomem *base;
	struct regmap *map;

	/* Require the RPM-XO clock to be registered before */
	clk = clk_get(dev, "bi_tcxo_ao");
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) != -EPROBE_DEFER)
			dev_err(dev, "Unable to get xo clock\n");
		return PTR_ERR(clk);
	}
	*xo_rate = clk_get_rate(clk);
	if (!*xo_rate)
		*xo_rate = 19200000;	/* canonical 19.2 MHz XO fallback */
	clk_put(clk);

	/* Require the GPLL0_OUT_EVEN clock to be registered before */
	clk = clk_get(dev, "gpll0_out_even");
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) != -EPROBE_DEFER)
			dev_err(dev, "Unable to get GPLL0 clock\n");
		return PTR_ERR(clk);
	}
	clk_put(clk);

	/* Rail Regulator for apcs_cpu_pll & cpuss mux */
	vdd_pll.regulator[0] = devm_regulator_get(&pdev->dev, "vdd-pll");
	if (IS_ERR(vdd_pll.regulator[0])) {
		if (!(PTR_ERR(vdd_pll.regulator[0]) == -EPROBE_DEFER))
			dev_err(&pdev->dev,
				"Unable to get vdd_pll regulator\n");
		return PTR_ERR(vdd_pll.regulator[0]);
	}

	vdd_cpu.regulator[0] = devm_regulator_get(&pdev->dev, "cpu-vdd");
	if (IS_ERR(vdd_cpu.regulator[0])) {
		if (!(PTR_ERR(vdd_cpu.regulator[0]) == -EPROBE_DEFER))
			dev_err(&pdev->dev,
				"Unable to get cpu-vdd regulator\n");
		return PTR_ERR(vdd_cpu.regulator[0]);
	}

	/* Map the PLL register region and wrap it in a regmap */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_pll");
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		dev_err(&pdev->dev, "Failed to map apcs_cpu_pll register base\n");
		return PTR_ERR(base);
	}

	cpu_regmap_config.name = "apcs_pll";
	map = devm_regmap_init_mmio(dev, base, &cpu_regmap_config);
	if (IS_ERR(map)) {
		dev_err(&pdev->dev, "Couldn't get regmap for apcs_cpu_pll\n");
		return PTR_ERR(map);
	}
	apcs_cpu_pll.clkr.regmap = map;
	apcs_cpu_pll.clkr.dev = &pdev->dev;

	/* Map the APCS command (RCG) region for the mux clock */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_cmd");
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		dev_err(&pdev->dev, "Failed to map apcs_cmd register base\n");
		return PTR_ERR(base);
	}

	cpu_regmap_config.name = "apcs_cmd";
	apcs_mux_clk.clkr.regmap = devm_regmap_init_mmio(dev, base,
							 &cpu_regmap_config);
	if (IS_ERR(apcs_mux_clk.clkr.regmap)) {
		dev_err(&pdev->dev, "Couldn't get regmap for apcs_cmd\n");
		return PTR_ERR(apcs_mux_clk.clkr.regmap);
	}
	apcs_mux_clk.clkr.dev = &pdev->dev;

	/* Get speed bin information */
	cpucc_clk_get_speed_bin(pdev, &speed_bin, &version);
	snprintf(prop_name, ARRAY_SIZE(prop_name),
		 "qcom,speed%d-bin-v%d", speed_bin, version);

	ret = cpucc_clk_get_fmax_vdd_class(pdev,
		(struct clk_vdd_class_data *)&apcs_mux_clk.clkr.vdd_data, prop_name);
	if (ret) {
		/* Fall back to the generic bin-0 table before giving up */
		dev_err(&pdev->dev,
			"Can't get speed bin for apcs_mux_clk. Falling back to zero\n");
		ret = cpucc_clk_get_fmax_vdd_class(pdev,
			&apcs_mux_clk.clkr.vdd_data, "qcom,speed0-bin-v0");
		if (ret) {
			dev_err(&pdev->dev,
				"Unable to get speed bin for apcs_mux_clk freq-corner mapping info\n");
			return ret;
		}
	}
	return 0;
}
  605. static int cpucc_driver_probe(struct platform_device *pdev)
  606. {
  607. struct clk_hw_onecell_data *data;
  608. struct device *dev = &pdev->dev;
  609. int i, ret, cpu;
  610. bool is_mdm9607;
  611. unsigned long xo_rate;
  612. u32 l_val;
  613. ret = cpucc_get_and_parse_dt_resource(pdev, &xo_rate);
  614. if (ret < 0)
  615. return ret;
  616. is_mdm9607 = of_device_is_compatible(pdev->dev.of_node,
  617. "qcom,mdm9607-apsscc");
  618. if (is_mdm9607) {
  619. cpucc_mdm9607_fixup();
  620. l_val = apcs_cpu_alpha_pll_config.l;
  621. } else {
  622. l_val = apcs_cpu_pll_config.l;
  623. clk_lucid_5lpe_pll_configure(&apcs_cpu_pll, apcs_cpu_pll.clkr.regmap,
  624. &apcs_cpu_pll_config);
  625. }
  626. data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
  627. if (!data)
  628. return -ENOMEM;
  629. data->num = ARRAY_SIZE(cpu_clks_hws);
  630. /* Register clocks with clock framework */
  631. for (i = 0; i < ARRAY_SIZE(cpu_clks_hws); i++) {
  632. ret = devm_clk_hw_register(dev, cpu_clks_hws[i]);
  633. if (ret) {
  634. dev_err(&pdev->dev, "Failed to register clock\n");
  635. return ret;
  636. }
  637. data->hws[i] = cpu_clks_hws[i];
  638. devm_clk_regmap_list_node(dev, to_clk_regmap(cpu_clks_hws[i]));
  639. }
  640. ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, data);
  641. if (ret) {
  642. dev_err(&pdev->dev, "CPU clock driver registration failed\n");
  643. return ret;
  644. }
  645. if (is_mdm9607) {
  646. ret = clk_notifier_register(apcs_mux_clk.clkr.hw.clk,
  647. &apcs_mux_clk.clk_nb);
  648. if (ret) {
  649. dev_err(&pdev->dev,
  650. "failed to register clock notifier: %d\n", ret);
  651. return ret;
  652. }
  653. }
  654. /* Set to boot frequency */
  655. ret = clk_set_rate(apcs_mux_clk.clkr.hw.clk, l_val * xo_rate);
  656. if (ret) {
  657. dev_err(&pdev->dev, "Unable to set init rate on apcs_mux_clk\n");
  658. return ret;
  659. }
  660. /*
  661. * We don't want the CPU clocks to be turned off at late init
  662. * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
  663. * refcount of these clocks. Any cpufreq/hotplug manager can assume
  664. * that the clocks have already been prepared and enabled by the time
  665. * they take over.
  666. */
  667. cpus_read_lock();
  668. for_each_online_cpu(cpu)
  669. WARN(clk_prepare_enable(apcs_mux_clk.clkr.hw.clk),
  670. "Unable to turn on CPU clock\n");
  671. cpus_read_unlock();
  672. cpucc_clk_populate_opp_table(pdev);
  673. dev_info(dev, "CPU clock Driver probed successfully\n");
  674. return 0;
  675. }
/* Platform driver; matched via the of_device_id table above. */
static struct platform_driver cpu_clk_driver = {
	.probe = cpucc_driver_probe,
	.driver = {
		.name = "qcom-cpu-sdxlemur",
		.of_match_table = match_table,
	},
};
static int __init cpu_clk_init(void)
{
	return platform_driver_register(&cpu_clk_driver);
}
/* subsys_initcall: CPU clocks must be up before cpufreq/consumers probe */
subsys_initcall(cpu_clk_init);

static void __exit cpu_clk_exit(void)
{
	platform_driver_unregister(&cpu_clk_driver);
}
module_exit(cpu_clk_exit);

MODULE_DESCRIPTION("SDXLEMUR CPU clock Driver");
MODULE_LICENSE("GPL");