  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/of_platform.h>
  7. #include <linux/module.h>
  8. #include <linux/io.h>
  9. #include <linux/init.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/kernel.h>
  12. #include <linux/clk.h>
  13. #include <linux/clk-provider.h>
  14. #include <linux/ratelimit.h>
  15. #include "bolero-cdc.h"
  16. #include "bolero-clk-rsc.h"
  17. #define DRV_NAME "bolero-clk-rsc"
  18. #define BOLERO_CLK_NAME_LENGTH 30
  19. #define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)
/*
 * Clock consumer names used to look up handles from the "clock-names" DT
 * property. Index order must match the clock ID enum (core clocks first,
 * then their NPL counterparts at index + NPL_CLK_OFFSET).
 */
static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};
/* Private data of the bolero clock resource manager device */
struct bolero_clk_rsc {
	struct device *dev;		/* this platform device */
	struct mutex rsc_clk_lock;	/* serializes all clock state changes */
	struct mutex fs_gen_lock;	/* serializes fs gen sequence refcount */
	struct clk *clk[MAX_CLK];	/* clk handles, indexed by clock ID */
	int clk_cnt[MAX_CLK];		/* per-clock enable refcounts */
	int reg_seq_en_cnt;		/* fs gen sequence enable refcount */
	int va_tx_clk_cnt;		/* VA votes currently parked on TX_CORE_CLK */
	bool dev_up;			/* false while SSR is in progress */
	bool dev_up_gfmux;		/* true when GFMux regs are accessible */
	u32 num_fs_reg;			/* number of (reg, mask, val) triplets */
	u32 *fs_gen_seq;		/* fs gen sequence parsed from DT */
	int default_clk_id[MAX_CLK];	/* mux0 fallback clock per requested clock */
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;	/* ioremapped RX MCLK mode mux select */
	char __iomem *wsa_clk_muxsel;	/* ioremapped WSA MCLK mode mux select */
	char __iomem *va_clk_muxsel;	/* ioremapped VA MCLK mode mux select */
};
  48. static int bolero_clk_rsc_cb(struct device *dev, u16 event)
  49. {
  50. struct bolero_clk_rsc *priv;
  51. if (!dev) {
  52. pr_err("%s: Invalid device pointer\n",
  53. __func__);
  54. return -EINVAL;
  55. }
  56. priv = dev_get_drvdata(dev);
  57. if (!priv) {
  58. pr_err("%s: Invalid clk rsc priviate data\n",
  59. __func__);
  60. return -EINVAL;
  61. }
  62. mutex_lock(&priv->rsc_clk_lock);
  63. if (event == BOLERO_MACRO_EVT_SSR_UP) {
  64. priv->dev_up = true;
  65. } else if (event == BOLERO_MACRO_EVT_SSR_DOWN) {
  66. priv->dev_up = false;
  67. priv->dev_up_gfmux = false;
  68. } else if (event == BOLERO_MACRO_EVT_SSR_GFMUX_UP) {
  69. priv->dev_up_gfmux = true;
  70. }
  71. mutex_unlock(&priv->rsc_clk_lock);
  72. return 0;
  73. }
  74. static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
  75. int clk_id)
  76. {
  77. switch (clk_id) {
  78. case RX_CORE_CLK:
  79. return priv->rx_clk_muxsel;
  80. case WSA_CORE_CLK:
  81. return priv->wsa_clk_muxsel;
  82. case VA_CORE_CLK:
  83. return priv->va_clk_muxsel;
  84. case TX_CORE_CLK:
  85. default:
  86. dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
  87. break;
  88. }
  89. return NULL;
  90. }
/*
 * bolero_rsc_clk_reset - resynchronize a clock pair's hardware state after
 * SSR by dropping every outstanding enable and then re-applying the same
 * number of enables.
 *
 * @dev: macro device; its parent is used to find the rsc clk device
 * @clk_id: core clock ID (must be below MAX_CLK - NPL_CLK_OFFSET, i.e. a
 *          core clock; the paired NPL clock is handled implicitly)
 *
 * Returns 0 on success or -EINVAL on invalid arguments.
 */
int bolero_rsc_clk_reset(struct device *dev, int clk_id)
{
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;
	int count = 0;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}

	/* Only core clock IDs are accepted; NPL IDs are out of range here */
	if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
		pr_err("%s: Invalid clk_id: %d\n",
			__func__, clk_id);
		return -EINVAL;
	}

	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk priviate data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	/*
	 * Fully unwind the prepare_enable refcount of the clock pair,
	 * counting iterations so the same depth can be restored below.
	 */
	while (__clk_is_enabled(priv->clk[clk_id])) {
		clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
		clk_disable_unprepare(priv->clk[clk_id]);
		count++;
	}
	dev_dbg(priv->dev,
		"%s: clock reset after ssr, count %d\n", __func__, count);
	trace_printk("%s: clock reset after ssr, count %d\n", __func__, count);

	/* Re-apply the same number of enables: core first, then NPL */
	while (count--) {
		clk_prepare_enable(priv->clk[clk_id]);
		clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
	}
	mutex_unlock(&priv->rsc_clk_lock);
	return 0;
}
EXPORT_SYMBOL(bolero_rsc_clk_reset);
  132. void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
  133. {
  134. struct device *clk_dev = NULL;
  135. struct bolero_clk_rsc *priv = NULL;
  136. int i = 0;
  137. if (!dev) {
  138. pr_err("%s: dev is null\n", __func__);
  139. return;
  140. }
  141. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  142. if (!clk_dev) {
  143. pr_err("%s: Invalid rsc clk device\n", __func__);
  144. return;
  145. }
  146. priv = dev_get_drvdata(clk_dev);
  147. if (!priv) {
  148. pr_err("%s: Invalid rsc clk private data\n", __func__);
  149. return;
  150. }
  151. mutex_lock(&priv->rsc_clk_lock);
  152. for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
  153. if (enable) {
  154. if (priv->clk[i])
  155. clk_prepare_enable(priv->clk[i]);
  156. if (priv->clk[i + NPL_CLK_OFFSET])
  157. clk_prepare_enable(
  158. priv->clk[i + NPL_CLK_OFFSET]);
  159. } else {
  160. if (priv->clk[i + NPL_CLK_OFFSET])
  161. clk_disable_unprepare(
  162. priv->clk[i + NPL_CLK_OFFSET]);
  163. if (priv->clk[i])
  164. clk_disable_unprepare(priv->clk[i]);
  165. }
  166. }
  167. mutex_unlock(&priv->rsc_clk_lock);
  168. return;
  169. }
  170. EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);
/*
 * bolero_clk_rsc_mux0_clk_request - refcounted enable/disable of a clock on
 * its default (mux0) source, together with its optional NPL pair clock.
 *
 * @priv: rsc clk private data
 * @clk_id: core clock ID
 * @enable: true to enable, false to disable
 *
 * Hardware is only touched on the 0 -> 1 and 1 -> 0 refcount transitions.
 * Caller must hold priv->rsc_clk_lock. Returns 0 on success or a negative
 * error code from clk_prepare_enable().
 */
static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	int ret = 0;
	static DEFINE_RATELIMIT_STATE(rtl, 1 * HZ, 1);

	if (enable) {
		/* Enable Requested Core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				if (__ratelimit(&rtl))
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						__func__, clk_id);
				goto done;
			}
			/* NPL pair clock is optional; enable when present */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					if (__ratelimit(&rtl))
						dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							__func__,
							clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Guard against unbalanced disables; clamp count at zero */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
				__func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* NPL pair clock goes down before its core clock */
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}
	return ret;

err:
	/* NPL enable failed: roll back the core clock enable */
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}
/*
 * bolero_clk_rsc_mux1_clk_request - refcounted enable/disable of a clock
 * whose source is switched through a GFMux (the mux1 path).
 *
 * @priv: rsc clk private data
 * @clk_id: core clock ID; must have a muxsel register (RX/WSA/VA)
 * @enable: true to enable, false to disable
 *
 * On the 0 -> 1 transition the default (mux0) clock is temporarily enabled
 * so the GFMux has a running source while it is flipped to the requested
 * clock; the mirror sequence runs on the 1 -> 0 transition. VA_CORE_CLK
 * deliberately skips the mux writes (see glitch-workaround comments).
 * Caller must hold priv->rsc_clk_lock. Returns 0 or a negative error code.
 */
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];
	u32 muxsel = 0;
	static DEFINE_RATELIMIT_STATE(rtl, 1 * HZ, 1);

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* Keep the default source running while switching */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								true);
				if (ret < 0)
					goto done;
			}
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				if (__ratelimit(&rtl))
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						__func__, clk_id);
				goto err_clk;
			}
			/* NPL pair clock is optional; enable when present */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					if (__ratelimit(&rtl))
						dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							__func__,
							clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			/*
			 * Temp SW workaround to address a glitch issue of
			 * VA GFMux instance responsible for switching from
			 * TX MCLK to VA MCLK. This configuration would be taken
			 * care in DSP itself
			 */
			if (clk_id != VA_CORE_CLK) {
				/* Only touch the mux while hw is accessible */
				if (priv->dev_up_gfmux) {
					iowrite32(0x1, clk_muxsel);
					muxsel = ioread32(clk_muxsel);
					trace_printk("%s: muxsel value after enable: %d\n",
						__func__, muxsel);
				}
				/* Drop the temporary default-source vote */
				bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								false);
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Guard against unbalanced disables; clamp count at zero */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
				__func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Re-enable default source while flipping mux back */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, true);
				if (!ret) {
					/*
					 * Temp SW workaround to address a glitch issue
					 * of VA GFMux instance responsible for
					 * switching from TX MCLK to VA MCLK.
					 * This configuration would be taken
					 * care in DSP itself.
					 */
					if (priv->dev_up_gfmux) {
						iowrite32(0x0, clk_muxsel);
						muxsel = ioread32(clk_muxsel);
						trace_printk("%s: muxsel value after disable: %d\n",
							__func__, muxsel);
					}
				}
			}
			/* NPL pair clock goes down before its core clock */
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
			/* Release the temporary default-source vote, if taken */
			if (clk_id != VA_CORE_CLK) {
				if (!ret)
					bolero_clk_rsc_mux0_clk_request(priv,
							default_clk_id, false);
			}
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);
err_clk:
	if (clk_id != VA_CORE_CLK)
		bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
/*
 * bolero_clk_rsc_check_and_update_va_clk - keep VA clocked from the right
 * source when VA and TX usecases run concurrently.
 *
 * @priv: rsc clk private data
 * @mux_switch: true when the original request used the mux1 (switched) path
 * @clk_id: core clock ID of the original request
 * @enable: direction of the original request
 *
 * VA either runs on its own VA_CORE_CLK or piggybacks on TX_CORE_CLK when
 * TX is already active; priv->va_tx_clk_cnt counts VA votes currently
 * parked on TX_CORE_CLK. Caller must hold priv->rsc_clk_lock.
 * Returns 0 on success or a negative error code.
 */
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, true);
				if (ret < 0)
					goto err;
				/* move each VA vote over to TX_CORE_CLK */
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, true);
				if (ret < 0)
					goto err;
				/* move each parked VA vote back to VA_CORE_CLK */
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}
err:
	return ret;
}
/**
 * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
 * sequence
 *
 * @dev: Macro device pointer
 * @enable: enable or disable flag
 *
 * The fs gen sequence is a DT-provided list of (register, mask, value)
 * triplets. Triplets are applied in order on the first enable request and
 * cleared (mask written with 0) in reverse order on the last disable;
 * priv->reg_seq_en_cnt refcounts concurrent users.
 */
void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
{
	int i;
	struct regmap *regmap;
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}
	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}
	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk priviate data\n", __func__);
		return;
	}
	/* regmap lives on the parent (bolero core) device */
	regmap = dev_get_regmap(priv->dev->parent, NULL);
	if (!regmap) {
		pr_err("%s: regmap is null\n", __func__);
		return;
	}
	mutex_lock(&priv->fs_gen_lock);
	if (enable) {
		/* Apply the sequence only on the first enable request */
		if (priv->reg_seq_en_cnt++ == 0) {
			for (i = 0; i < (priv->num_fs_reg * 3); i += 3) {
				dev_dbg(priv->dev, "%s: Register: %d, mask: %d, value %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1],
					priv->fs_gen_seq[i + 2]);
				regmap_update_bits(regmap,
						priv->fs_gen_seq[i],
						priv->fs_gen_seq[i + 1],
						priv->fs_gen_seq[i + 2]);
			}
		}
	} else {
		/* Guard against unbalanced disables; clamp count at zero */
		if (priv->reg_seq_en_cnt <= 0) {
			dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
				__func__, priv->reg_seq_en_cnt);
			priv->reg_seq_en_cnt = 0;
			mutex_unlock(&priv->fs_gen_lock);
			return;
		}
		/* Undo the sequence in reverse order on the last disable */
		if (--priv->reg_seq_en_cnt == 0) {
			for (i = ((priv->num_fs_reg - 1) * 3); i >= 0; i -= 3) {
				dev_dbg(priv->dev, "%s: Register: %d, mask: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap, priv->fs_gen_seq[i],
						priv->fs_gen_seq[i + 1], 0x0);
			}
		}
	}
	mutex_unlock(&priv->fs_gen_lock);
}
EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
/**
 * bolero_clk_rsc_request_clock - request for clock to
 * enable/disable
 *
 * @dev: Macro device pointer.
 * @default_clk_id: mux0 Core clock ID input.
 * @clk_id_req: Core clock ID requested to enable/disable
 * @enable: enable or disable clock flag
 *
 * When the requested clock differs from the default, the mux1 (GFMux
 * switched) path is used; VA_CORE_CLK mux requests are deferred entirely
 * to bolero_clk_rsc_check_and_update_va_clk(), which also reconciles
 * concurrent VA/TX usecases after every request.
 *
 * Returns 0 on success or -EINVAL on error.
 */
int bolero_clk_rsc_request_clock(struct device *dev,
				 int default_clk_id,
				 int clk_id_req,
				 bool enable)
{
	int ret = 0;
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;
	bool mux_switch = false;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}
	if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
	    (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
		pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
			__func__, clk_id_req, default_clk_id);
		return -EINVAL;
	}
	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}
	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk priviate data\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&priv->rsc_clk_lock);
	/* Reject enable requests while the subsystem is restarting */
	if (!priv->dev_up && enable) {
		dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
				__func__);
		trace_printk("%s: SSR is in progress..\n", __func__);
		ret = -EINVAL;
		goto err;
	}
	priv->default_clk_id[clk_id_req] = default_clk_id;
	/* A request differing from the default clock needs a mux switch */
	if (default_clk_id != clk_id_req)
		mux_switch = true;

	if (mux_switch) {
		/* VA_CORE_CLK mux handling is done in the VA update below */
		if (clk_id_req != VA_CORE_CLK) {
			ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
							      enable);
			if (ret < 0)
				goto err;
		}
	} else {
		ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
		if (ret < 0)
			goto err;
	}
	ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
						     clk_id_req,
						     enable);
	if (ret < 0)
		goto err;
	dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
		__func__, priv->clk_cnt[clk_id_req], clk_id_req,
		enable);
	trace_printk("%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
		__func__, priv->clk_cnt[clk_id_req], clk_id_req,
		enable);
	mutex_unlock(&priv->rsc_clk_lock);
	return 0;
err:
	mutex_unlock(&priv->rsc_clk_lock);
	return ret;
}
EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
  553. static int bolero_clk_rsc_probe(struct platform_device *pdev)
  554. {
  555. int ret = 0, fs_gen_size, i, j;
  556. const char **clk_name_array;
  557. int clk_cnt;
  558. struct clk *clk;
  559. struct bolero_clk_rsc *priv = NULL;
  560. u32 muxsel = 0;
  561. priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
  562. GFP_KERNEL);
  563. if (!priv)
  564. return -ENOMEM;
  565. /* Get clk fs gen sequence from device tree */
  566. if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
  567. &fs_gen_size)) {
  568. dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
  569. __func__);
  570. ret = -EINVAL;
  571. goto err;
  572. }
  573. priv->num_fs_reg = fs_gen_size/(3 * sizeof(u32));
  574. priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
  575. if (!priv->fs_gen_seq) {
  576. ret = -ENOMEM;
  577. goto err;
  578. }
  579. dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
  580. /* Parse fs-gen-sequence */
  581. ret = of_property_read_u32_array(pdev->dev.of_node,
  582. "qcom,fs-gen-sequence",
  583. priv->fs_gen_seq,
  584. priv->num_fs_reg * 3);
  585. if (ret < 0) {
  586. dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
  587. __func__, ret);
  588. goto err;
  589. }
  590. /* Get clk details from device tree */
  591. clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
  592. if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
  593. dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
  594. __func__, clk_cnt);
  595. ret = -EINVAL;
  596. goto err;
  597. }
  598. clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
  599. GFP_KERNEL);
  600. if (!clk_name_array) {
  601. ret = -ENOMEM;
  602. goto err;
  603. }
  604. ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
  605. clk_name_array, clk_cnt);
  606. for (i = 0; i < MAX_CLK; i++) {
  607. priv->clk[i] = NULL;
  608. for (j = 0; j < clk_cnt; j++) {
  609. if (!strcmp(clk_src_name[i], clk_name_array[j])) {
  610. clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
  611. if (IS_ERR(clk)) {
  612. ret = PTR_ERR(clk);
  613. dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
  614. __func__, clk_src_name[i], ret);
  615. goto err;
  616. }
  617. priv->clk[i] = clk;
  618. dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
  619. __func__, clk_src_name[i]);
  620. }
  621. }
  622. }
  623. ret = of_property_read_u32(pdev->dev.of_node,
  624. "qcom,rx_mclk_mode_muxsel", &muxsel);
  625. if (ret) {
  626. dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
  627. __func__);
  628. } else {
  629. priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  630. if (!priv->rx_clk_muxsel) {
  631. dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
  632. __func__);
  633. return -ENOMEM;
  634. }
  635. }
  636. ret = of_property_read_u32(pdev->dev.of_node,
  637. "qcom,wsa_mclk_mode_muxsel", &muxsel);
  638. if (ret) {
  639. dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
  640. __func__);
  641. } else {
  642. priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  643. if (!priv->wsa_clk_muxsel) {
  644. dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
  645. __func__);
  646. return -ENOMEM;
  647. }
  648. }
  649. ret = of_property_read_u32(pdev->dev.of_node,
  650. "qcom,va_mclk_mode_muxsel", &muxsel);
  651. if (ret) {
  652. dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
  653. __func__);
  654. } else {
  655. priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  656. if (!priv->va_clk_muxsel) {
  657. dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
  658. __func__);
  659. return -ENOMEM;
  660. }
  661. }
  662. ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
  663. if (ret < 0) {
  664. dev_err(&pdev->dev, "%s: Failed to register cb %d",
  665. __func__, ret);
  666. goto err;
  667. }
  668. priv->dev = &pdev->dev;
  669. priv->dev_up = true;
  670. priv->dev_up_gfmux = true;
  671. mutex_init(&priv->rsc_clk_lock);
  672. mutex_init(&priv->fs_gen_lock);
  673. dev_set_drvdata(&pdev->dev, priv);
  674. err:
  675. return ret;
  676. }
  677. static int bolero_clk_rsc_remove(struct platform_device *pdev)
  678. {
  679. struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
  680. bolero_unregister_res_clk(&pdev->dev);
  681. of_platform_depopulate(&pdev->dev);
  682. if (!priv)
  683. return -EINVAL;
  684. mutex_destroy(&priv->rsc_clk_lock);
  685. mutex_destroy(&priv->fs_gen_lock);
  686. return 0;
  687. }
/* Device-tree compatible strings handled by this driver */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);
/* Platform driver glue for the bolero clock resource manager */
static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
/* Register the clock resource manager driver (called by the bolero core) */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}
/* Unregister the clock resource manager driver (called by the bolero core) */
void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
  711. MODULE_DESCRIPTION("Bolero clock resource manager driver");
  712. MODULE_LICENSE("GPL v2");