bolero-clk-rsc.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/of_platform.h>
  6. #include <linux/module.h>
  7. #include <linux/io.h>
  8. #include <linux/init.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/kernel.h>
  11. #include <linux/clk.h>
  12. #include <linux/clk-provider.h>
  13. #include "bolero-cdc.h"
  14. #include "bolero-clk-rsc.h"
  15. #define DRV_NAME "bolero-clk-rsc"
  16. #define BOLERO_CLK_NAME_LENGTH 30
  17. #define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)
/*
 * Clock consumer names looked up via devm_clk_get() in probe.
 * NOTE(review): index order must match the clk id enum (TX/RX/WSA/VA core
 * clks followed by their NPL counterparts, presumably declared in
 * bolero-clk-rsc.h) — NPL_CLK_OFFSET arithmetic relies on this layout.
 */
static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};
/*
 * Driver-private state for the bolero clock resource manager.
 * One instance per platform device, stored via dev_set_drvdata().
 */
struct bolero_clk_rsc {
	struct device *dev;             /* this clk rsc platform device */
	struct mutex rsc_clk_lock;      /* serializes all clk voting/state */
	struct clk *clk[MAX_CLK];       /* clk handles, indexed by clk id */
	int clk_cnt[MAX_CLK];           /* per-clk enable reference count */
	int reg_seq_en_cnt;             /* fs-gen sequence enable refcount */
	int va_tx_clk_cnt;              /* VA votes satisfied via TX core clk */
	bool dev_up;                    /* false between SSR_DOWN and SSR_UP */
	u32 num_fs_reg;                 /* number of reg/mask pairs in fs_gen_seq */
	u32 *fs_gen_seq;                /* reg/mask pairs from qcom,fs-gen-sequence */
	int default_clk_id[MAX_CLK];    /* mux0 fallback clk id per requested clk */
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;    /* ioremapped mclk-mode muxsel registers */
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};
  44. static int bolero_clk_rsc_cb(struct device *dev, u16 event)
  45. {
  46. struct bolero_clk_rsc *priv;
  47. if (!dev) {
  48. pr_err("%s: Invalid device pointer\n",
  49. __func__);
  50. return -EINVAL;
  51. }
  52. priv = dev_get_drvdata(dev);
  53. if (!priv) {
  54. pr_err("%s: Invalid clk rsc priviate data\n",
  55. __func__);
  56. return -EINVAL;
  57. }
  58. mutex_lock(&priv->rsc_clk_lock);
  59. if (event == BOLERO_MACRO_EVT_SSR_UP)
  60. priv->dev_up = true;
  61. else if (event == BOLERO_MACRO_EVT_SSR_DOWN)
  62. priv->dev_up = false;
  63. mutex_unlock(&priv->rsc_clk_lock);
  64. return 0;
  65. }
  66. static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
  67. int clk_id)
  68. {
  69. switch (clk_id) {
  70. case RX_CORE_CLK:
  71. return priv->rx_clk_muxsel;
  72. case WSA_CORE_CLK:
  73. return priv->wsa_clk_muxsel;
  74. case VA_CORE_CLK:
  75. return priv->va_clk_muxsel;
  76. case TX_CORE_CLK:
  77. default:
  78. dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
  79. break;
  80. }
  81. return NULL;
  82. }
  83. int bolero_rsc_clk_reset(struct device *dev, int clk_id)
  84. {
  85. struct device *clk_dev = NULL;
  86. struct bolero_clk_rsc *priv = NULL;
  87. int count = 0;
  88. if (!dev) {
  89. pr_err("%s: dev is null %d\n", __func__);
  90. return -EINVAL;
  91. }
  92. if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
  93. pr_err("%s: Invalid clk_id: %d\n",
  94. __func__, clk_id);
  95. return -EINVAL;
  96. }
  97. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  98. if (!clk_dev) {
  99. pr_err("%s: Invalid rsc clk device\n", __func__);
  100. return -EINVAL;
  101. }
  102. priv = dev_get_drvdata(clk_dev);
  103. if (!priv) {
  104. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  105. return -EINVAL;
  106. }
  107. mutex_lock(&priv->rsc_clk_lock);
  108. while (__clk_is_enabled(priv->clk[clk_id])) {
  109. clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
  110. clk_disable_unprepare(priv->clk[clk_id]);
  111. count++;
  112. }
  113. dev_dbg(priv->dev,
  114. "%s: clock reset after ssr, count %d\n", __func__, count);
  115. while (count--) {
  116. clk_prepare_enable(priv->clk[clk_id]);
  117. clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
  118. }
  119. mutex_unlock(&priv->rsc_clk_lock);
  120. return 0;
  121. }
  122. EXPORT_SYMBOL(bolero_rsc_clk_reset);
  123. void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
  124. {
  125. struct device *clk_dev = NULL;
  126. struct bolero_clk_rsc *priv = NULL;
  127. int i = 0;
  128. if (!dev) {
  129. pr_err("%s: dev is null %d\n", __func__);
  130. return;
  131. }
  132. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  133. if (!clk_dev) {
  134. pr_err("%s: Invalid rsc clk device\n", __func__);
  135. return;
  136. }
  137. priv = dev_get_drvdata(clk_dev);
  138. if (!priv) {
  139. pr_err("%s: Invalid rsc clk private data\n", __func__);
  140. return;
  141. }
  142. mutex_lock(&priv->rsc_clk_lock);
  143. for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
  144. if (enable) {
  145. if (priv->clk[i])
  146. clk_prepare_enable(priv->clk[i]);
  147. if (priv->clk[i + NPL_CLK_OFFSET])
  148. clk_prepare_enable(
  149. priv->clk[i + NPL_CLK_OFFSET]);
  150. } else {
  151. if (priv->clk[i + NPL_CLK_OFFSET])
  152. clk_disable_unprepare(
  153. priv->clk[i + NPL_CLK_OFFSET]);
  154. if (priv->clk[i])
  155. clk_disable_unprepare(priv->clk[i]);
  156. }
  157. }
  158. mutex_unlock(&priv->rsc_clk_lock);
  159. return;
  160. }
  161. EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);
/*
 * Vote/unvote a clk that runs from its default (mux0) source.
 * Hardware is touched only on the 0->1 and 1->0 refcount transitions;
 * the NPL pair clk (clk_id + NPL_CLK_OFFSET) is enabled after and
 * disabled before the core clk.
 * Caller must hold priv->rsc_clk_lock (taken in
 * bolero_clk_rsc_request_clock before calling here).
 * Returns 0 on success or a negative errno from clk_prepare_enable().
 */
static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	int ret = 0;

	if (enable) {
		/* Enable Requested Core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto done;
			}
			/* NPL pair is optional; only enabled when present */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Guard against unbalanced disable; clamp count to zero */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Tear down in reverse order: NPL first, then core */
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}
	return ret;

err:
	/* NPL enable failed: undo the core clk enable */
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}
/*
 * Vote/unvote a clk routed through the mux1 (mclk mode) source.
 * On the 0->1 transition the default (mux0) clk is enabled temporarily so
 * the muxsel register write lands while a parent is running, then released.
 * The same bracketing is done in reverse on the 1->0 transition.
 * Caller must hold priv->rsc_clk_lock.
 * Returns 0 on success or a negative errno.
 */
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];

	/* No muxsel register => this clk cannot run in mux1 mode */
	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* Bring up the default clk so the mux switch is safe */
			ret = bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							      true);
			if (ret < 0)
				goto done;

			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto err_clk;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			/* Switch mux to mclk mode, then drop the helper vote */
			iowrite32(0x1, clk_muxsel);
			bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							false);
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Guard against unbalanced disable; clamp count to zero */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Same bracketing on the way down: default clk on,
			 * mux back to 0, target clks off, default clk off.
			 */
			ret = bolero_clk_rsc_mux0_clk_request(priv,
							      default_clk_id, true);
			if (!ret)
				iowrite32(0x0, clk_muxsel);
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
			if (!ret)
				bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id, false);
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);
err_clk:
	bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
/*
 * Reconcile the VA clk with the TX core clk for concurrency use cases.
 * When recording is active the VA macro can be driven from TX_CORE_CLK
 * instead of VA_CORE_CLK; va_tx_clk_cnt tracks how many VA votes are
 * currently being satisfied by TX clk votes. The while-loops migrate
 * every outstanding VA vote between the two sources.
 * Caller must hold priv->rsc_clk_lock.
 * Returns 0 on success or a negative errno from the clk requests.
 */
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
								      VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
								      TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
								      TX_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux1_clk_request(priv,
								VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
								VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
								TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
								      VA_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux0_clk_request(priv,
								TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}
err:
	return ret;
}
  354. /**
  355. * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
  356. * sequence
  357. *
  358. * @dev: Macro device pointer
  359. * @enable: enable or disable flag
  360. */
  361. void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
  362. {
  363. int i;
  364. struct regmap *regmap;
  365. struct device *clk_dev = NULL;
  366. struct bolero_clk_rsc *priv = NULL;
  367. if (!dev) {
  368. pr_err("%s: dev is null %d\n", __func__);
  369. return;
  370. }
  371. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  372. if (!clk_dev) {
  373. pr_err("%s: Invalid rsc clk device\n", __func__);
  374. return;
  375. }
  376. priv = dev_get_drvdata(clk_dev);
  377. if (!priv) {
  378. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  379. return;
  380. }
  381. regmap = dev_get_regmap(priv->dev->parent, NULL);
  382. if (!regmap) {
  383. pr_err("%s: regmap is null\n", __func__);
  384. return;
  385. }
  386. if (enable) {
  387. if (priv->reg_seq_en_cnt++ == 0) {
  388. for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
  389. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  390. __func__, priv->fs_gen_seq[i],
  391. priv->fs_gen_seq[i + 1]);
  392. regmap_update_bits(regmap,
  393. priv->fs_gen_seq[i],
  394. priv->fs_gen_seq[i + 1],
  395. priv->fs_gen_seq[i + 1]);
  396. }
  397. }
  398. } else {
  399. if (priv->reg_seq_en_cnt <= 0) {
  400. dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
  401. __func__, priv->reg_seq_en_cnt);
  402. priv->reg_seq_en_cnt = 0;
  403. return;
  404. }
  405. if (--priv->reg_seq_en_cnt == 0) {
  406. for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
  407. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  408. __func__, priv->fs_gen_seq[i],
  409. priv->fs_gen_seq[i + 1]);
  410. regmap_update_bits(regmap, priv->fs_gen_seq[i],
  411. priv->fs_gen_seq[i + 1], 0x0);
  412. }
  413. }
  414. }
  415. }
  416. EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
  417. /**
  418. * bolero_clk_rsc_request_clock - request for clock to
  419. * enable/disable
  420. *
  421. * @dev: Macro device pointer.
  422. * @default_clk_id: mux0 Core clock ID input.
  423. * @clk_id_req: Core clock ID requested to enable/disable
  424. * @enable: enable or disable clock flag
  425. *
  426. * Returns 0 on success or -EINVAL on error.
  427. */
  428. int bolero_clk_rsc_request_clock(struct device *dev,
  429. int default_clk_id,
  430. int clk_id_req,
  431. bool enable)
  432. {
  433. int ret = 0;
  434. struct device *clk_dev = NULL;
  435. struct bolero_clk_rsc *priv = NULL;
  436. bool mux_switch = false;
  437. if (!dev) {
  438. pr_err("%s: dev is null %d\n", __func__);
  439. return -EINVAL;
  440. }
  441. if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
  442. (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
  443. pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
  444. __func__, clk_id_req, default_clk_id);
  445. return -EINVAL;
  446. }
  447. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  448. if (!clk_dev) {
  449. pr_err("%s: Invalid rsc clk device\n", __func__);
  450. return -EINVAL;
  451. }
  452. priv = dev_get_drvdata(clk_dev);
  453. if (!priv) {
  454. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  455. return -EINVAL;
  456. }
  457. mutex_lock(&priv->rsc_clk_lock);
  458. if (!priv->dev_up && enable) {
  459. dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
  460. __func__);
  461. ret = -EINVAL;
  462. goto err;
  463. }
  464. priv->default_clk_id[clk_id_req] = default_clk_id;
  465. if (default_clk_id != clk_id_req)
  466. mux_switch = true;
  467. if (mux_switch) {
  468. if (clk_id_req != VA_CORE_CLK) {
  469. ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
  470. enable);
  471. if (ret < 0)
  472. goto err;
  473. }
  474. } else {
  475. ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
  476. if (ret < 0)
  477. goto err;
  478. }
  479. ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
  480. clk_id_req,
  481. enable);
  482. if (ret < 0)
  483. goto err;
  484. dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  485. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  486. enable);
  487. mutex_unlock(&priv->rsc_clk_lock);
  488. return 0;
  489. err:
  490. mutex_unlock(&priv->rsc_clk_lock);
  491. return ret;
  492. }
  493. EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
  494. static int bolero_clk_rsc_probe(struct platform_device *pdev)
  495. {
  496. int ret = 0, fs_gen_size, i, j;
  497. const char **clk_name_array;
  498. int clk_cnt;
  499. struct clk *clk;
  500. struct bolero_clk_rsc *priv = NULL;
  501. u32 muxsel = 0;
  502. priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
  503. GFP_KERNEL);
  504. if (!priv)
  505. return -ENOMEM;
  506. /* Get clk fs gen sequence from device tree */
  507. if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
  508. &fs_gen_size)) {
  509. dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
  510. __func__);
  511. ret = -EINVAL;
  512. goto err;
  513. }
  514. priv->num_fs_reg = fs_gen_size/(2 * sizeof(u32));
  515. priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
  516. if (!priv->fs_gen_seq) {
  517. ret = -ENOMEM;
  518. goto err;
  519. }
  520. dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
  521. /* Parse fs-gen-sequence */
  522. ret = of_property_read_u32_array(pdev->dev.of_node,
  523. "qcom,fs-gen-sequence",
  524. priv->fs_gen_seq,
  525. priv->num_fs_reg * 2);
  526. if (ret < 0) {
  527. dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
  528. __func__, ret);
  529. goto err;
  530. }
  531. /* Get clk details from device tree */
  532. clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
  533. if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
  534. dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
  535. __func__, clk_cnt);
  536. ret = -EINVAL;
  537. goto err;
  538. }
  539. clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
  540. GFP_KERNEL);
  541. if (!clk_name_array) {
  542. ret = -ENOMEM;
  543. goto err;
  544. }
  545. ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
  546. clk_name_array, clk_cnt);
  547. for (i = 0; i < MAX_CLK; i++) {
  548. priv->clk[i] = NULL;
  549. for (j = 0; j < clk_cnt; j++) {
  550. if (!strcmp(clk_src_name[i], clk_name_array[j])) {
  551. clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
  552. if (IS_ERR(clk)) {
  553. ret = PTR_ERR(clk);
  554. dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
  555. __func__, clk_src_name[i], ret);
  556. goto err;
  557. }
  558. priv->clk[i] = clk;
  559. dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
  560. __func__, clk_src_name[i]);
  561. }
  562. }
  563. }
  564. ret = of_property_read_u32(pdev->dev.of_node,
  565. "qcom,rx_mclk_mode_muxsel", &muxsel);
  566. if (ret) {
  567. dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
  568. __func__);
  569. } else {
  570. priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  571. if (!priv->rx_clk_muxsel) {
  572. dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
  573. __func__);
  574. return -ENOMEM;
  575. }
  576. }
  577. ret = of_property_read_u32(pdev->dev.of_node,
  578. "qcom,wsa_mclk_mode_muxsel", &muxsel);
  579. if (ret) {
  580. dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
  581. __func__);
  582. } else {
  583. priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  584. if (!priv->wsa_clk_muxsel) {
  585. dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
  586. __func__);
  587. return -ENOMEM;
  588. }
  589. }
  590. ret = of_property_read_u32(pdev->dev.of_node,
  591. "qcom,va_mclk_mode_muxsel", &muxsel);
  592. if (ret) {
  593. dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
  594. __func__);
  595. } else {
  596. priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  597. if (!priv->va_clk_muxsel) {
  598. dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
  599. __func__);
  600. return -ENOMEM;
  601. }
  602. }
  603. ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
  604. if (ret < 0) {
  605. dev_err(&pdev->dev, "%s: Failed to register cb %d",
  606. __func__, ret);
  607. goto err;
  608. }
  609. priv->dev = &pdev->dev;
  610. priv->dev_up = true;
  611. mutex_init(&priv->rsc_clk_lock);
  612. dev_set_drvdata(&pdev->dev, priv);
  613. err:
  614. return ret;
  615. }
  616. static int bolero_clk_rsc_remove(struct platform_device *pdev)
  617. {
  618. struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
  619. bolero_unregister_res_clk(&pdev->dev);
  620. of_platform_depopulate(&pdev->dev);
  621. if (!priv)
  622. return -EINVAL;
  623. mutex_destroy(&priv->rsc_clk_lock);
  624. return 0;
  625. }
/* Device-tree match table; bound via .of_match_table below. */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);
/* Platform driver registered/unregistered by the init/exit helpers below. */
static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		/* driver cannot be bound/unbound via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
/*
 * Register the platform driver; called by the parent bolero module
 * (non-static, presumably declared in bolero-clk-rsc.h).
 */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}
/* Unregister the platform driver; counterpart of bolero_clk_rsc_mgr_init(). */
void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
  649. MODULE_DESCRIPTION("Bolero clock resource manager driver");
  650. MODULE_LICENSE("GPL v2");