lpass-cdc-clk-rsc.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/of_platform.h>
  6. #include <linux/module.h>
  7. #include <linux/io.h>
  8. #include <linux/init.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/kernel.h>
  11. #include <linux/clk.h>
  12. #include <linux/clk-provider.h>
  13. #include "lpass-cdc.h"
  14. #include "lpass-cdc-clk-rsc.h"
  15. #define DRV_NAME "lpass-cdc-clk-rsc"
  16. #define LPASS_CDC_CLK_NAME_LENGTH 30
/*
 * Clock consumer names looked up in the "clock-names" DT property and passed
 * to devm_clk_get() in probe. Indexed by clock ID; NOTE(review): assumes the
 * order matches the MAX_CLK enum in lpass-cdc-clk-rsc.h — confirm against
 * that header.
 */
static char clk_src_name[MAX_CLK][LPASS_CDC_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"wsa2_core_clk",
	"rx_tx_core_clk",
	"wsa_tx_core_clk",
	"wsa2_tx_core_clk",
};
/* Per-device state of the clock resource manager. */
struct lpass_cdc_clk_rsc {
	struct device *dev;		/* this platform device */
	struct mutex rsc_clk_lock;	/* guards clk_cnt[], va_tx_clk_cnt, dev_up* */
	struct mutex fs_gen_lock;	/* guards reg_seq_en_cnt and the fs-gen sequence */
	struct clk *clk[MAX_CLK];	/* clock handles; NULL when absent from DT */
	int clk_cnt[MAX_CLK];		/* per-clock enable refcounts */
	int reg_seq_en_cnt;		/* fs-gen register sequence refcount */
	int va_tx_clk_cnt;		/* VA users currently riding on TX_CORE_CLK */
	bool dev_up;			/* cleared during SSR (subsystem restart) */
	bool dev_up_gfmux;		/* gfmux MMIO accessible; gates muxsel writes */
	u32 num_fs_reg;			/* number of (reg, mask) pairs in fs_gen_seq */
	u32 *fs_gen_seq;		/* flattened (reg, mask) pairs parsed from DT */
	int default_clk_id[MAX_CLK];	/* mux0 fallback clock for each mux1 clock */
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;	/* muxsel MMIO registers; may be NULL if */
	char __iomem *wsa_clk_muxsel;	/* the corresponding DT entry is missing */
	char __iomem *va_clk_muxsel;
};
  45. static int lpass_cdc_clk_rsc_cb(struct device *dev, u16 event)
  46. {
  47. struct lpass_cdc_clk_rsc *priv;
  48. if (!dev) {
  49. pr_err("%s: Invalid device pointer\n",
  50. __func__);
  51. return -EINVAL;
  52. }
  53. priv = dev_get_drvdata(dev);
  54. if (!priv) {
  55. pr_err("%s: Invalid clk rsc priviate data\n",
  56. __func__);
  57. return -EINVAL;
  58. }
  59. mutex_lock(&priv->rsc_clk_lock);
  60. if (event == LPASS_CDC_MACRO_EVT_SSR_UP) {
  61. priv->dev_up = true;
  62. } else if (event == LPASS_CDC_MACRO_EVT_SSR_DOWN) {
  63. priv->dev_up = false;
  64. priv->dev_up_gfmux = false;
  65. } else if (event == LPASS_CDC_MACRO_EVT_SSR_GFMUX_UP) {
  66. priv->dev_up_gfmux = true;
  67. }
  68. mutex_unlock(&priv->rsc_clk_lock);
  69. return 0;
  70. }
  71. static char __iomem *lpass_cdc_clk_rsc_get_clk_muxsel(struct lpass_cdc_clk_rsc *priv,
  72. int clk_id)
  73. {
  74. switch (clk_id) {
  75. case RX_CORE_CLK:
  76. return priv->rx_clk_muxsel;
  77. case WSA_CORE_CLK:
  78. case WSA2_CORE_CLK:
  79. return priv->wsa_clk_muxsel;
  80. case VA_CORE_CLK:
  81. return priv->va_clk_muxsel;
  82. case TX_CORE_CLK:
  83. case RX_TX_CORE_CLK:
  84. case WSA_TX_CORE_CLK:
  85. case WSA2_TX_CORE_CLK:
  86. default:
  87. dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
  88. break;
  89. }
  90. return NULL;
  91. }
  92. int lpass_cdc_rsc_clk_reset(struct device *dev, int clk_id)
  93. {
  94. struct device *clk_dev = NULL;
  95. struct lpass_cdc_clk_rsc *priv = NULL;
  96. int count = 0;
  97. if (!dev) {
  98. pr_err("%s: dev is null %d\n", __func__);
  99. return -EINVAL;
  100. }
  101. if (clk_id < 0 || clk_id >= MAX_CLK) {
  102. pr_err("%s: Invalid clk_id: %d\n",
  103. __func__, clk_id);
  104. return -EINVAL;
  105. }
  106. clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
  107. if (!clk_dev) {
  108. pr_err("%s: Invalid rsc clk device\n", __func__);
  109. return -EINVAL;
  110. }
  111. priv = dev_get_drvdata(clk_dev);
  112. if (!priv) {
  113. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  114. return -EINVAL;
  115. }
  116. mutex_lock(&priv->rsc_clk_lock);
  117. while (__clk_is_enabled(priv->clk[clk_id])) {
  118. clk_disable_unprepare(priv->clk[clk_id]);
  119. count++;
  120. }
  121. dev_dbg(priv->dev,
  122. "%s: clock reset after ssr, count %d\n", __func__, count);
  123. trace_printk("%s: clock reset after ssr, count %d\n", __func__, count);
  124. while (count--) {
  125. clk_prepare_enable(priv->clk[clk_id]);
  126. }
  127. mutex_unlock(&priv->rsc_clk_lock);
  128. return 0;
  129. }
  130. EXPORT_SYMBOL(lpass_cdc_rsc_clk_reset);
  131. void lpass_cdc_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
  132. {
  133. struct device *clk_dev = NULL;
  134. struct lpass_cdc_clk_rsc *priv = NULL;
  135. int i = 0;
  136. if (!dev) {
  137. pr_err("%s: dev is null %d\n", __func__);
  138. return;
  139. }
  140. clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
  141. if (!clk_dev) {
  142. pr_err("%s: Invalid rsc clk device\n", __func__);
  143. return;
  144. }
  145. priv = dev_get_drvdata(clk_dev);
  146. if (!priv) {
  147. pr_err("%s: Invalid rsc clk private data\n", __func__);
  148. return;
  149. }
  150. mutex_lock(&priv->rsc_clk_lock);
  151. for (i = 0; i < MAX_CLK; i++) {
  152. if (enable) {
  153. if (priv->clk[i])
  154. clk_prepare_enable(priv->clk[i]);
  155. } else {
  156. if (priv->clk[i])
  157. clk_disable_unprepare(priv->clk[i]);
  158. }
  159. }
  160. mutex_unlock(&priv->rsc_clk_lock);
  161. return;
  162. }
  163. EXPORT_SYMBOL(lpass_cdc_clk_rsc_enable_all_clocks);
  164. static int lpass_cdc_clk_rsc_mux0_clk_request(struct lpass_cdc_clk_rsc *priv,
  165. int clk_id,
  166. bool enable)
  167. {
  168. int ret = 0;
  169. if (enable) {
  170. /* Enable Requested Core clk */
  171. if (priv->clk_cnt[clk_id] == 0) {
  172. ret = clk_prepare_enable(priv->clk[clk_id]);
  173. if (ret < 0) {
  174. dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
  175. __func__, clk_id);
  176. goto done;
  177. }
  178. }
  179. priv->clk_cnt[clk_id]++;
  180. } else {
  181. if (priv->clk_cnt[clk_id] <= 0) {
  182. dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
  183. __func__, clk_id);
  184. priv->clk_cnt[clk_id] = 0;
  185. goto done;
  186. }
  187. priv->clk_cnt[clk_id]--;
  188. if (priv->clk_cnt[clk_id] == 0)
  189. clk_disable_unprepare(priv->clk[clk_id]);
  190. }
  191. done:
  192. return ret;
  193. }
/*
 * Refcounted enable/disable of a clock behind a glitch-free mux (mux1 path).
 *
 * To flip the mux safely the default (mux0) clock is held enabled around the
 * muxsel MMIO write, then released again.  muxsel writes are skipped while
 * the gfmux is inaccessible (SSR).  Caller must hold priv->rsc_clk_lock
 * (NOTE(review): inferred from callers in this file — confirm).
 */
static int lpass_cdc_clk_rsc_mux1_clk_request(struct lpass_cdc_clk_rsc *priv,
					      int clk_id,
					      bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];
	u32 muxsel = 0;

	clk_muxsel = lpass_cdc_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* Park on the default clock while switching the mux. */
			ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
								 default_clk_id,
								 true);
			if (ret < 0)
				goto done;
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto err_clk;
			}
			if (priv->dev_up_gfmux) {
				iowrite32(0x1, clk_muxsel);
				/* Read back so the write is posted before tracing. */
				muxsel = ioread32(clk_muxsel);
				trace_printk("%s: muxsel value after enable: %d\n",
					     __func__, muxsel);
			}
			/* Default clock was only needed for the switch. */
			lpass_cdc_clk_rsc_mux0_clk_request(priv, default_clk_id,
							   false);
		}
		priv->clk_cnt[clk_id]++;
	} else {
		if (priv->clk_cnt[clk_id] <= 0) {
			/* Unbalanced disable: clamp the count and report. */
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Switch the mux back to the default source before gating. */
			ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
								 default_clk_id, true);
			if (!ret && priv->dev_up_gfmux) {
				iowrite32(0x0, clk_muxsel);
				muxsel = ioread32(clk_muxsel);
				trace_printk("%s: muxsel value after disable: %d\n",
					     __func__, muxsel);
			}
			clk_disable_unprepare(priv->clk[clk_id]);
			if (!ret)
				lpass_cdc_clk_rsc_mux0_clk_request(priv,
								   default_clk_id, false);
		}
	}
	return ret;

err_clk:
	lpass_cdc_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
/*
 * Arbitrate between VA_CORE_CLK and TX_CORE_CLK when voice-activation and
 * record/playback sessions run concurrently, migrating VA users onto
 * TX_CORE_CLK (tracked in priv->va_tx_clk_cnt) or back, so only one of the
 * two sources stays active.  Caller must hold priv->rsc_clk_lock
 * (NOTE(review): inferred from callers in this file — confirm).
 */
static int lpass_cdc_clk_rsc_check_and_update_va_clk(struct lpass_cdc_clk_rsc *priv,
						     bool mux_switch,
						     int clk_id,
						     bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = lpass_cdc_clk_rsc_mux1_clk_request(priv,
									 VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				/* TX already active: ride on it instead of VA. */
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 TX_CORE_CLK, true);
				if (ret < 0)
					goto err;
				lpass_cdc_clk_rsc_mux1_clk_request(priv,
								   VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				lpass_cdc_clk_rsc_mux1_clk_request(priv,
								   VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				lpass_cdc_clk_rsc_mux0_clk_request(priv,
								   TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = lpass_cdc_clk_rsc_mux1_clk_request(priv,
									 VA_CORE_CLK, true);
				if (ret < 0)
					goto err;
				lpass_cdc_clk_rsc_mux0_clk_request(priv,
								   TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}
err:
	return ret;
}
  335. /**
  336. * lpass_cdc_clk_rsc_fs_gen_request - request to enable/disable fs generation
  337. * sequence
  338. *
  339. * @dev: Macro device pointer
  340. * @enable: enable or disable flag
  341. */
  342. void lpass_cdc_clk_rsc_fs_gen_request(struct device *dev, bool enable)
  343. {
  344. int i;
  345. struct regmap *regmap;
  346. struct device *clk_dev = NULL;
  347. struct lpass_cdc_clk_rsc *priv = NULL;
  348. if (!dev) {
  349. pr_err("%s: dev is null %d\n", __func__);
  350. return;
  351. }
  352. clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
  353. if (!clk_dev) {
  354. pr_err("%s: Invalid rsc clk device\n", __func__);
  355. return;
  356. }
  357. priv = dev_get_drvdata(clk_dev);
  358. if (!priv) {
  359. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  360. return;
  361. }
  362. regmap = dev_get_regmap(priv->dev->parent, NULL);
  363. if (!regmap) {
  364. pr_err("%s: regmap is null\n", __func__);
  365. return;
  366. }
  367. mutex_lock(&priv->fs_gen_lock);
  368. if (enable) {
  369. if (priv->reg_seq_en_cnt++ == 0) {
  370. for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
  371. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  372. __func__, priv->fs_gen_seq[i],
  373. priv->fs_gen_seq[i + 1]);
  374. regmap_update_bits(regmap,
  375. priv->fs_gen_seq[i],
  376. priv->fs_gen_seq[i + 1],
  377. priv->fs_gen_seq[i + 1]);
  378. }
  379. }
  380. } else {
  381. if (priv->reg_seq_en_cnt <= 0) {
  382. dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
  383. __func__, priv->reg_seq_en_cnt);
  384. priv->reg_seq_en_cnt = 0;
  385. mutex_unlock(&priv->fs_gen_lock);
  386. return;
  387. }
  388. if (--priv->reg_seq_en_cnt == 0) {
  389. for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
  390. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  391. __func__, priv->fs_gen_seq[i],
  392. priv->fs_gen_seq[i + 1]);
  393. regmap_update_bits(regmap, priv->fs_gen_seq[i],
  394. priv->fs_gen_seq[i + 1], 0x0);
  395. }
  396. }
  397. }
  398. mutex_unlock(&priv->fs_gen_lock);
  399. }
  400. EXPORT_SYMBOL(lpass_cdc_clk_rsc_fs_gen_request);
  401. /**
  402. * lpass_cdc_clk_rsc_request_clock - request for clock to
  403. * enable/disable
  404. *
  405. * @dev: Macro device pointer.
  406. * @default_clk_id: mux0 Core clock ID input.
  407. * @clk_id_req: Core clock ID requested to enable/disable
  408. * @enable: enable or disable clock flag
  409. *
  410. * Returns 0 on success or -EINVAL on error.
  411. */
  412. int lpass_cdc_clk_rsc_request_clock(struct device *dev,
  413. int default_clk_id,
  414. int clk_id_req,
  415. bool enable)
  416. {
  417. int ret = 0;
  418. struct device *clk_dev = NULL;
  419. struct lpass_cdc_clk_rsc *priv = NULL;
  420. bool mux_switch = false;
  421. if (!dev) {
  422. pr_err("%s: dev is null %d\n", __func__);
  423. return -EINVAL;
  424. }
  425. if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
  426. (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
  427. pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
  428. __func__, clk_id_req, default_clk_id);
  429. return -EINVAL;
  430. }
  431. clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
  432. if (!clk_dev) {
  433. pr_err("%s: Invalid rsc clk device\n", __func__);
  434. return -EINVAL;
  435. }
  436. priv = dev_get_drvdata(clk_dev);
  437. if (!priv) {
  438. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  439. return -EINVAL;
  440. }
  441. mutex_lock(&priv->rsc_clk_lock);
  442. if (!priv->dev_up && enable) {
  443. dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
  444. __func__);
  445. trace_printk("%s: SSR is in progress..\n", __func__);
  446. ret = -EINVAL;
  447. goto err;
  448. }
  449. priv->default_clk_id[clk_id_req] = default_clk_id;
  450. if (default_clk_id != clk_id_req)
  451. mux_switch = true;
  452. if (mux_switch) {
  453. ret = lpass_cdc_clk_rsc_mux1_clk_request(priv, clk_id_req,
  454. enable);
  455. if (ret < 0)
  456. goto err;
  457. } else {
  458. ret = lpass_cdc_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
  459. if (ret < 0)
  460. goto err;
  461. }
  462. ret = lpass_cdc_clk_rsc_check_and_update_va_clk(priv, mux_switch,
  463. clk_id_req,
  464. enable);
  465. if (ret < 0)
  466. goto err;
  467. dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  468. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  469. enable);
  470. trace_printk("%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  471. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  472. enable);
  473. mutex_unlock(&priv->rsc_clk_lock);
  474. return 0;
  475. err:
  476. mutex_unlock(&priv->rsc_clk_lock);
  477. return ret;
  478. }
  479. EXPORT_SYMBOL(lpass_cdc_clk_rsc_request_clock);
  480. static int lpass_cdc_clk_rsc_probe(struct platform_device *pdev)
  481. {
  482. int ret = 0, fs_gen_size, i, j;
  483. const char **clk_name_array;
  484. int clk_cnt;
  485. struct clk *clk;
  486. struct lpass_cdc_clk_rsc *priv = NULL;
  487. u32 muxsel = 0;
  488. priv = devm_kzalloc(&pdev->dev, sizeof(struct lpass_cdc_clk_rsc),
  489. GFP_KERNEL);
  490. if (!priv)
  491. return -ENOMEM;
  492. /* Get clk fs gen sequence from device tree */
  493. if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
  494. &fs_gen_size)) {
  495. dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
  496. __func__);
  497. ret = -EINVAL;
  498. goto err;
  499. }
  500. priv->num_fs_reg = fs_gen_size/(2 * sizeof(u32));
  501. priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
  502. if (!priv->fs_gen_seq) {
  503. ret = -ENOMEM;
  504. goto err;
  505. }
  506. dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
  507. /* Parse fs-gen-sequence */
  508. ret = of_property_read_u32_array(pdev->dev.of_node,
  509. "qcom,fs-gen-sequence",
  510. priv->fs_gen_seq,
  511. priv->num_fs_reg * 2);
  512. if (ret < 0) {
  513. dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
  514. __func__, ret);
  515. goto err;
  516. }
  517. /* Get clk details from device tree */
  518. clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
  519. if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
  520. dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
  521. __func__, clk_cnt);
  522. ret = -EINVAL;
  523. goto err;
  524. }
  525. clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
  526. GFP_KERNEL);
  527. if (!clk_name_array) {
  528. ret = -ENOMEM;
  529. goto err;
  530. }
  531. ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
  532. clk_name_array, clk_cnt);
  533. for (i = 0; i < MAX_CLK; i++) {
  534. priv->clk[i] = NULL;
  535. for (j = 0; j < clk_cnt; j++) {
  536. if (!strcmp(clk_src_name[i], clk_name_array[j])) {
  537. clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
  538. if (IS_ERR(clk)) {
  539. ret = PTR_ERR(clk);
  540. dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
  541. __func__, clk_src_name[i], ret);
  542. goto err;
  543. }
  544. priv->clk[i] = clk;
  545. dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
  546. __func__, clk_src_name[i]);
  547. break;
  548. }
  549. }
  550. }
  551. ret = of_property_read_u32(pdev->dev.of_node,
  552. "qcom,rx_mclk_mode_muxsel", &muxsel);
  553. if (ret) {
  554. dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
  555. __func__);
  556. } else {
  557. priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  558. if (!priv->rx_clk_muxsel) {
  559. dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
  560. __func__);
  561. return -ENOMEM;
  562. }
  563. }
  564. ret = of_property_read_u32(pdev->dev.of_node,
  565. "qcom,wsa_mclk_mode_muxsel", &muxsel);
  566. if (ret) {
  567. dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
  568. __func__);
  569. } else {
  570. priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  571. if (!priv->wsa_clk_muxsel) {
  572. dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
  573. __func__);
  574. return -ENOMEM;
  575. }
  576. }
  577. ret = of_property_read_u32(pdev->dev.of_node,
  578. "qcom,va_mclk_mode_muxsel", &muxsel);
  579. if (ret) {
  580. dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
  581. __func__);
  582. } else {
  583. priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  584. if (!priv->va_clk_muxsel) {
  585. dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
  586. __func__);
  587. return -ENOMEM;
  588. }
  589. }
  590. ret = lpass_cdc_register_res_clk(&pdev->dev, lpass_cdc_clk_rsc_cb);
  591. if (ret < 0) {
  592. dev_err(&pdev->dev, "%s: Failed to register cb %d",
  593. __func__, ret);
  594. goto err;
  595. }
  596. priv->dev = &pdev->dev;
  597. priv->dev_up = true;
  598. priv->dev_up_gfmux = true;
  599. mutex_init(&priv->rsc_clk_lock);
  600. mutex_init(&priv->fs_gen_lock);
  601. dev_set_drvdata(&pdev->dev, priv);
  602. err:
  603. return ret;
  604. }
  605. static int lpass_cdc_clk_rsc_remove(struct platform_device *pdev)
  606. {
  607. struct lpass_cdc_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
  608. lpass_cdc_unregister_res_clk(&pdev->dev);
  609. of_platform_depopulate(&pdev->dev);
  610. if (!priv)
  611. return -EINVAL;
  612. mutex_destroy(&priv->rsc_clk_lock);
  613. mutex_destroy(&priv->fs_gen_lock);
  614. return 0;
  615. }
/* Bind against the "qcom,lpass-cdc-clk-rsc-mngr" device-tree node. */
static const struct of_device_id lpass_cdc_clk_rsc_dt_match[] = {
	{.compatible = "qcom,lpass-cdc-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, lpass_cdc_clk_rsc_dt_match);

static struct platform_driver lpass_cdc_clk_rsc_mgr = {
	.driver = {
		.name = "lpass-cdc-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = lpass_cdc_clk_rsc_dt_match,
		/* Disallow sysfs unbind: users hold live clock refcounts. */
		.suppress_bind_attrs = true,
	},
	.probe = lpass_cdc_clk_rsc_probe,
	.remove = lpass_cdc_clk_rsc_remove,
};
/*
 * Register/unregister the platform driver.  Called from the parent
 * lpass-cdc module's init/exit rather than via module_platform_driver().
 */
int lpass_cdc_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&lpass_cdc_clk_rsc_mgr);
}

void lpass_cdc_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&lpass_cdc_clk_rsc_mgr);
}
  639. MODULE_DESCRIPTION("LPASS codec clock resource manager driver");
  640. MODULE_LICENSE("GPL v2");