lpass-cdc-clk-rsc.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "lpass-cdc.h"
#include "lpass-cdc-clk-rsc.h"

#define DRV_NAME "lpass-cdc-clk-rsc"
#define LPASS_CDC_CLK_NAME_LENGTH 30
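
/* Clock source names, indexed by clock ID (TX_CORE_CLK, RX_CORE_CLK, ...) */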
static char clk_src_name[MAX_CLK][LPASS_CDC_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"wsa2_core_clk",
	"rx_tx_core_clk",
	"wsa_tx_core_clk",
	"wsa2_tx_core_clk",
};

struct lpass_cdc_clk_rsc {
	struct device *dev;
	struct mutex rsc_clk_lock;
	struct mutex fs_gen_lock;
	struct clk *clk[MAX_CLK];
	int clk_cnt[MAX_CLK];
	int reg_seq_en_cnt;
	int va_tx_clk_cnt;
	bool dev_up;
	bool dev_up_gfmux;
	u32 num_fs_reg;
	u32 *fs_gen_seq;
	int default_clk_id[MAX_CLK];
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};
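
/* Callback invoked by the parent codec driver on SSR (subsystem restart) events */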
static int lpass_cdc_clk_rsc_cb(struct device *dev, u16 event)
{
	struct lpass_cdc_clk_rsc *priv;

	if (!dev) {
		pr_err("%s: Invalid device pointer\n", __func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(dev);
	if (!priv) {
		pr_err("%s: Invalid clk rsc private data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	if (event == LPASS_CDC_MACRO_EVT_SSR_UP) {
		priv->dev_up = true;
	} else if (event == LPASS_CDC_MACRO_EVT_SSR_DOWN) {
		priv->dev_up = false;
		priv->dev_up_gfmux = false;
	} else if (event == LPASS_CDC_MACRO_EVT_SSR_GFMUX_UP) {
		priv->dev_up_gfmux = true;
	}
	mutex_unlock(&priv->rsc_clk_lock);

	return 0;
}
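
/* Map a core clock ID to its iomapped MCLK mode mux-select register */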
static char __iomem *lpass_cdc_clk_rsc_get_clk_muxsel(struct lpass_cdc_clk_rsc *priv,
						      int clk_id)
{
	switch (clk_id) {
	case RX_CORE_CLK:
		return priv->rx_clk_muxsel;
	case WSA_CORE_CLK:
	case WSA2_CORE_CLK:
		return priv->wsa_clk_muxsel;
	case VA_CORE_CLK:
		return priv->va_clk_muxsel;
	case TX_CORE_CLK:
	case RX_TX_CORE_CLK:
	case WSA_TX_CORE_CLK:
	case WSA2_TX_CORE_CLK:
	default:
		dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
		break;
	}

	return NULL;
}
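
/**
 * lpass_cdc_rsc_clk_reset - reset a core clock after SSR
 *
 * @dev: Macro device pointer
 * @clk_id: Core clock ID to reset
 *
 * Disables the clock as many times as it is currently enabled, then
 * re-enables it the same number of times, so the hardware gets
 * reprogrammed while the enable count is preserved.
 */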
int lpass_cdc_rsc_clk_reset(struct device *dev, int clk_id)
{
	struct device *clk_dev = NULL;
	struct lpass_cdc_clk_rsc *priv = NULL;
	int count = 0;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}

	if (clk_id < 0 || clk_id >= MAX_CLK) {
		pr_err("%s: Invalid clk_id: %d\n", __func__, clk_id);
		return -EINVAL;
	}

	clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	while (__clk_is_enabled(priv->clk[clk_id])) {
		clk_disable_unprepare(priv->clk[clk_id]);
		count++;
	}
	dev_dbg(priv->dev, "%s: clock reset after ssr, count %d\n",
		__func__, count);

	while (count--)
		clk_prepare_enable(priv->clk[clk_id]);
	mutex_unlock(&priv->rsc_clk_lock);

	return 0;
}
EXPORT_SYMBOL(lpass_cdc_rsc_clk_reset);
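
/**
 * lpass_cdc_clk_rsc_enable_all_clocks - enable or disable all core clocks
 *
 * @dev: Macro device pointer
 * @enable: true to enable every registered clock, false to disable
 *
 * Brings all core clocks to a known state around SSR, without touching
 * the per-clock reference counts.
 */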
void lpass_cdc_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
{
	struct device *clk_dev = NULL;
	struct lpass_cdc_clk_rsc *priv = NULL;
	int i = 0;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}

	clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return;
	}

	mutex_lock(&priv->rsc_clk_lock);
	for (i = 0; i < MAX_CLK; i++) {
		if (enable) {
			if (priv->clk[i])
				clk_prepare_enable(priv->clk[i]);
		} else {
			if (priv->clk[i] && __clk_is_enabled(priv->clk[i]))
				clk_disable_unprepare(priv->clk[i]);
		}
	}
	mutex_unlock(&priv->rsc_clk_lock);
}
EXPORT_SYMBOL(lpass_cdc_clk_rsc_enable_all_clocks);
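
/*
 * Reference-counted enable/disable of a core clock running from its
 * default source (no mux switch involved).
 */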
static int lpass_cdc_clk_rsc_mux0_clk_request(struct lpass_cdc_clk_rsc *priv,
					      int clk_id,
					      bool enable)
{
	int ret = 0;

	if (enable) {
		/* Enable requested core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto done;
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0)
			clk_disable_unprepare(priv->clk[clk_id]);
	}
done:
	return ret;
}
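
/*
 * Reference-counted enable/disable of a core clock routed through a
 * glitch-free mux (GFMux): the default clock is held enabled while the
 * mux is switched, then released again.
 */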
static int lpass_cdc_clk_rsc_mux1_clk_request(struct lpass_cdc_clk_rsc *priv,
					      int clk_id,
					      bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];
	u32 muxsel = 0;

	clk_muxsel = lpass_cdc_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			if (clk_id != VA_CORE_CLK) {
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 default_clk_id,
									 true);
				if (ret < 0)
					goto done;
			}

			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto err_clk;
			}

			/*
			 * Temporary SW workaround for a glitch issue in the
			 * VA GFMux instance responsible for switching from
			 * TX MCLK to VA MCLK. This configuration will be
			 * handled in the DSP itself.
			 */
			if (clk_id != VA_CORE_CLK) {
				if (priv->dev_up_gfmux) {
					iowrite32(0x1, clk_muxsel);
					muxsel = ioread32(clk_muxsel); /* read back to post the write */
				}
				lpass_cdc_clk_rsc_mux0_clk_request(priv, default_clk_id,
								   false);
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/*
			 * Temporary SW workaround for a glitch issue in the
			 * VA GFMux instance responsible for switching from
			 * TX MCLK to VA MCLK. This configuration will be
			 * handled in the DSP itself.
			 */
			if (clk_id != VA_CORE_CLK) {
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 default_clk_id, true);
				if (!ret && priv->dev_up_gfmux) {
					iowrite32(0x0, clk_muxsel);
					muxsel = ioread32(clk_muxsel); /* read back to post the write */
				}
			}
			clk_disable_unprepare(priv->clk[clk_id]);
			if (clk_id != VA_CORE_CLK && !ret)
				lpass_cdc_clk_rsc_mux0_clk_request(priv,
								   default_clk_id, false);
		}
	}

	return ret;

err_clk:
	if (clk_id != VA_CORE_CLK)
		lpass_cdc_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
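
/*
 * Keep VA and TX core clock usage consistent: while TX_CORE_CLK is
 * running, VA use cases are carried on TX_CORE_CLK (tracked via
 * va_tx_clk_cnt) and are moved back to VA_CORE_CLK once TX stops.
 */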
static int lpass_cdc_clk_rsc_check_and_update_va_clk(struct lpass_cdc_clk_rsc *priv,
						     bool mux_switch,
						     int clk_id,
						     bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable:
			 * 1. VA only, active clk is VA_CORE_CLK
			 * 2. record -> record + VA, active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = lpass_cdc_clk_rsc_mux1_clk_request(priv,
									 VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle the following concurrency scenarios during enable:
			 * 1. VA -> record + VA: increment TX clk count, disable VA
			 * 2. VA -> playback + VA: increment TX clk count, disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 TX_CORE_CLK, true);
				if (ret < 0)
					goto err;

				lpass_cdc_clk_rsc_mux1_clk_request(priv,
								   VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable:
			 * 1. VA only: disable VA_CORE_CLK
			 * 2. record + VA -> record: decrement TX clk count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				lpass_cdc_clk_rsc_mux1_clk_request(priv,
								   VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				lpass_cdc_clk_rsc_mux0_clk_request(priv,
								   TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenario during disable:
			 * record + VA -> VA: enable VA clk, decrement TX clk count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = lpass_cdc_clk_rsc_mux1_clk_request(priv,
									 VA_CORE_CLK, true);
				if (ret < 0)
					goto err;

				lpass_cdc_clk_rsc_mux0_clk_request(priv,
								   TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}
err:
	return ret;
}

/**
 * lpass_cdc_clk_rsc_fs_gen_request - request to enable/disable the fs
 * generation sequence
 *
 * @dev: Macro device pointer
 * @enable: enable or disable flag
 */
void lpass_cdc_clk_rsc_fs_gen_request(struct device *dev, bool enable)
{
	int i;
	struct regmap *regmap;
	struct device *clk_dev = NULL;
	struct lpass_cdc_clk_rsc *priv = NULL;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}

	clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return;
	}

	regmap = dev_get_regmap(priv->dev->parent, NULL);
	if (!regmap) {
		pr_err("%s: regmap is null\n", __func__);
		return;
	}

	mutex_lock(&priv->fs_gen_lock);
	if (enable) {
		if (priv->reg_seq_en_cnt++ == 0) {
			/* apply the <register, mask, value> triplets in order */
			for (i = 0; i < (priv->num_fs_reg * 3); i += 3) {
				dev_dbg(priv->dev, "%s: Register: %d, mask: %d, value: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1],
					priv->fs_gen_seq[i + 2]);
				regmap_update_bits(regmap,
						   priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1],
						   priv->fs_gen_seq[i + 2]);
			}
		}
	} else {
		if (priv->reg_seq_en_cnt <= 0) {
			dev_err_ratelimited(priv->dev, "%s: reg_seq_en_cnt: %d is already disabled\n",
					    __func__, priv->reg_seq_en_cnt);
			priv->reg_seq_en_cnt = 0;
			mutex_unlock(&priv->fs_gen_lock);
			return;
		}
		if (--priv->reg_seq_en_cnt == 0) {
			/* clear the masked bits in reverse order */
			for (i = ((priv->num_fs_reg - 1) * 3); i >= 0; i -= 3) {
				dev_dbg(priv->dev, "%s: Register: %d, mask: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap, priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1], 0x0);
			}
		}
	}
	mutex_unlock(&priv->fs_gen_lock);
}
EXPORT_SYMBOL(lpass_cdc_clk_rsc_fs_gen_request);

/**
 * lpass_cdc_clk_rsc_request_clock - request to enable/disable a core clock
 *
 * @dev: Macro device pointer.
 * @default_clk_id: mux0 core clock ID input.
 * @clk_id_req: core clock ID requested to enable/disable
 * @enable: enable or disable clock flag
 *
 * Returns 0 on success, a negative errno on error.
 */
int lpass_cdc_clk_rsc_request_clock(struct device *dev,
				    int default_clk_id,
				    int clk_id_req,
				    bool enable)
{
	int ret = 0;
	struct device *clk_dev = NULL;
	struct lpass_cdc_clk_rsc *priv = NULL;
	bool mux_switch = false;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}

	if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) ||
	    (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
		pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
		       __func__, clk_id_req, default_clk_id);
		return -EINVAL;
	}

	clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	if (!priv->dev_up && enable) {
		dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
				    __func__);
		ret = -EINVAL;
		goto err;
	}
	priv->default_clk_id[clk_id_req] = default_clk_id;
	if (default_clk_id != clk_id_req)
		mux_switch = true;

	if (mux_switch) {
		if (clk_id_req != VA_CORE_CLK) {
			ret = lpass_cdc_clk_rsc_mux1_clk_request(priv, clk_id_req,
								 enable);
			if (ret < 0)
				goto err;
		}
	} else {
		ret = lpass_cdc_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
		if (ret < 0)
			goto err;
	}

	ret = lpass_cdc_clk_rsc_check_and_update_va_clk(priv, mux_switch,
							clk_id_req,
							enable);
	if (ret < 0)
		goto err;

	dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
		__func__, priv->clk_cnt[clk_id_req], clk_id_req, enable);

	mutex_unlock(&priv->rsc_clk_lock);
	return 0;

err:
	mutex_unlock(&priv->rsc_clk_lock);
	return ret;
}
EXPORT_SYMBOL(lpass_cdc_clk_rsc_request_clock);
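
/*
 * Probe: parse the fs-gen register sequence and clock list from the
 * device tree, iomap the optional MCLK mode mux-select registers and
 * register the SSR callback with the parent codec driver.
 */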
static int lpass_cdc_clk_rsc_probe(struct platform_device *pdev)
{
	int ret = 0, fs_gen_size, i, j;
	const char **clk_name_array;
	int clk_cnt;
	struct clk *clk;
	struct lpass_cdc_clk_rsc *priv = NULL;
	u32 muxsel = 0;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct lpass_cdc_clk_rsc),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Get clk fs gen sequence from device tree */
	if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
			      &fs_gen_size)) {
		dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
			__func__);
		ret = -EINVAL;
		goto err;
	}
	priv->num_fs_reg = fs_gen_size / (3 * sizeof(u32));
	priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
	if (!priv->fs_gen_seq) {
		ret = -ENOMEM;
		goto err;
	}
	dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);

	/* Parse fs-gen-sequence */
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "qcom,fs-gen-sequence",
					 priv->fs_gen_seq,
					 priv->num_fs_reg * 3);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
			__func__, ret);
		goto err;
	}

	/* Get clk details from device tree */
	clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
	if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
		dev_err(&pdev->dev, "%s: Invalid number of clocks %d\n",
			__func__, clk_cnt);
		ret = -EINVAL;
		goto err;
	}
	clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
				      GFP_KERNEL);
	if (!clk_name_array) {
		ret = -ENOMEM;
		goto err;
	}
	ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
					    clk_name_array, clk_cnt);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: unable to read clock-names, ret = %d\n",
			__func__, ret);
		goto err;
	}

	for (i = 0; i < MAX_CLK; i++) {
		priv->clk[i] = NULL;
		for (j = 0; j < clk_cnt; j++) {
			if (!strcmp(clk_src_name[i], clk_name_array[j])) {
				clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
				if (IS_ERR(clk)) {
					ret = PTR_ERR(clk);
					dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
						__func__, clk_src_name[i], ret);
					goto err;
				}
				priv->clk[i] = clk;
				dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
					__func__, clk_src_name[i]);
				break;
			}
		}
	}

	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,rx_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->rx_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
				__func__);
			ret = -ENOMEM;
			goto err;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,wsa_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->wsa_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
				__func__);
			ret = -ENOMEM;
			goto err;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,va_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->va_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
				__func__);
			ret = -ENOMEM;
			goto err;
		}
	}

	ret = lpass_cdc_register_res_clk(&pdev->dev, lpass_cdc_clk_rsc_cb);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: Failed to register cb %d\n",
			__func__, ret);
		goto err;
	}
	priv->dev = &pdev->dev;
	priv->dev_up = true;
	priv->dev_up_gfmux = true;
	mutex_init(&priv->rsc_clk_lock);
	mutex_init(&priv->fs_gen_lock);
	dev_set_drvdata(&pdev->dev, priv);
err:
	return ret;
}

static int lpass_cdc_clk_rsc_remove(struct platform_device *pdev)
{
	struct lpass_cdc_clk_rsc *priv = dev_get_drvdata(&pdev->dev);

	lpass_cdc_unregister_res_clk(&pdev->dev);
	of_platform_depopulate(&pdev->dev);
	if (!priv)
		return -EINVAL;

	mutex_destroy(&priv->rsc_clk_lock);
	mutex_destroy(&priv->fs_gen_lock);

	return 0;
}

static const struct of_device_id lpass_cdc_clk_rsc_dt_match[] = {
	{ .compatible = "qcom,lpass-cdc-clk-rsc-mngr" },
	{}
};
MODULE_DEVICE_TABLE(of, lpass_cdc_clk_rsc_dt_match);

static struct platform_driver lpass_cdc_clk_rsc_mgr = {
	.driver = {
		.name = "lpass-cdc-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = lpass_cdc_clk_rsc_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = lpass_cdc_clk_rsc_probe,
	.remove = lpass_cdc_clk_rsc_remove,
};

int lpass_cdc_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&lpass_cdc_clk_rsc_mgr);
}

void lpass_cdc_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&lpass_cdc_clk_rsc_mgr);
}
MODULE_DESCRIPTION("LPASS codec clock resource manager driver");
MODULE_LICENSE("GPL v2");