lpass-cdc-clk-rsc.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "lpass-cdc.h"
#include "lpass-cdc-clk-rsc.h"

#define DRV_NAME "lpass-cdc-clk-rsc"
#define LPASS_CDC_CLK_NAME_LENGTH 30
#define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)

static char clk_src_name[MAX_CLK][LPASS_CDC_CLK_NAME_LENGTH] = {
    "tx_core_clk",
    "rx_core_clk",
    "wsa_core_clk",
    "va_core_clk",
    "wsa2_core_clk",
    "rx_tx_core_clk",
    "wsa_tx_core_clk",
    "wsa2_tx_core_clk",
    "tx_npl_clk",
    "rx_npl_clk",
    "wsa_npl_clk",
    "va_npl_clk",
};
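/*
 * struct lpass_cdc_clk_rsc - clock resource manager state (field roles
 * summarized from their usage in this file)
 * @dev:            clock resource manager device
 * @rsc_clk_lock:   protects clock handles, reference counts and SSR state
 * @fs_gen_lock:    protects the fs generation sequence reference count
 * @clk:            clock handles indexed by clock ID
 * @clk_cnt:        per-clock enable reference counts
 * @reg_seq_en_cnt: fs generation sequence reference count
 * @va_tx_clk_cnt:  number of VA requests currently served from TX_CORE_CLK
 * @dev_up:         false while an SSR (subsystem restart) is in progress
 * @dev_up_gfmux:   true once the GFMux is usable again after SSR
 * @num_fs_reg:     number of register/mask/value triplets in @fs_gen_seq
 * @fs_gen_seq:     fs generation sequence parsed from device tree
 * @default_clk_id: default (mux0) clock ID recorded per requested clock
 * @regmap:         codec regmap (not assigned within this file)
 * @rx_clk_muxsel:  ioremapped RX mclk mode muxsel register
 * @wsa_clk_muxsel: ioremapped WSA mclk mode muxsel register
 * @va_clk_muxsel:  ioremapped VA mclk mode muxsel register
 */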
struct lpass_cdc_clk_rsc {
    struct device *dev;
    struct mutex rsc_clk_lock;
    struct mutex fs_gen_lock;
    struct clk *clk[MAX_CLK];
    int clk_cnt[MAX_CLK];
    int reg_seq_en_cnt;
    int va_tx_clk_cnt;
    bool dev_up;
    bool dev_up_gfmux;
    u32 num_fs_reg;
    u32 *fs_gen_seq;
    int default_clk_id[MAX_CLK];
    struct regmap *regmap;
    char __iomem *rx_clk_muxsel;
    char __iomem *wsa_clk_muxsel;
    char __iomem *va_clk_muxsel;
};
static int lpass_cdc_clk_rsc_cb(struct device *dev, u16 event)
{
    struct lpass_cdc_clk_rsc *priv;

    if (!dev) {
        pr_err("%s: Invalid device pointer\n", __func__);
        return -EINVAL;
    }

    priv = dev_get_drvdata(dev);
    if (!priv) {
        pr_err("%s: Invalid clk rsc private data\n", __func__);
        return -EINVAL;
    }

    mutex_lock(&priv->rsc_clk_lock);
    if (event == LPASS_CDC_MACRO_EVT_SSR_UP) {
        priv->dev_up = true;
    } else if (event == LPASS_CDC_MACRO_EVT_SSR_DOWN) {
        priv->dev_up = false;
        priv->dev_up_gfmux = false;
    } else if (event == LPASS_CDC_MACRO_EVT_SSR_GFMUX_UP) {
        priv->dev_up_gfmux = true;
    }
    mutex_unlock(&priv->rsc_clk_lock);

    return 0;
}
static char __iomem *lpass_cdc_clk_rsc_get_clk_muxsel(struct lpass_cdc_clk_rsc *priv,
                                                      int clk_id)
{
    switch (clk_id) {
    case RX_CORE_CLK:
        return priv->rx_clk_muxsel;
    case WSA_CORE_CLK:
    case WSA2_CORE_CLK:
        return priv->wsa_clk_muxsel;
    case VA_CORE_CLK:
        return priv->va_clk_muxsel;
    case TX_CORE_CLK:
    case RX_TX_CORE_CLK:
    case WSA_TX_CORE_CLK:
    case WSA2_TX_CORE_CLK:
    default:
        dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
        break;
    }

    return NULL;
}
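/**
 * lpass_cdc_rsc_clk_reset - cycle a core clock (and its NPL pair) after SSR
 * @dev: Macro device pointer
 * @clk_id: Core clock ID to reset
 *
 * Disables the clock as many times as it is currently enabled, then
 * re-enables it the same number of times, so the hardware sees a clean
 * off/on transition while the reference counts are preserved.
 *
 * Returns 0 on success or -EINVAL on error.
 */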
int lpass_cdc_rsc_clk_reset(struct device *dev, int clk_id)
{
    struct device *clk_dev = NULL;
    struct lpass_cdc_clk_rsc *priv = NULL;
    int count = 0;

    if (!dev) {
        pr_err("%s: dev is null\n", __func__);
        return -EINVAL;
    }

#ifdef CONFIG_BOLERO_VER_2P1
    if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
#else
    if (clk_id < 0 || clk_id >= MAX_CLK) {
#endif
        pr_err("%s: Invalid clk_id: %d\n", __func__, clk_id);
        return -EINVAL;
    }

    clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
    if (!clk_dev) {
        pr_err("%s: Invalid rsc clk device\n", __func__);
        return -EINVAL;
    }

    priv = dev_get_drvdata(clk_dev);
    if (!priv) {
        pr_err("%s: Invalid rsc clk private data\n", __func__);
        return -EINVAL;
    }

    mutex_lock(&priv->rsc_clk_lock);
    while (__clk_is_enabled(priv->clk[clk_id])) {
#ifdef CONFIG_BOLERO_VER_2P1
        clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
#endif
        clk_disable_unprepare(priv->clk[clk_id]);
        count++;
    }
    dev_dbg(priv->dev, "%s: clock reset after ssr, count %d\n",
            __func__, count);

    while (count--) {
        clk_prepare_enable(priv->clk[clk_id]);
#ifdef CONFIG_BOLERO_VER_2P1
        clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
#endif
    }
    mutex_unlock(&priv->rsc_clk_lock);

    return 0;
}
EXPORT_SYMBOL(lpass_cdc_rsc_clk_reset);
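/**
 * lpass_cdc_clk_rsc_enable_all_clocks - force all registered clocks on or off
 * @dev: Macro device pointer
 * @enable: true to enable every available clock, false to disable each one
 *          that is currently enabled
 *
 * Bypasses the per-clock reference counting and acts directly on the
 * clock framework state.
 */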
void lpass_cdc_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
{
    struct device *clk_dev = NULL;
    struct lpass_cdc_clk_rsc *priv = NULL;
    int i = 0;

    if (!dev) {
        pr_err("%s: dev is null\n", __func__);
        return;
    }

    clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
    if (!clk_dev) {
        pr_err("%s: Invalid rsc clk device\n", __func__);
        return;
    }

    priv = dev_get_drvdata(clk_dev);
    if (!priv) {
        pr_err("%s: Invalid rsc clk private data\n", __func__);
        return;
    }

    mutex_lock(&priv->rsc_clk_lock);
#ifdef CONFIG_BOLERO_VER_2P1
    for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
#else
    for (i = 0; i < MAX_CLK; i++) {
#endif
        if (enable) {
            if (priv->clk[i])
                clk_prepare_enable(priv->clk[i]);
#ifdef CONFIG_BOLERO_VER_2P1
            if (priv->clk[i + NPL_CLK_OFFSET])
                clk_prepare_enable(priv->clk[i + NPL_CLK_OFFSET]);
#endif
        } else {
#ifdef CONFIG_BOLERO_VER_2P1
            if (priv->clk[i + NPL_CLK_OFFSET] &&
                __clk_is_enabled(priv->clk[i + NPL_CLK_OFFSET]))
                clk_disable_unprepare(priv->clk[i + NPL_CLK_OFFSET]);
#endif
            if (priv->clk[i] && __clk_is_enabled(priv->clk[i]))
                clk_disable_unprepare(priv->clk[i]);
        }
    }
    mutex_unlock(&priv->rsc_clk_lock);
}
EXPORT_SYMBOL(lpass_cdc_clk_rsc_enable_all_clocks);
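/*
 * Two request paths follow:
 *
 * mux0 - the requested clock is also the macro's default source. Only
 *        reference counting and clk_prepare_enable()/
 *        clk_disable_unprepare() are involved.
 *
 * mux1 - the macro is switching away from its default source, so the
 *        default clock is held while the GFMux select register is
 *        flipped, then released again.
 */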
static int lpass_cdc_clk_rsc_mux0_clk_request(struct lpass_cdc_clk_rsc *priv,
                                              int clk_id,
                                              bool enable)
{
    int ret = 0;

    if (enable) {
        /* Enable requested core clk */
        if (priv->clk_cnt[clk_id] == 0) {
            ret = clk_prepare_enable(priv->clk[clk_id]);
            if (ret < 0) {
                dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
                                    __func__, clk_id);
                goto done;
            }
#ifdef CONFIG_BOLERO_VER_2P1
            if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
                ret = clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
                if (ret < 0) {
                    dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
                                        __func__, clk_id + NPL_CLK_OFFSET);
                    goto err;
                }
            }
#endif
        }
        priv->clk_cnt[clk_id]++;
    } else {
        if (priv->clk_cnt[clk_id] <= 0) {
            dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
                                __func__, clk_id);
            priv->clk_cnt[clk_id] = 0;
            goto done;
        }
        priv->clk_cnt[clk_id]--;
        if (priv->clk_cnt[clk_id] == 0) {
#ifdef CONFIG_BOLERO_VER_2P1
            if (priv->clk[clk_id + NPL_CLK_OFFSET])
                clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
#endif
            clk_disable_unprepare(priv->clk[clk_id]);
        }
    }
    return ret;

#ifdef CONFIG_BOLERO_VER_2P1
err:
    clk_disable_unprepare(priv->clk[clk_id]);
#endif
done:
    return ret;
}
static int lpass_cdc_clk_rsc_mux1_clk_request(struct lpass_cdc_clk_rsc *priv,
                                              int clk_id,
                                              bool enable)
{
    char __iomem *clk_muxsel = NULL;
    int ret = 0;
    int default_clk_id = priv->default_clk_id[clk_id];
    u32 muxsel = 0;

    clk_muxsel = lpass_cdc_clk_rsc_get_clk_muxsel(priv, clk_id);
    if (!clk_muxsel) {
        ret = -EINVAL;
        goto done;
    }

    if (enable) {
        if (priv->clk_cnt[clk_id] == 0) {
            if (clk_id != VA_CORE_CLK) {
                ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
                                                         default_clk_id,
                                                         true);
                if (ret < 0)
                    goto done;
            }
            ret = clk_prepare_enable(priv->clk[clk_id]);
            if (ret < 0) {
                dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
                                    __func__, clk_id);
                goto err_clk;
            }
#ifdef CONFIG_BOLERO_VER_2P1
            if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
                ret = clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
                if (ret < 0) {
                    dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
                                        __func__, clk_id + NPL_CLK_OFFSET);
                    goto err_npl_clk;
                }
            }
#endif
            /*
             * Temporary SW workaround for a glitch issue in the VA GFMux
             * instance responsible for switching from TX MCLK to VA MCLK.
             * This configuration will be taken care of in the DSP itself.
             */
            if (clk_id != VA_CORE_CLK) {
                if (priv->dev_up_gfmux) {
                    iowrite32(0x1, clk_muxsel);
                    muxsel = ioread32(clk_muxsel);
                }
                lpass_cdc_clk_rsc_mux0_clk_request(priv, default_clk_id,
                                                   false);
            }
        }
        priv->clk_cnt[clk_id]++;
    } else {
        if (priv->clk_cnt[clk_id] <= 0) {
            dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
                                __func__, clk_id);
            priv->clk_cnt[clk_id] = 0;
            goto done;
        }
        priv->clk_cnt[clk_id]--;
        if (priv->clk_cnt[clk_id] == 0) {
            /*
             * Temporary SW workaround for a glitch issue in the VA GFMux
             * instance responsible for switching from TX MCLK to VA MCLK.
             * This configuration will be taken care of in the DSP itself.
             */
            if (clk_id != VA_CORE_CLK) {
                ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
                                                         default_clk_id, true);
                if (!ret && priv->dev_up_gfmux) {
                    iowrite32(0x0, clk_muxsel);
                    muxsel = ioread32(clk_muxsel);
                }
            }
#ifdef CONFIG_BOLERO_VER_2P1
            if (priv->clk[clk_id + NPL_CLK_OFFSET])
                clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
#endif
            clk_disable_unprepare(priv->clk[clk_id]);
            if (clk_id != VA_CORE_CLK && !ret)
                lpass_cdc_clk_rsc_mux0_clk_request(priv,
                                                   default_clk_id, false);
        }
    }
    return ret;

#ifdef CONFIG_BOLERO_VER_2P1
err_npl_clk:
    clk_disable_unprepare(priv->clk[clk_id]);
#endif
err_clk:
    if (clk_id != VA_CORE_CLK)
        lpass_cdc_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
    return ret;
}
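/*
 * VA can run either from its own VA_CORE_CLK through the GFMux or, when a
 * TX use case is already active, by piggybacking on TX_CORE_CLK.
 * va_tx_clk_cnt tracks how many VA requests are currently served from
 * TX_CORE_CLK so the bookkeeping can be unwound in the opposite order.
 */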
static int lpass_cdc_clk_rsc_check_and_update_va_clk(struct lpass_cdc_clk_rsc *priv,
                                                     bool mux_switch,
                                                     int clk_id,
                                                     bool enable)
{
    int ret = 0;

    if (enable) {
        if (clk_id == VA_CORE_CLK && mux_switch) {
            /*
             * Handle the following usecase scenarios during enable:
             * 1. VA only, active clk is VA_CORE_CLK
             * 2. record -> record + VA, active clk is TX_CORE_CLK
             */
            if (priv->clk_cnt[TX_CORE_CLK] == 0) {
                ret = lpass_cdc_clk_rsc_mux1_clk_request(priv,
                                                         VA_CORE_CLK, enable);
                if (ret < 0)
                    goto err;
            } else {
                ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
                                                         TX_CORE_CLK, enable);
                if (ret < 0)
                    goto err;
                priv->va_tx_clk_cnt++;
            }
        } else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
                   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
            /*
             * Handle the following concurrency scenarios during enable:
             * 1. VA -> record + VA: increment TX CLK and disable VA
             * 2. VA -> playback + VA: increment TX CLK and disable VA
             */
            while (priv->clk_cnt[VA_CORE_CLK] > 0) {
                ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
                                                         TX_CORE_CLK, true);
                if (ret < 0)
                    goto err;
                lpass_cdc_clk_rsc_mux1_clk_request(priv,
                                                   VA_CORE_CLK, false);
                priv->va_tx_clk_cnt++;
            }
        }
    } else {
        if (clk_id == VA_CORE_CLK && mux_switch) {
            /*
             * Handle the following usecase scenarios during disable:
             * 1. VA only: disable VA_CORE_CLK
             * 2. record + VA -> record: decrement TX CLK count
             */
            if (priv->clk_cnt[VA_CORE_CLK]) {
                lpass_cdc_clk_rsc_mux1_clk_request(priv,
                                                   VA_CORE_CLK, enable);
            } else if (priv->va_tx_clk_cnt) {
                lpass_cdc_clk_rsc_mux0_clk_request(priv,
                                                   TX_CORE_CLK, enable);
                priv->va_tx_clk_cnt--;
            }
        } else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
            /*
             * Handle the following usecase scenario during disable:
             * record + VA -> VA: enable VA CLK, decrement TX CLK count
             */
            while (priv->va_tx_clk_cnt) {
                ret = lpass_cdc_clk_rsc_mux1_clk_request(priv,
                                                         VA_CORE_CLK, true);
                if (ret < 0)
                    goto err;
                lpass_cdc_clk_rsc_mux0_clk_request(priv,
                                                   TX_CORE_CLK, false);
                priv->va_tx_clk_cnt--;
            }
        }
    }

err:
    return ret;
}
/**
 * lpass_cdc_clk_rsc_fs_gen_request - request to enable/disable the fs
 * generation sequence
 *
 * @dev: Macro device pointer
 * @enable: enable or disable flag
 */
void lpass_cdc_clk_rsc_fs_gen_request(struct device *dev, bool enable)
{
    int i;
    struct regmap *regmap;
    struct device *clk_dev = NULL;
    struct lpass_cdc_clk_rsc *priv = NULL;

    if (!dev) {
        pr_err("%s: dev is null\n", __func__);
        return;
    }

    clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
    if (!clk_dev) {
        pr_err("%s: Invalid rsc clk device\n", __func__);
        return;
    }

    priv = dev_get_drvdata(clk_dev);
    if (!priv) {
        pr_err("%s: Invalid rsc clk private data\n", __func__);
        return;
    }

    regmap = dev_get_regmap(priv->dev->parent, NULL);
    if (!regmap) {
        pr_err("%s: regmap is null\n", __func__);
        return;
    }

    mutex_lock(&priv->fs_gen_lock);
    if (enable) {
        if (priv->reg_seq_en_cnt++ == 0) {
            for (i = 0; i < (priv->num_fs_reg * 3); i += 3) {
                dev_dbg(priv->dev, "%s: Register: %d, mask: %d, value: %d\n",
                        __func__, priv->fs_gen_seq[i],
                        priv->fs_gen_seq[i + 1],
                        priv->fs_gen_seq[i + 2]);
                regmap_update_bits(regmap,
                                   priv->fs_gen_seq[i],
                                   priv->fs_gen_seq[i + 1],
                                   priv->fs_gen_seq[i + 2]);
            }
        }
    } else {
        if (priv->reg_seq_en_cnt <= 0) {
            dev_err_ratelimited(priv->dev, "%s: reg_seq_en_cnt: %d is already disabled\n",
                                __func__, priv->reg_seq_en_cnt);
            priv->reg_seq_en_cnt = 0;
            mutex_unlock(&priv->fs_gen_lock);
            return;
        }
        if (--priv->reg_seq_en_cnt == 0) {
            for (i = ((priv->num_fs_reg - 1) * 3); i >= 0; i -= 3) {
                dev_dbg(priv->dev, "%s: Register: %d, mask: %d\n",
                        __func__, priv->fs_gen_seq[i],
                        priv->fs_gen_seq[i + 1]);
                regmap_update_bits(regmap, priv->fs_gen_seq[i],
                                   priv->fs_gen_seq[i + 1], 0x0);
            }
        }
    }
    mutex_unlock(&priv->fs_gen_lock);
}
EXPORT_SYMBOL(lpass_cdc_clk_rsc_fs_gen_request);
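/*
 * Illustrative caller, a minimal sketch (the macro device shown is
 * hypothetical): the sequence is programmed on the first enable request
 * and cleared in reverse order when the last user disables it, so callers
 * must keep enable/disable strictly paired.
 *
 *     // in a macro driver's startup path
 *     lpass_cdc_clk_rsc_fs_gen_request(macro_dev, true);
 *     ...
 *     // in the matching shutdown path
 *     lpass_cdc_clk_rsc_fs_gen_request(macro_dev, false);
 */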
/**
 * lpass_cdc_clk_rsc_request_clock - request that a core clock be enabled
 * or disabled
 *
 * @dev: Macro device pointer.
 * @default_clk_id: mux0 core clock ID input.
 * @clk_id_req: core clock ID requested to enable/disable
 * @enable: enable or disable clock flag
 *
 * Returns 0 on success or -EINVAL on error.
 */
int lpass_cdc_clk_rsc_request_clock(struct device *dev,
                                    int default_clk_id,
                                    int clk_id_req,
                                    bool enable)
{
    int ret = 0;
    struct device *clk_dev = NULL;
    struct lpass_cdc_clk_rsc *priv = NULL;
    bool mux_switch = false;

    if (!dev) {
        pr_err("%s: dev is null\n", __func__);
        return -EINVAL;
    }

    if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
        (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
        pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
               __func__, clk_id_req, default_clk_id);
        return -EINVAL;
    }

    clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
    if (!clk_dev) {
        pr_err("%s: Invalid rsc clk device\n", __func__);
        return -EINVAL;
    }

    priv = dev_get_drvdata(clk_dev);
    if (!priv) {
        pr_err("%s: Invalid rsc clk private data\n", __func__);
        return -EINVAL;
    }

    mutex_lock(&priv->rsc_clk_lock);
    if (!priv->dev_up && enable) {
        dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
                            __func__);
        ret = -EINVAL;
        goto err;
    }
    priv->default_clk_id[clk_id_req] = default_clk_id;
    if (default_clk_id != clk_id_req)
        mux_switch = true;

    if (mux_switch) {
        if (clk_id_req != VA_CORE_CLK) {
            ret = lpass_cdc_clk_rsc_mux1_clk_request(priv, clk_id_req,
                                                     enable);
            if (ret < 0)
                goto err;
        }
    } else {
        ret = lpass_cdc_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
        if (ret < 0)
            goto err;
    }

    ret = lpass_cdc_clk_rsc_check_and_update_va_clk(priv, mux_switch,
                                                    clk_id_req,
                                                    enable);
    if (ret < 0)
        goto err;

    dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
            __func__, priv->clk_cnt[clk_id_req], clk_id_req,
            enable);
    mutex_unlock(&priv->rsc_clk_lock);
    return 0;

err:
    mutex_unlock(&priv->rsc_clk_lock);
    return ret;
}
EXPORT_SYMBOL(lpass_cdc_clk_rsc_request_clock);
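/*
 * Illustrative caller, a minimal sketch (the device and clock choice are
 * hypothetical): passing the same ID for the default and requested clock
 * takes the mux0 path; passing different IDs selects the GFMux (mux1)
 * path.
 *
 *     ret = lpass_cdc_clk_rsc_request_clock(macro_dev,
 *                                           TX_CORE_CLK,  // default source
 *                                           TX_CORE_CLK,  // requested clock
 *                                           true);
 *     if (ret < 0)
 *         return ret;
 *     ...
 *     lpass_cdc_clk_rsc_request_clock(macro_dev, TX_CORE_CLK,
 *                                     TX_CORE_CLK, false);
 */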
static int lpass_cdc_clk_rsc_probe(struct platform_device *pdev)
{
    int ret = 0, fs_gen_size, i, j;
    const char **clk_name_array;
    int clk_cnt;
    struct clk *clk;
    struct lpass_cdc_clk_rsc *priv = NULL;
    u32 muxsel = 0;

    priv = devm_kzalloc(&pdev->dev, sizeof(struct lpass_cdc_clk_rsc),
                        GFP_KERNEL);
    if (!priv)
        return -ENOMEM;

    /* Get clk fs gen sequence from device tree */
    if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
                          &fs_gen_size)) {
        dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
                __func__);
        ret = -EINVAL;
        goto err;
    }
    priv->num_fs_reg = fs_gen_size / (3 * sizeof(u32));
    priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
    if (!priv->fs_gen_seq) {
        ret = -ENOMEM;
        goto err;
    }
    dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);

    /* Parse fs-gen-sequence */
    ret = of_property_read_u32_array(pdev->dev.of_node,
                                     "qcom,fs-gen-sequence",
                                     priv->fs_gen_seq,
                                     priv->num_fs_reg * 3);
    if (ret < 0) {
        dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
                __func__, ret);
        goto err;
    }

    /* Get clk details from device tree */
    clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
    if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
        dev_err(&pdev->dev, "%s: Invalid number of clocks %d\n",
                __func__, clk_cnt);
        ret = -EINVAL;
        goto err;
    }
    clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
                                  GFP_KERNEL);
    if (!clk_name_array) {
        ret = -ENOMEM;
        goto err;
    }
    ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
                                        clk_name_array, clk_cnt);

    for (i = 0; i < MAX_CLK; i++) {
        priv->clk[i] = NULL;
        for (j = 0; j < clk_cnt; j++) {
            if (!strcmp(clk_src_name[i], clk_name_array[j])) {
                clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
                if (IS_ERR(clk)) {
                    ret = PTR_ERR(clk);
                    dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
                            __func__, clk_src_name[i], ret);
                    goto err;
                }
                priv->clk[i] = clk;
                dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
                        __func__, clk_src_name[i]);
                break;
            }
        }
    }

    ret = of_property_read_u32(pdev->dev.of_node,
                               "qcom,rx_mclk_mode_muxsel", &muxsel);
    if (ret) {
        dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
                __func__);
    } else {
        priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
        if (!priv->rx_clk_muxsel) {
            dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
                    __func__);
            return -ENOMEM;
        }
    }
    ret = of_property_read_u32(pdev->dev.of_node,
                               "qcom,wsa_mclk_mode_muxsel", &muxsel);
    if (ret) {
        dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
                __func__);
    } else {
        priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
        if (!priv->wsa_clk_muxsel) {
            dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
                    __func__);
            return -ENOMEM;
        }
    }
    ret = of_property_read_u32(pdev->dev.of_node,
                               "qcom,va_mclk_mode_muxsel", &muxsel);
    if (ret) {
        dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
                __func__);
    } else {
        priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
        if (!priv->va_clk_muxsel) {
            dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
                    __func__);
            return -ENOMEM;
        }
    }

    ret = lpass_cdc_register_res_clk(&pdev->dev, lpass_cdc_clk_rsc_cb);
    if (ret < 0) {
        dev_err(&pdev->dev, "%s: Failed to register cb %d\n",
                __func__, ret);
        goto err;
    }
    priv->dev = &pdev->dev;
    priv->dev_up = true;
    priv->dev_up_gfmux = true;
    mutex_init(&priv->rsc_clk_lock);
    mutex_init(&priv->fs_gen_lock);
    dev_set_drvdata(&pdev->dev, priv);

err:
    return ret;
}
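/*
 * Illustrative device tree node, a minimal sketch (register address,
 * sequence values and clock provider labels are placeholders; property
 * names match those parsed in probe above). qcom,fs-gen-sequence is read
 * as register/mask/value triplets:
 *
 *     lpass_cdc_clk_rsc: clk-rsc-mngr {
 *         compatible = "qcom,lpass-cdc-clk-rsc-mngr";
 *         qcom,fs-gen-sequence = <0x100 0x1 0x1>,
 *                                <0x104 0x1 0x1>;
 *         clock-names = "tx_core_clk", "va_core_clk";
 *         clocks = <&clock_audio_tx 0>, <&clock_audio_va 0>;
 *         qcom,rx_mclk_mode_muxsel = <0x033240d8>;
 *     };
 */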
static int lpass_cdc_clk_rsc_remove(struct platform_device *pdev)
{
    struct lpass_cdc_clk_rsc *priv = dev_get_drvdata(&pdev->dev);

    lpass_cdc_unregister_res_clk(&pdev->dev);
    of_platform_depopulate(&pdev->dev);
    if (!priv)
        return -EINVAL;
    mutex_destroy(&priv->rsc_clk_lock);
    mutex_destroy(&priv->fs_gen_lock);

    return 0;
}
static const struct of_device_id lpass_cdc_clk_rsc_dt_match[] = {
    { .compatible = "qcom,lpass-cdc-clk-rsc-mngr" },
    {}
};
MODULE_DEVICE_TABLE(of, lpass_cdc_clk_rsc_dt_match);

static struct platform_driver lpass_cdc_clk_rsc_mgr = {
    .driver = {
        .name = "lpass-cdc-clk-rsc-mngr",
        .owner = THIS_MODULE,
        .of_match_table = lpass_cdc_clk_rsc_dt_match,
        .suppress_bind_attrs = true,
    },
    .probe = lpass_cdc_clk_rsc_probe,
    .remove = lpass_cdc_clk_rsc_remove,
};

int lpass_cdc_clk_rsc_mgr_init(void)
{
    return platform_driver_register(&lpass_cdc_clk_rsc_mgr);
}

void lpass_cdc_clk_rsc_mgr_exit(void)
{
    platform_driver_unregister(&lpass_cdc_clk_rsc_mgr);
}

MODULE_DESCRIPTION("LPASS codec clock resource manager driver");
MODULE_LICENSE("GPL v2");