// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "lpass-cdc.h"
#include "lpass-cdc-clk-rsc.h"

#define DRV_NAME "lpass-cdc-clk-rsc"
#define LPASS_CDC_CLK_NAME_LENGTH 30
#define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)

static char clk_src_name[MAX_CLK][LPASS_CDC_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"wsa2_core_clk",
	"rx_tx_core_clk",
	"wsa_tx_core_clk",
	"wsa2_tx_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};

struct lpass_cdc_clk_rsc {
	struct device *dev;
	struct mutex rsc_clk_lock;	/* protects clk state and counters */
	struct mutex fs_gen_lock;	/* protects fs gen sequence refcount */
	struct clk *clk[MAX_CLK];
	int clk_cnt[MAX_CLK];		/* per-clock enable refcounts */
	int reg_seq_en_cnt;		/* fs gen sequence enable refcount */
	int va_tx_clk_cnt;		/* VA requests served via TX_CORE_CLK */
	bool dev_up;
	bool dev_up_gfmux;
	u32 num_fs_reg;
	u32 *fs_gen_seq;		/* flat <reg mask value> triplets */
	int default_clk_id[MAX_CLK];
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};

static int lpass_cdc_clk_rsc_cb(struct device *dev, u16 event)
{
	struct lpass_cdc_clk_rsc *priv;

	if (!dev) {
		pr_err("%s: Invalid device pointer\n", __func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(dev);
	if (!priv) {
		pr_err("%s: Invalid clk rsc private data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	if (event == LPASS_CDC_MACRO_EVT_SSR_UP) {
		priv->dev_up = true;
	} else if (event == LPASS_CDC_MACRO_EVT_SSR_DOWN) {
		priv->dev_up = false;
		priv->dev_up_gfmux = false;
	} else if (event == LPASS_CDC_MACRO_EVT_SSR_GFMUX_UP) {
		priv->dev_up_gfmux = true;
	}
	mutex_unlock(&priv->rsc_clk_lock);

	return 0;
}

static char __iomem *lpass_cdc_clk_rsc_get_clk_muxsel(struct lpass_cdc_clk_rsc *priv,
						      int clk_id)
{
	switch (clk_id) {
	case RX_CORE_CLK:
		return priv->rx_clk_muxsel;
	case WSA_CORE_CLK:
	case WSA2_CORE_CLK:
		return priv->wsa_clk_muxsel;
	case VA_CORE_CLK:
		return priv->va_clk_muxsel;
	case TX_CORE_CLK:
	case RX_TX_CORE_CLK:
	case WSA_TX_CORE_CLK:
	case WSA2_TX_CORE_CLK:
	default:
		dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
		break;
	}

	return NULL;
}

int lpass_cdc_rsc_clk_reset(struct device *dev, int clk_id)
{
	struct device *clk_dev = NULL;
	struct lpass_cdc_clk_rsc *priv = NULL;
	int count = 0;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}

	if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
		pr_err("%s: Invalid clk_id: %d\n", __func__, clk_id);
		return -EINVAL;
	}

	clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	while (__clk_is_enabled(priv->clk[clk_id])) {
		clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
		clk_disable_unprepare(priv->clk[clk_id]);
		count++;
	}
	dev_dbg(priv->dev,
		"%s: clock reset after ssr, count %d\n", __func__, count);

	while (count--) {
		clk_prepare_enable(priv->clk[clk_id]);
		clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
	}
	mutex_unlock(&priv->rsc_clk_lock);

	return 0;
}
EXPORT_SYMBOL(lpass_cdc_rsc_clk_reset);

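/*
 * Illustrative usage sketch (not part of the original source): a macro
 * driver would typically call lpass_cdc_rsc_clk_reset() from its SSR
 * event handler to rebalance the clock enable counts after subsystem
 * restart. The handler name below is hypothetical.
 *
 *	static int example_macro_event_handler(struct device *dev, u16 event)
 *	{
 *		if (event == LPASS_CDC_MACRO_EVT_SSR_UP)
 *			lpass_cdc_rsc_clk_reset(dev, TX_CORE_CLK);
 *		return 0;
 *	}
 */
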
void lpass_cdc_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
{
	struct device *clk_dev = NULL;
	struct lpass_cdc_clk_rsc *priv = NULL;
	int i = 0;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}

	clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return;
	}

	mutex_lock(&priv->rsc_clk_lock);
	for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
		if (enable) {
			if (priv->clk[i])
				clk_prepare_enable(priv->clk[i]);
			if (priv->clk[i + NPL_CLK_OFFSET])
				clk_prepare_enable(priv->clk[i + NPL_CLK_OFFSET]);
		} else {
			if (priv->clk[i + NPL_CLK_OFFSET])
				clk_disable_unprepare(priv->clk[i + NPL_CLK_OFFSET]);
			if (priv->clk[i])
				clk_disable_unprepare(priv->clk[i]);
		}
	}
	mutex_unlock(&priv->rsc_clk_lock);
}
EXPORT_SYMBOL(lpass_cdc_clk_rsc_enable_all_clocks);

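/*
 * Illustrative usage sketch (hypothetical call site): the caller is
 * expected to pair the bulk enable with a bulk disable, e.g. around a
 * recovery or register-dump window.
 *
 *	lpass_cdc_clk_rsc_enable_all_clocks(macro_dev, true);
 *	// ... debug / recovery work ...
 *	lpass_cdc_clk_rsc_enable_all_clocks(macro_dev, false);
 */
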
static int lpass_cdc_clk_rsc_mux0_clk_request(struct lpass_cdc_clk_rsc *priv,
					      int clk_id,
					      bool enable)
{
	int ret = 0;

	if (enable) {
		/* Enable requested core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto done;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}

	return ret;
err:
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}

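/*
 * The mux0 path is purely reference counted: the hardware clock is only
 * toggled on the 0 <-> 1 transitions of clk_cnt[clk_id], so every enable
 * must be balanced by a disable. A hypothetical sequence:
 *
 *	lpass_cdc_clk_rsc_mux0_clk_request(priv, RX_CORE_CLK, true);  // cnt 0->1, clk on
 *	lpass_cdc_clk_rsc_mux0_clk_request(priv, RX_CORE_CLK, true);  // cnt 1->2
 *	lpass_cdc_clk_rsc_mux0_clk_request(priv, RX_CORE_CLK, false); // cnt 2->1
 *	lpass_cdc_clk_rsc_mux0_clk_request(priv, RX_CORE_CLK, false); // cnt 1->0, clk off
 */
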
static int lpass_cdc_clk_rsc_mux1_clk_request(struct lpass_cdc_clk_rsc *priv,
					      int clk_id,
					      bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];
	u32 muxsel = 0;

	clk_muxsel = lpass_cdc_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			if (clk_id != VA_CORE_CLK) {
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 default_clk_id,
									 true);
				if (ret < 0)
					goto done;
			}
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto err_clk;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			/*
			 * Temp SW workaround to address a glitch issue of
			 * the VA GFMux instance responsible for switching
			 * from TX MCLK to VA MCLK. This configuration would
			 * be taken care of in the DSP itself.
			 */
			if (clk_id != VA_CORE_CLK) {
				if (priv->dev_up_gfmux) {
					iowrite32(0x1, clk_muxsel);
					muxsel = ioread32(clk_muxsel);
				}
				lpass_cdc_clk_rsc_mux0_clk_request(priv, default_clk_id,
								   false);
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/*
			 * Temp SW workaround to address a glitch issue of
			 * the VA GFMux instance responsible for switching
			 * from TX MCLK to VA MCLK. This configuration would
			 * be taken care of in the DSP itself.
			 */
			if (clk_id != VA_CORE_CLK) {
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 default_clk_id,
									 true);
				if (!ret && priv->dev_up_gfmux) {
					iowrite32(0x0, clk_muxsel);
					muxsel = ioread32(clk_muxsel);
				}
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
			if (clk_id != VA_CORE_CLK && !ret)
				lpass_cdc_clk_rsc_mux0_clk_request(priv,
								   default_clk_id,
								   false);
		}
	}

	return ret;
err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);
err_clk:
	if (clk_id != VA_CORE_CLK)
		lpass_cdc_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}

static int lpass_cdc_clk_rsc_check_and_update_va_clk(struct lpass_cdc_clk_rsc *priv,
						     bool mux_switch,
						     int clk_id,
						     bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, active clk is VA_CORE_CLK
			 * 2. Record -> Record + VA, active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = lpass_cdc_clk_rsc_mux1_clk_request(priv,
									 VA_CORE_CLK,
									 enable);
				if (ret < 0)
					goto err;
			} else {
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 TX_CORE_CLK,
									 enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle the following concurrency scenarios during enable
			 * 1. VA -> Record + VA, increment TX CLK and disable VA
			 * 2. VA -> Playback + VA, increment TX CLK and disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = lpass_cdc_clk_rsc_mux0_clk_request(priv,
									 TX_CORE_CLK,
									 true);
				if (ret < 0)
					goto err;
				lpass_cdc_clk_rsc_mux1_clk_request(priv,
								   VA_CORE_CLK,
								   false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				lpass_cdc_clk_rsc_mux1_clk_request(priv,
								   VA_CORE_CLK,
								   enable);
			} else if (priv->va_tx_clk_cnt) {
				lpass_cdc_clk_rsc_mux0_clk_request(priv,
								   TX_CORE_CLK,
								   enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenario during disable
			 * Record + VA -> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = lpass_cdc_clk_rsc_mux1_clk_request(priv,
									 VA_CORE_CLK,
									 true);
				if (ret < 0)
					goto err;
				lpass_cdc_clk_rsc_mux0_clk_request(priv,
								   TX_CORE_CLK,
								   false);
				priv->va_tx_clk_cnt--;
			}
		}
	}
err:
	return ret;
}

/**
 * lpass_cdc_clk_rsc_fs_gen_request - request to enable/disable the fs
 * generation sequence
 *
 * @dev: Macro device pointer
 * @enable: enable or disable flag
 */
void lpass_cdc_clk_rsc_fs_gen_request(struct device *dev, bool enable)
{
	int i;
	struct regmap *regmap;
	struct device *clk_dev = NULL;
	struct lpass_cdc_clk_rsc *priv = NULL;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}

	clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return;
	}

	regmap = dev_get_regmap(priv->dev->parent, NULL);
	if (!regmap) {
		pr_err("%s: regmap is null\n", __func__);
		return;
	}

	mutex_lock(&priv->fs_gen_lock);
	if (enable) {
		if (priv->reg_seq_en_cnt++ == 0) {
			for (i = 0; i < (priv->num_fs_reg * 3); i += 3) {
				dev_dbg(priv->dev, "%s: Register: %d, mask: %d, value: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1],
					priv->fs_gen_seq[i + 2]);
				regmap_update_bits(regmap,
						   priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1],
						   priv->fs_gen_seq[i + 2]);
			}
		}
	} else {
		if (priv->reg_seq_en_cnt <= 0) {
			dev_err_ratelimited(priv->dev, "%s: reg_seq_en_cnt: %d is already disabled\n",
					    __func__, priv->reg_seq_en_cnt);
			priv->reg_seq_en_cnt = 0;
			mutex_unlock(&priv->fs_gen_lock);
			return;
		}
		if (--priv->reg_seq_en_cnt == 0) {
			for (i = ((priv->num_fs_reg - 1) * 3); i >= 0; i -= 3) {
				dev_dbg(priv->dev, "%s: Register: %d, mask: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap, priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1], 0x0);
			}
		}
	}
	mutex_unlock(&priv->fs_gen_lock);
}
EXPORT_SYMBOL(lpass_cdc_clk_rsc_fs_gen_request);

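/*
 * Illustrative devicetree fragment (addresses and values are made up):
 * qcom,fs-gen-sequence is consumed in probe as flat <reg mask value>
 * triplets, applied in order on enable and cleared in reverse order on
 * disable.
 *
 *	lpass-cdc-clk-rsc-mngr {
 *		compatible = "qcom,lpass-cdc-clk-rsc-mngr";
 *		qcom,fs-gen-sequence = <0x2000 0x1 0x1>,
 *				       <0x2004 0x3 0x3>;
 *	};
 */
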
/**
 * lpass_cdc_clk_rsc_request_clock - request to enable/disable a clock
 *
 * @dev: Macro device pointer.
 * @default_clk_id: mux0 core clock ID input.
 * @clk_id_req: core clock ID requested to enable/disable
 * @enable: enable or disable clock flag
 *
 * Returns 0 on success or -EINVAL on error.
 */
int lpass_cdc_clk_rsc_request_clock(struct device *dev,
				    int default_clk_id,
				    int clk_id_req,
				    bool enable)
{
	int ret = 0;
	struct device *clk_dev = NULL;
	struct lpass_cdc_clk_rsc *priv = NULL;
	bool mux_switch = false;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}

	/* Both IDs index per-clock arrays below, so each must be in range */
	if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) ||
	    (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
		pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
		       __func__, clk_id_req, default_clk_id);
		return -EINVAL;
	}

	clk_dev = lpass_cdc_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	if (!priv->dev_up && enable) {
		dev_err_ratelimited(priv->dev, "%s: SSR is in progress\n",
				    __func__);
		ret = -EINVAL;
		goto err;
	}
	priv->default_clk_id[clk_id_req] = default_clk_id;
	if (default_clk_id != clk_id_req)
		mux_switch = true;

	if (mux_switch) {
		if (clk_id_req != VA_CORE_CLK) {
			ret = lpass_cdc_clk_rsc_mux1_clk_request(priv, clk_id_req,
								 enable);
			if (ret < 0)
				goto err;
		}
	} else {
		ret = lpass_cdc_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
		if (ret < 0)
			goto err;
	}

	ret = lpass_cdc_clk_rsc_check_and_update_va_clk(priv, mux_switch,
							clk_id_req,
							enable);
	if (ret < 0)
		goto err;

	dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
		__func__, priv->clk_cnt[clk_id_req], clk_id_req, enable);

	mutex_unlock(&priv->rsc_clk_lock);
	return 0;
err:
	mutex_unlock(&priv->rsc_clk_lock);
	return ret;
}
EXPORT_SYMBOL(lpass_cdc_clk_rsc_request_clock);

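/*
 * Illustrative usage sketch (hypothetical caller): passing the same ID
 * for default_clk_id and clk_id_req takes the refcounted mux0 path;
 * differing IDs route the request through mux1. Enable/disable calls
 * must be balanced.
 *
 *	ret = lpass_cdc_clk_rsc_request_clock(macro_dev, TX_CORE_CLK,
 *					      TX_CORE_CLK, true);
 *	if (ret < 0)
 *		return ret;
 *	// ... use the clock ...
 *	lpass_cdc_clk_rsc_request_clock(macro_dev, TX_CORE_CLK,
 *					TX_CORE_CLK, false);
 */
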
static int lpass_cdc_clk_rsc_probe(struct platform_device *pdev)
{
	int ret = 0, fs_gen_size, i, j;
	const char **clk_name_array;
	int clk_cnt;
	struct clk *clk;
	struct lpass_cdc_clk_rsc *priv = NULL;
	u32 muxsel = 0;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct lpass_cdc_clk_rsc),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Get clk fs gen sequence from device tree */
	if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
			      &fs_gen_size)) {
		dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
			__func__);
		ret = -EINVAL;
		goto err;
	}
	priv->num_fs_reg = fs_gen_size / (3 * sizeof(u32));
	priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
	if (!priv->fs_gen_seq) {
		ret = -ENOMEM;
		goto err;
	}
	dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);

	/* Parse fs-gen-sequence */
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "qcom,fs-gen-sequence",
					 priv->fs_gen_seq,
					 priv->num_fs_reg * 3);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
			__func__, ret);
		goto err;
	}

	/* Get clk details from device tree */
	clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
	if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
		dev_err(&pdev->dev, "%s: Invalid number of clocks %d\n",
			__func__, clk_cnt);
		ret = -EINVAL;
		goto err;
	}
	clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
				      GFP_KERNEL);
	if (!clk_name_array) {
		ret = -ENOMEM;
		goto err;
	}
	ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
					    clk_name_array, clk_cnt);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: unable to read clock-names, ret = %d\n",
			__func__, ret);
		goto err;
	}

	for (i = 0; i < MAX_CLK; i++) {
		priv->clk[i] = NULL;
		for (j = 0; j < clk_cnt; j++) {
			if (!strcmp(clk_src_name[i], clk_name_array[j])) {
				clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
				if (IS_ERR(clk)) {
					ret = PTR_ERR(clk);
					dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
						__func__, clk_src_name[i], ret);
					goto err;
				}
				priv->clk[i] = clk;
				dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
					__func__, clk_src_name[i]);
				break;
			}
		}
	}

	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,rx_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->rx_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
				__func__);
			return -ENOMEM;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,wsa_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->wsa_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
				__func__);
			return -ENOMEM;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,va_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->va_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
				__func__);
			return -ENOMEM;
		}
	}

	ret = lpass_cdc_register_res_clk(&pdev->dev, lpass_cdc_clk_rsc_cb);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: Failed to register cb %d\n",
			__func__, ret);
		goto err;
	}
	priv->dev = &pdev->dev;
	priv->dev_up = true;
	priv->dev_up_gfmux = true;
	mutex_init(&priv->rsc_clk_lock);
	mutex_init(&priv->fs_gen_lock);
	dev_set_drvdata(&pdev->dev, priv);
err:
	return ret;
}

static int lpass_cdc_clk_rsc_remove(struct platform_device *pdev)
{
	struct lpass_cdc_clk_rsc *priv = dev_get_drvdata(&pdev->dev);

	lpass_cdc_unregister_res_clk(&pdev->dev);
	of_platform_depopulate(&pdev->dev);
	if (!priv)
		return -EINVAL;
	mutex_destroy(&priv->rsc_clk_lock);
	mutex_destroy(&priv->fs_gen_lock);

	return 0;
}

static const struct of_device_id lpass_cdc_clk_rsc_dt_match[] = {
	{ .compatible = "qcom,lpass-cdc-clk-rsc-mngr" },
	{}
};
MODULE_DEVICE_TABLE(of, lpass_cdc_clk_rsc_dt_match);

static struct platform_driver lpass_cdc_clk_rsc_mgr = {
	.driver = {
		.name = "lpass-cdc-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = lpass_cdc_clk_rsc_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = lpass_cdc_clk_rsc_probe,
	.remove = lpass_cdc_clk_rsc_remove,
};

int lpass_cdc_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&lpass_cdc_clk_rsc_mgr);
}

void lpass_cdc_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&lpass_cdc_clk_rsc_mgr);
}
MODULE_DESCRIPTION("LPASS codec clock resource manager driver");
MODULE_LICENSE("GPL v2");