bolero-clk-rsc.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/of_platform.h>
  6. #include <linux/module.h>
  7. #include <linux/io.h>
  8. #include <linux/init.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/kernel.h>
  11. #include <linux/clk.h>
  12. #include <linux/clk-provider.h>
  13. #include <linux/ratelimit.h>
  14. #include "bolero-cdc.h"
  15. #include "bolero-clk-rsc.h"
  16. #define DRV_NAME "bolero-clk-rsc"
  17. #define BOLERO_CLK_NAME_LENGTH 30
  18. #define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)
/*
 * Clock names looked up in the DT "clock-names" property.
 * Order must match the clock id enum used to index priv->clk[]:
 * each NPL entry sits NPL_CLK_OFFSET slots after its core entry.
 */
static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};
/* Per-device state for the Bolero clock resource manager. */
struct bolero_clk_rsc {
	struct device *dev;
	/* Serializes clock enable/disable and SSR state transitions */
	struct mutex rsc_clk_lock;
	/* Serializes the fs-gen register sequence refcounting */
	struct mutex fs_gen_lock;
	/* Clock handles indexed by clk id (core clocks + NPL companions) */
	struct clk *clk[MAX_CLK];
	/* Per-clock enable reference counts */
	int clk_cnt[MAX_CLK];
	/* Refcount for the fs-gen register write sequence */
	int reg_seq_en_cnt;
	/* Number of VA requests currently satisfied by TX_CORE_CLK */
	int va_tx_clk_cnt;
	bool dev_up;		/* false while SSR (subsystem restart) is in progress */
	bool dev_up_gfmux;	/* true once the VA GFMux may be programmed */
	u32 num_fs_reg;		/* number of (reg, mask, value) triplets below */
	u32 *fs_gen_seq;	/* flattened reg/mask/value triplets from DT */
	/* mux0 fallback clock id recorded per requested clock */
	int default_clk_id[MAX_CLK];
	/* NOTE(review): not referenced in this file — verify before removing */
	struct regmap *regmap;
	/* MCLK mode mux-select registers, ioremapped from DT addresses */
	char __iomem *rx_clk_muxsel;
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};
  47. static int bolero_clk_rsc_cb(struct device *dev, u16 event)
  48. {
  49. struct bolero_clk_rsc *priv;
  50. if (!dev) {
  51. pr_err("%s: Invalid device pointer\n",
  52. __func__);
  53. return -EINVAL;
  54. }
  55. priv = dev_get_drvdata(dev);
  56. if (!priv) {
  57. pr_err("%s: Invalid clk rsc priviate data\n",
  58. __func__);
  59. return -EINVAL;
  60. }
  61. mutex_lock(&priv->rsc_clk_lock);
  62. if (event == BOLERO_MACRO_EVT_SSR_UP) {
  63. priv->dev_up = true;
  64. } else if (event == BOLERO_MACRO_EVT_SSR_DOWN) {
  65. priv->dev_up = false;
  66. priv->dev_up_gfmux = false;
  67. } else if (event == BOLERO_MACRO_EVT_SSR_GFMUX_UP) {
  68. priv->dev_up_gfmux = true;
  69. }
  70. mutex_unlock(&priv->rsc_clk_lock);
  71. return 0;
  72. }
  73. static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
  74. int clk_id)
  75. {
  76. switch (clk_id) {
  77. case RX_CORE_CLK:
  78. return priv->rx_clk_muxsel;
  79. case WSA_CORE_CLK:
  80. return priv->wsa_clk_muxsel;
  81. case VA_CORE_CLK:
  82. return priv->va_clk_muxsel;
  83. case TX_CORE_CLK:
  84. default:
  85. dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
  86. break;
  87. }
  88. return NULL;
  89. }
  90. int bolero_rsc_clk_reset(struct device *dev, int clk_id)
  91. {
  92. struct device *clk_dev = NULL;
  93. struct bolero_clk_rsc *priv = NULL;
  94. int count = 0;
  95. if (!dev) {
  96. pr_err("%s: dev is null %d\n", __func__);
  97. return -EINVAL;
  98. }
  99. if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
  100. pr_err("%s: Invalid clk_id: %d\n",
  101. __func__, clk_id);
  102. return -EINVAL;
  103. }
  104. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  105. if (!clk_dev) {
  106. pr_err("%s: Invalid rsc clk device\n", __func__);
  107. return -EINVAL;
  108. }
  109. priv = dev_get_drvdata(clk_dev);
  110. if (!priv) {
  111. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  112. return -EINVAL;
  113. }
  114. mutex_lock(&priv->rsc_clk_lock);
  115. while (__clk_is_enabled(priv->clk[clk_id])) {
  116. clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
  117. clk_disable_unprepare(priv->clk[clk_id]);
  118. count++;
  119. }
  120. dev_dbg(priv->dev,
  121. "%s: clock reset after ssr, count %d\n", __func__, count);
  122. trace_printk("%s: clock reset after ssr, count %d\n", __func__, count);
  123. while (count--) {
  124. clk_prepare_enable(priv->clk[clk_id]);
  125. clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
  126. }
  127. mutex_unlock(&priv->rsc_clk_lock);
  128. return 0;
  129. }
  130. EXPORT_SYMBOL(bolero_rsc_clk_reset);
  131. void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
  132. {
  133. struct device *clk_dev = NULL;
  134. struct bolero_clk_rsc *priv = NULL;
  135. int i = 0;
  136. if (!dev) {
  137. pr_err("%s: dev is null %d\n", __func__);
  138. return;
  139. }
  140. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  141. if (!clk_dev) {
  142. pr_err("%s: Invalid rsc clk device\n", __func__);
  143. return;
  144. }
  145. priv = dev_get_drvdata(clk_dev);
  146. if (!priv) {
  147. pr_err("%s: Invalid rsc clk private data\n", __func__);
  148. return;
  149. }
  150. mutex_lock(&priv->rsc_clk_lock);
  151. for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
  152. if (enable) {
  153. if (priv->clk[i])
  154. clk_prepare_enable(priv->clk[i]);
  155. if (priv->clk[i + NPL_CLK_OFFSET])
  156. clk_prepare_enable(
  157. priv->clk[i + NPL_CLK_OFFSET]);
  158. } else {
  159. if (priv->clk[i + NPL_CLK_OFFSET])
  160. clk_disable_unprepare(
  161. priv->clk[i + NPL_CLK_OFFSET]);
  162. if (priv->clk[i])
  163. clk_disable_unprepare(priv->clk[i]);
  164. }
  165. }
  166. mutex_unlock(&priv->rsc_clk_lock);
  167. return;
  168. }
  169. EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);
/*
 * bolero_clk_rsc_mux0_clk_request - refcounted enable/disable of a
 * clock on the mux0 (default source) path.
 *
 * @priv: clk rsc private data
 * @clk_id: core clock id; its NPL sibling (clk_id + NPL_CLK_OFFSET)
 *          is enabled/disabled alongside when present
 * @enable: true to take a reference, false to drop one
 *
 * Hardware is only touched on the 0 -> 1 and 1 -> 0 refcount
 * transitions.  Enable order is core then NPL; disable order is the
 * reverse.  Caller is expected to hold priv->rsc_clk_lock (all
 * callers in this file do).
 *
 * Returns 0 on success or a negative errno from clk_prepare_enable().
 */
static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	int ret = 0;
	/* Rate-limit the failure logs to one per second */
	static DEFINE_RATELIMIT_STATE(rtl, 1 * HZ, 1);

	if (enable) {
		/* Enable Requested Core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				if (__ratelimit(&rtl))
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						__func__, clk_id);
				goto done;
			}
			/* NPL companion clock, if this core clk has one */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					if (__ratelimit(&rtl))
						dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							__func__,
							clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Guard against unbalanced disables; clamp count at zero */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
				__func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Last reference: NPL off first, then the core clk */
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}
	return ret;

err:
	/* NPL enable failed: roll back the core clock enable */
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}
/*
 * bolero_clk_rsc_mux1_clk_request - refcounted enable/disable of a
 * clock routed through a GFMux (the mux1 path).
 *
 * @priv: clk rsc private data
 * @clk_id: core clock id; must map to a muxsel register
 *          (TX_CORE_CLK does not and is rejected via the NULL muxsel)
 * @enable: true to take a reference, false to drop one
 *
 * On the 0 -> 1 transition the mux0 default clock is enabled
 * temporarily so the mux-select register can be flipped while both
 * sources run, then released.  The reverse sequence happens on the
 * 1 -> 0 transition.  VA_CORE_CLK skips the default-clock bracketing
 * and the muxsel writes (see the workaround comments below).  The
 * muxsel register is only touched while priv->dev_up_gfmux is true.
 *
 * Returns 0 on success or a negative errno.
 */
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];
	u32 muxsel = 0;
	/* Rate-limit the failure logs to one per second */
	static DEFINE_RATELIMIT_STATE(rtl, 1 * HZ, 1);

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}
	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* Bring up the mux0 default source first */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								true);
				if (ret < 0)
					goto done;
			}
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				if (__ratelimit(&rtl))
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						__func__, clk_id);
				goto err_clk;
			}
			/* NPL companion clock, if this core clk has one */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					if (__ratelimit(&rtl))
						dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							__func__,
							clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			/*
			 * Temp SW workaround to address a glitch issue of
			 * VA GFMux instance responsible for switching from
			 * TX MCLK to VA MCLK. This configuration would be taken
			 * care in DSP itself
			 */
			if (clk_id != VA_CORE_CLK) {
				if (priv->dev_up_gfmux) {
					/* Select the mux1 source (read back to post the write) */
					iowrite32(0x1, clk_muxsel);
					muxsel = ioread32(clk_muxsel);
					trace_printk("%s: muxsel value after enable: %d\n",
						__func__, muxsel);
				}
				/* Drop the temporary default-source reference */
				bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								false);
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Guard against unbalanced disables; clamp count at zero */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
				__func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Re-enable the default source while flipping back */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, true);
				if (!ret) {
					/*
					 * Temp SW workaround to address a glitch issue
					 * of VA GFMux instance responsible for
					 * switching from TX MCLK to VA MCLK.
					 * This configuration would be taken
					 * care in DSP itself.
					 */
					if (priv->dev_up_gfmux) {
						iowrite32(0x0, clk_muxsel);
						muxsel = ioread32(clk_muxsel);
						trace_printk("%s: muxsel value after disable: %d\n",
							__func__, muxsel);
					}
				}
			}
			/* NPL off first, then the core clock */
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
			/* Release the temporary default-source reference */
			if (clk_id != VA_CORE_CLK) {
				if (!ret)
					bolero_clk_rsc_mux0_clk_request(priv,
							default_clk_id, false);
			}
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);
err_clk:
	if (clk_id != VA_CORE_CLK)
		bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
/*
 * bolero_clk_rsc_check_and_update_va_clk - keep the VA clock source
 * consistent with concurrent TX usage.
 *
 * @priv: clk rsc private data
 * @mux_switch: true when the request came in on the mux1 path
 * @clk_id: clock id of the current request
 * @enable: direction of the current request
 *
 * VA can be clocked either from its own VA_CORE_CLK (mux1) or ride on
 * TX_CORE_CLK when recording is active; priv->va_tx_clk_cnt tracks how
 * many VA requests are currently satisfied by TX.  Called from
 * bolero_clk_rsc_request_clock() with rsc_clk_lock held.
 *
 * Returns 0 on success or a negative errno from the clock requests.
 */
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				/* Ride on TX clock instead of enabling VA */
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}
err:
	return ret;
}
  404. /**
  405. * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
  406. * sequence
  407. *
  408. * @dev: Macro device pointer
  409. * @enable: enable or disable flag
  410. */
  411. void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
  412. {
  413. int i;
  414. struct regmap *regmap;
  415. struct device *clk_dev = NULL;
  416. struct bolero_clk_rsc *priv = NULL;
  417. if (!dev) {
  418. pr_err("%s: dev is null %d\n", __func__);
  419. return;
  420. }
  421. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  422. if (!clk_dev) {
  423. pr_err("%s: Invalid rsc clk device\n", __func__);
  424. return;
  425. }
  426. priv = dev_get_drvdata(clk_dev);
  427. if (!priv) {
  428. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  429. return;
  430. }
  431. regmap = dev_get_regmap(priv->dev->parent, NULL);
  432. if (!regmap) {
  433. pr_err("%s: regmap is null\n", __func__);
  434. return;
  435. }
  436. mutex_lock(&priv->fs_gen_lock);
  437. if (enable) {
  438. if (priv->reg_seq_en_cnt++ == 0) {
  439. for (i = 0; i < (priv->num_fs_reg * 3); i += 3) {
  440. dev_dbg(priv->dev, "%s: Register: %d, mask: %d, value %d\n",
  441. __func__, priv->fs_gen_seq[i],
  442. priv->fs_gen_seq[i + 1],
  443. priv->fs_gen_seq[i + 2]);
  444. regmap_update_bits(regmap,
  445. priv->fs_gen_seq[i],
  446. priv->fs_gen_seq[i + 1],
  447. priv->fs_gen_seq[i + 2]);
  448. }
  449. }
  450. } else {
  451. if (priv->reg_seq_en_cnt <= 0) {
  452. dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
  453. __func__, priv->reg_seq_en_cnt);
  454. priv->reg_seq_en_cnt = 0;
  455. mutex_unlock(&priv->fs_gen_lock);
  456. return;
  457. }
  458. if (--priv->reg_seq_en_cnt == 0) {
  459. for (i = ((priv->num_fs_reg - 1) * 3); i >= 0; i -= 3) {
  460. dev_dbg(priv->dev, "%s: Register: %d, mask: %d\n",
  461. __func__, priv->fs_gen_seq[i],
  462. priv->fs_gen_seq[i + 1]);
  463. regmap_update_bits(regmap, priv->fs_gen_seq[i],
  464. priv->fs_gen_seq[i + 1], 0x0);
  465. }
  466. }
  467. }
  468. mutex_unlock(&priv->fs_gen_lock);
  469. }
  470. EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
  471. /**
  472. * bolero_clk_rsc_request_clock - request for clock to
  473. * enable/disable
  474. *
  475. * @dev: Macro device pointer.
  476. * @default_clk_id: mux0 Core clock ID input.
  477. * @clk_id_req: Core clock ID requested to enable/disable
  478. * @enable: enable or disable clock flag
  479. *
  480. * Returns 0 on success or -EINVAL on error.
  481. */
  482. int bolero_clk_rsc_request_clock(struct device *dev,
  483. int default_clk_id,
  484. int clk_id_req,
  485. bool enable)
  486. {
  487. int ret = 0;
  488. struct device *clk_dev = NULL;
  489. struct bolero_clk_rsc *priv = NULL;
  490. bool mux_switch = false;
  491. if (!dev) {
  492. pr_err("%s: dev is null %d\n", __func__);
  493. return -EINVAL;
  494. }
  495. if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
  496. (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
  497. pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
  498. __func__, clk_id_req, default_clk_id);
  499. return -EINVAL;
  500. }
  501. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  502. if (!clk_dev) {
  503. pr_err("%s: Invalid rsc clk device\n", __func__);
  504. return -EINVAL;
  505. }
  506. priv = dev_get_drvdata(clk_dev);
  507. if (!priv) {
  508. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  509. return -EINVAL;
  510. }
  511. mutex_lock(&priv->rsc_clk_lock);
  512. if (!priv->dev_up && enable) {
  513. dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
  514. __func__);
  515. trace_printk("%s: SSR is in progress..\n", __func__);
  516. ret = -EINVAL;
  517. goto err;
  518. }
  519. priv->default_clk_id[clk_id_req] = default_clk_id;
  520. if (default_clk_id != clk_id_req)
  521. mux_switch = true;
  522. if (mux_switch) {
  523. if (clk_id_req != VA_CORE_CLK) {
  524. ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
  525. enable);
  526. if (ret < 0)
  527. goto err;
  528. }
  529. } else {
  530. ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
  531. if (ret < 0)
  532. goto err;
  533. }
  534. ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
  535. clk_id_req,
  536. enable);
  537. if (ret < 0)
  538. goto err;
  539. dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  540. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  541. enable);
  542. trace_printk("%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  543. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  544. enable);
  545. mutex_unlock(&priv->rsc_clk_lock);
  546. return 0;
  547. err:
  548. mutex_unlock(&priv->rsc_clk_lock);
  549. return ret;
  550. }
  551. EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
  552. static int bolero_clk_rsc_probe(struct platform_device *pdev)
  553. {
  554. int ret = 0, fs_gen_size, i, j;
  555. const char **clk_name_array;
  556. int clk_cnt;
  557. struct clk *clk;
  558. struct bolero_clk_rsc *priv = NULL;
  559. u32 muxsel = 0;
  560. priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
  561. GFP_KERNEL);
  562. if (!priv)
  563. return -ENOMEM;
  564. /* Get clk fs gen sequence from device tree */
  565. if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
  566. &fs_gen_size)) {
  567. dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
  568. __func__);
  569. ret = -EINVAL;
  570. goto err;
  571. }
  572. priv->num_fs_reg = fs_gen_size/(3 * sizeof(u32));
  573. priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
  574. if (!priv->fs_gen_seq) {
  575. ret = -ENOMEM;
  576. goto err;
  577. }
  578. dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
  579. /* Parse fs-gen-sequence */
  580. ret = of_property_read_u32_array(pdev->dev.of_node,
  581. "qcom,fs-gen-sequence",
  582. priv->fs_gen_seq,
  583. priv->num_fs_reg * 3);
  584. if (ret < 0) {
  585. dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
  586. __func__, ret);
  587. goto err;
  588. }
  589. /* Get clk details from device tree */
  590. clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
  591. if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
  592. dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
  593. __func__, clk_cnt);
  594. ret = -EINVAL;
  595. goto err;
  596. }
  597. clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
  598. GFP_KERNEL);
  599. if (!clk_name_array) {
  600. ret = -ENOMEM;
  601. goto err;
  602. }
  603. ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
  604. clk_name_array, clk_cnt);
  605. for (i = 0; i < MAX_CLK; i++) {
  606. priv->clk[i] = NULL;
  607. for (j = 0; j < clk_cnt; j++) {
  608. if (!strcmp(clk_src_name[i], clk_name_array[j])) {
  609. clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
  610. if (IS_ERR(clk)) {
  611. ret = PTR_ERR(clk);
  612. dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
  613. __func__, clk_src_name[i], ret);
  614. goto err;
  615. }
  616. priv->clk[i] = clk;
  617. dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
  618. __func__, clk_src_name[i]);
  619. }
  620. }
  621. }
  622. ret = of_property_read_u32(pdev->dev.of_node,
  623. "qcom,rx_mclk_mode_muxsel", &muxsel);
  624. if (ret) {
  625. dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
  626. __func__);
  627. } else {
  628. priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  629. if (!priv->rx_clk_muxsel) {
  630. dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
  631. __func__);
  632. return -ENOMEM;
  633. }
  634. }
  635. ret = of_property_read_u32(pdev->dev.of_node,
  636. "qcom,wsa_mclk_mode_muxsel", &muxsel);
  637. if (ret) {
  638. dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
  639. __func__);
  640. } else {
  641. priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  642. if (!priv->wsa_clk_muxsel) {
  643. dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
  644. __func__);
  645. return -ENOMEM;
  646. }
  647. }
  648. ret = of_property_read_u32(pdev->dev.of_node,
  649. "qcom,va_mclk_mode_muxsel", &muxsel);
  650. if (ret) {
  651. dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
  652. __func__);
  653. } else {
  654. priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  655. if (!priv->va_clk_muxsel) {
  656. dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
  657. __func__);
  658. return -ENOMEM;
  659. }
  660. }
  661. ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
  662. if (ret < 0) {
  663. dev_err(&pdev->dev, "%s: Failed to register cb %d",
  664. __func__, ret);
  665. goto err;
  666. }
  667. priv->dev = &pdev->dev;
  668. priv->dev_up = true;
  669. priv->dev_up_gfmux = true;
  670. mutex_init(&priv->rsc_clk_lock);
  671. mutex_init(&priv->fs_gen_lock);
  672. dev_set_drvdata(&pdev->dev, priv);
  673. err:
  674. return ret;
  675. }
  676. static int bolero_clk_rsc_remove(struct platform_device *pdev)
  677. {
  678. struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
  679. bolero_unregister_res_clk(&pdev->dev);
  680. of_platform_depopulate(&pdev->dev);
  681. if (!priv)
  682. return -EINVAL;
  683. mutex_destroy(&priv->rsc_clk_lock);
  684. mutex_destroy(&priv->fs_gen_lock);
  685. return 0;
  686. }
/* Device-tree match table for the clock resource manager node. */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);
/* Platform driver; bind/unbind via sysfs is suppressed. */
static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
/*
 * NOTE(review): these are plain exported functions rather than
 * module_init/module_exit — presumably invoked by the bolero core
 * driver's init/exit; confirm against the caller.
 */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}

void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
  710. MODULE_DESCRIPTION("Bolero clock resource manager driver");
  711. MODULE_LICENSE("GPL v2");