bolero-clk-rsc.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/of_platform.h>
  7. #include <linux/module.h>
  8. #include <linux/io.h>
  9. #include <linux/init.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/kernel.h>
  12. #include <linux/clk.h>
  13. #include <linux/clk-provider.h>
  14. #include <linux/ratelimit.h>
  15. #include "bolero-cdc.h"
  16. #include "bolero-clk-rsc.h"
  17. #define DRV_NAME "bolero-clk-rsc"
  18. #define BOLERO_CLK_NAME_LENGTH 30
  19. #define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)
/*
 * Clock names indexed by clock ID: core clocks first, with each NPL
 * partner at index (core + NPL_CLK_OFFSET).  These strings must match
 * the device-tree "clock-names" entries parsed in probe.
 */
static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};
/* Private state of the bolero clock resource manager device */
struct bolero_clk_rsc {
	struct device *dev;		/* this rsc clk platform device */
	struct mutex rsc_clk_lock;	/* guards clk counters and SSR flags */
	struct mutex fs_gen_lock;	/* guards fs-gen sequence refcount */
	struct clk *clk[MAX_CLK];	/* handles; NULL when absent in DT */
	int clk_cnt[MAX_CLK];		/* per-clock enable refcounts */
	int reg_seq_en_cnt;		/* fs-gen sequence enable refcount */
	int va_tx_clk_cnt;		/* VA references riding on TX_CORE_CLK */
	bool dev_up;			/* cleared on SSR down, set on SSR up */
	bool dev_up_gfmux;		/* GFMux programmable (set on GFMUX_UP) */
	u32 num_fs_reg;			/* number of <reg mask value> triplets */
	u32 *fs_gen_seq;		/* fs-gen sequence parsed from DT */
	int default_clk_id[MAX_CLK];	/* mux0 fallback clock per requested clk */
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;	/* mapped MCLK mux-select registers */
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};
  48. static int bolero_clk_rsc_cb(struct device *dev, u16 event)
  49. {
  50. struct bolero_clk_rsc *priv;
  51. if (!dev) {
  52. pr_err("%s: Invalid device pointer\n",
  53. __func__);
  54. return -EINVAL;
  55. }
  56. priv = dev_get_drvdata(dev);
  57. if (!priv) {
  58. pr_err("%s: Invalid clk rsc priviate data\n",
  59. __func__);
  60. return -EINVAL;
  61. }
  62. mutex_lock(&priv->rsc_clk_lock);
  63. if (event == BOLERO_MACRO_EVT_SSR_UP) {
  64. priv->dev_up = true;
  65. } else if (event == BOLERO_MACRO_EVT_SSR_DOWN) {
  66. priv->dev_up = false;
  67. priv->dev_up_gfmux = false;
  68. } else if (event == BOLERO_MACRO_EVT_SSR_GFMUX_UP) {
  69. priv->dev_up_gfmux = true;
  70. }
  71. mutex_unlock(&priv->rsc_clk_lock);
  72. return 0;
  73. }
  74. static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
  75. int clk_id)
  76. {
  77. switch (clk_id) {
  78. case RX_CORE_CLK:
  79. return priv->rx_clk_muxsel;
  80. case WSA_CORE_CLK:
  81. return priv->wsa_clk_muxsel;
  82. case VA_CORE_CLK:
  83. return priv->va_clk_muxsel;
  84. case TX_CORE_CLK:
  85. default:
  86. dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
  87. break;
  88. }
  89. return NULL;
  90. }
  91. int bolero_rsc_clk_reset(struct device *dev, int clk_id)
  92. {
  93. struct device *clk_dev = NULL;
  94. struct bolero_clk_rsc *priv = NULL;
  95. int count = 0;
  96. if (!dev) {
  97. pr_err("%s: dev is null\n", __func__);
  98. return -EINVAL;
  99. }
  100. if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
  101. pr_err("%s: Invalid clk_id: %d\n",
  102. __func__, clk_id);
  103. return -EINVAL;
  104. }
  105. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  106. if (!clk_dev) {
  107. pr_err("%s: Invalid rsc clk device\n", __func__);
  108. return -EINVAL;
  109. }
  110. priv = dev_get_drvdata(clk_dev);
  111. if (!priv) {
  112. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  113. return -EINVAL;
  114. }
  115. mutex_lock(&priv->rsc_clk_lock);
  116. while (__clk_is_enabled(priv->clk[clk_id])) {
  117. clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
  118. clk_disable_unprepare(priv->clk[clk_id]);
  119. count++;
  120. }
  121. dev_dbg(priv->dev,
  122. "%s: clock reset after ssr, count %d\n", __func__, count);
  123. while (count--) {
  124. clk_prepare_enable(priv->clk[clk_id]);
  125. clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
  126. }
  127. mutex_unlock(&priv->rsc_clk_lock);
  128. return 0;
  129. }
  130. EXPORT_SYMBOL(bolero_rsc_clk_reset);
  131. void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
  132. {
  133. struct device *clk_dev = NULL;
  134. struct bolero_clk_rsc *priv = NULL;
  135. int i = 0;
  136. if (!dev) {
  137. pr_err("%s: dev is null\n", __func__);
  138. return;
  139. }
  140. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  141. if (!clk_dev) {
  142. pr_err("%s: Invalid rsc clk device\n", __func__);
  143. return;
  144. }
  145. priv = dev_get_drvdata(clk_dev);
  146. if (!priv) {
  147. pr_err("%s: Invalid rsc clk private data\n", __func__);
  148. return;
  149. }
  150. mutex_lock(&priv->rsc_clk_lock);
  151. for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
  152. if (enable) {
  153. if (priv->clk[i])
  154. clk_prepare_enable(priv->clk[i]);
  155. if (priv->clk[i + NPL_CLK_OFFSET])
  156. clk_prepare_enable(
  157. priv->clk[i + NPL_CLK_OFFSET]);
  158. } else {
  159. if (priv->clk[i + NPL_CLK_OFFSET])
  160. clk_disable_unprepare(
  161. priv->clk[i + NPL_CLK_OFFSET]);
  162. if (priv->clk[i])
  163. clk_disable_unprepare(priv->clk[i]);
  164. }
  165. }
  166. mutex_unlock(&priv->rsc_clk_lock);
  167. return;
  168. }
  169. EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);
  170. static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
  171. int clk_id,
  172. bool enable)
  173. {
  174. int ret = 0;
  175. static DEFINE_RATELIMIT_STATE(rtl, 1 * HZ, 1);
  176. if (enable) {
  177. /* Enable Requested Core clk */
  178. if (priv->clk_cnt[clk_id] == 0) {
  179. ret = clk_prepare_enable(priv->clk[clk_id]);
  180. if (ret < 0) {
  181. if (__ratelimit(&rtl))
  182. dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
  183. __func__, clk_id);
  184. goto done;
  185. }
  186. if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
  187. ret = clk_prepare_enable(
  188. priv->clk[clk_id + NPL_CLK_OFFSET]);
  189. if (ret < 0) {
  190. if (__ratelimit(&rtl))
  191. dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
  192. __func__,
  193. clk_id + NPL_CLK_OFFSET);
  194. goto err;
  195. }
  196. }
  197. }
  198. priv->clk_cnt[clk_id]++;
  199. } else {
  200. if (priv->clk_cnt[clk_id] <= 0) {
  201. dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
  202. __func__, clk_id);
  203. priv->clk_cnt[clk_id] = 0;
  204. goto done;
  205. }
  206. priv->clk_cnt[clk_id]--;
  207. if (priv->clk_cnt[clk_id] == 0) {
  208. if (priv->clk[clk_id + NPL_CLK_OFFSET])
  209. clk_disable_unprepare(
  210. priv->clk[clk_id + NPL_CLK_OFFSET]);
  211. clk_disable_unprepare(priv->clk[clk_id]);
  212. }
  213. }
  214. return ret;
  215. err:
  216. clk_disable_unprepare(priv->clk[clk_id]);
  217. done:
  218. return ret;
  219. }
  220. static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
  221. int clk_id,
  222. bool enable)
  223. {
  224. char __iomem *clk_muxsel = NULL;
  225. int ret = 0;
  226. int default_clk_id = priv->default_clk_id[clk_id];
  227. u32 muxsel = 0;
  228. static DEFINE_RATELIMIT_STATE(rtl, 1 * HZ, 1);
  229. clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
  230. if (!clk_muxsel) {
  231. ret = -EINVAL;
  232. goto done;
  233. }
  234. if (enable) {
  235. if (priv->clk_cnt[clk_id] == 0) {
  236. if (clk_id != VA_CORE_CLK) {
  237. ret = bolero_clk_rsc_mux0_clk_request(priv,
  238. default_clk_id,
  239. true);
  240. if (ret < 0)
  241. goto done;
  242. }
  243. ret = clk_prepare_enable(priv->clk[clk_id]);
  244. if (ret < 0) {
  245. if (__ratelimit(&rtl))
  246. dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
  247. __func__, clk_id);
  248. goto err_clk;
  249. }
  250. if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
  251. ret = clk_prepare_enable(
  252. priv->clk[clk_id + NPL_CLK_OFFSET]);
  253. if (ret < 0) {
  254. if (__ratelimit(&rtl))
  255. dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
  256. __func__,
  257. clk_id + NPL_CLK_OFFSET);
  258. goto err_npl_clk;
  259. }
  260. }
  261. /*
  262. * Temp SW workaround to address a glitch issue of
  263. * VA GFMux instance responsible for switching from
  264. * TX MCLK to VA MCLK. This configuration would be taken
  265. * care in DSP itself
  266. */
  267. if (clk_id != VA_CORE_CLK) {
  268. if (priv->dev_up_gfmux) {
  269. iowrite32(0x1, clk_muxsel);
  270. muxsel = ioread32(clk_muxsel);
  271. }
  272. bolero_clk_rsc_mux0_clk_request(priv,
  273. default_clk_id,
  274. false);
  275. }
  276. }
  277. priv->clk_cnt[clk_id]++;
  278. } else {
  279. if (priv->clk_cnt[clk_id] <= 0) {
  280. dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
  281. __func__, clk_id);
  282. priv->clk_cnt[clk_id] = 0;
  283. goto done;
  284. }
  285. priv->clk_cnt[clk_id]--;
  286. if (priv->clk_cnt[clk_id] == 0) {
  287. if (clk_id != VA_CORE_CLK) {
  288. ret = bolero_clk_rsc_mux0_clk_request(priv,
  289. default_clk_id, true);
  290. if (!ret) {
  291. /*
  292. * Temp SW workaround to address a glitch issue
  293. * of VA GFMux instance responsible for
  294. * switching from TX MCLK to VA MCLK.
  295. * This configuration would be taken
  296. * care in DSP itself.
  297. */
  298. if (priv->dev_up_gfmux) {
  299. iowrite32(0x0, clk_muxsel);
  300. muxsel = ioread32(clk_muxsel);
  301. }
  302. }
  303. }
  304. if (priv->clk[clk_id + NPL_CLK_OFFSET])
  305. clk_disable_unprepare(
  306. priv->clk[clk_id + NPL_CLK_OFFSET]);
  307. clk_disable_unprepare(priv->clk[clk_id]);
  308. if (clk_id != VA_CORE_CLK) {
  309. if (!ret)
  310. bolero_clk_rsc_mux0_clk_request(priv,
  311. default_clk_id, false);
  312. }
  313. }
  314. }
  315. return ret;
  316. err_npl_clk:
  317. clk_disable_unprepare(priv->clk[clk_id]);
  318. err_clk:
  319. if (clk_id != VA_CORE_CLK)
  320. bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
  321. done:
  322. return ret;
  323. }
  324. static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
  325. bool mux_switch,
  326. int clk_id,
  327. bool enable)
  328. {
  329. int ret = 0;
  330. if (enable) {
  331. if (clk_id == VA_CORE_CLK && mux_switch) {
  332. /*
  333. * Handle the following usecase scenarios during enable
  334. * 1. VA only, Active clk is VA_CORE_CLK
  335. * 2. record -> record + VA, Active clk is TX_CORE_CLK
  336. */
  337. if (priv->clk_cnt[TX_CORE_CLK] == 0) {
  338. ret = bolero_clk_rsc_mux1_clk_request(priv,
  339. VA_CORE_CLK, enable);
  340. if (ret < 0)
  341. goto err;
  342. } else {
  343. ret = bolero_clk_rsc_mux0_clk_request(priv,
  344. TX_CORE_CLK, enable);
  345. if (ret < 0)
  346. goto err;
  347. priv->va_tx_clk_cnt++;
  348. }
  349. } else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
  350. (priv->clk_cnt[VA_CORE_CLK] > 0)) {
  351. /*
  352. * Handle following concurrency scenario during enable
  353. * 1. VA-> Record+VA, Increment TX CLK and Disable VA
  354. * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
  355. */
  356. while (priv->clk_cnt[VA_CORE_CLK] > 0) {
  357. ret = bolero_clk_rsc_mux0_clk_request(priv,
  358. TX_CORE_CLK, true);
  359. if (ret < 0)
  360. goto err;
  361. bolero_clk_rsc_mux1_clk_request(priv,
  362. VA_CORE_CLK, false);
  363. priv->va_tx_clk_cnt++;
  364. }
  365. }
  366. } else {
  367. if (clk_id == VA_CORE_CLK && mux_switch) {
  368. /*
  369. * Handle the following usecase scenarios during disable
  370. * 1. VA only, disable VA_CORE_CLK
  371. * 2. Record + VA -> Record, decrement TX CLK count
  372. */
  373. if (priv->clk_cnt[VA_CORE_CLK]) {
  374. bolero_clk_rsc_mux1_clk_request(priv,
  375. VA_CORE_CLK, enable);
  376. } else if (priv->va_tx_clk_cnt) {
  377. bolero_clk_rsc_mux0_clk_request(priv,
  378. TX_CORE_CLK, enable);
  379. priv->va_tx_clk_cnt--;
  380. }
  381. } else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
  382. /*
  383. * Handle the following usecase scenarios during disable
  384. * Record+VA-> VA: enable VA CLK, decrement TX CLK count
  385. */
  386. while (priv->va_tx_clk_cnt) {
  387. ret = bolero_clk_rsc_mux1_clk_request(priv,
  388. VA_CORE_CLK, true);
  389. if (ret < 0)
  390. goto err;
  391. bolero_clk_rsc_mux0_clk_request(priv,
  392. TX_CORE_CLK, false);
  393. priv->va_tx_clk_cnt--;
  394. }
  395. }
  396. }
  397. err:
  398. return ret;
  399. }
/**
 * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
 * sequence
 *
 * @dev: Macro device pointer
 * @enable: enable or disable flag
 *
 * Refcounted: only the first enable writes the <reg mask value> triplets
 * from DT (in order), and only the last disable clears the masked bits
 * again (in reverse order).  Serialized by priv->fs_gen_lock.
 */
void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
{
	int i;
	struct regmap *regmap;
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}
	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}
	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk priviate data\n", __func__);
		return;
	}
	/* regmap lives on the bolero parent device, not this child */
	regmap = dev_get_regmap(priv->dev->parent, NULL);
	if (!regmap) {
		pr_err("%s: regmap is null\n", __func__);
		return;
	}
	mutex_lock(&priv->fs_gen_lock);
	if (enable) {
		/* first enable applies the whole sequence */
		if (priv->reg_seq_en_cnt++ == 0) {
			for (i = 0; i < (priv->num_fs_reg * 3); i += 3) {
				dev_dbg(priv->dev, "%s: Register: %d, mask: %d, value %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1],
					priv->fs_gen_seq[i + 2]);
				regmap_update_bits(regmap,
					priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1],
					priv->fs_gen_seq[i + 2]);
			}
		}
	} else {
		/* unbalanced disable: clamp the count and bail out */
		if (priv->reg_seq_en_cnt <= 0) {
			dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
				__func__, priv->reg_seq_en_cnt);
			priv->reg_seq_en_cnt = 0;
			mutex_unlock(&priv->fs_gen_lock);
			return;
		}
		/* last disable clears the masked bits in reverse order */
		if (--priv->reg_seq_en_cnt == 0) {
			for (i = ((priv->num_fs_reg - 1) * 3); i >= 0; i -= 3) {
				dev_dbg(priv->dev, "%s: Register: %d, mask: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1], 0x0);
			}
		}
	}
	mutex_unlock(&priv->fs_gen_lock);
}
EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
  467. /**
  468. * bolero_clk_rsc_request_clock - request for clock to
  469. * enable/disable
  470. *
  471. * @dev: Macro device pointer.
  472. * @default_clk_id: mux0 Core clock ID input.
  473. * @clk_id_req: Core clock ID requested to enable/disable
  474. * @enable: enable or disable clock flag
  475. *
  476. * Returns 0 on success or -EINVAL on error.
  477. */
  478. int bolero_clk_rsc_request_clock(struct device *dev,
  479. int default_clk_id,
  480. int clk_id_req,
  481. bool enable)
  482. {
  483. int ret = 0;
  484. struct device *clk_dev = NULL;
  485. struct bolero_clk_rsc *priv = NULL;
  486. bool mux_switch = false;
  487. if (!dev) {
  488. pr_err("%s: dev is null\n", __func__);
  489. return -EINVAL;
  490. }
  491. if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
  492. (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
  493. pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
  494. __func__, clk_id_req, default_clk_id);
  495. return -EINVAL;
  496. }
  497. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  498. if (!clk_dev) {
  499. pr_err("%s: Invalid rsc clk device\n", __func__);
  500. return -EINVAL;
  501. }
  502. priv = dev_get_drvdata(clk_dev);
  503. if (!priv) {
  504. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  505. return -EINVAL;
  506. }
  507. mutex_lock(&priv->rsc_clk_lock);
  508. if (!priv->dev_up && enable) {
  509. dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
  510. __func__);
  511. ret = -EINVAL;
  512. goto err;
  513. }
  514. priv->default_clk_id[clk_id_req] = default_clk_id;
  515. if (default_clk_id != clk_id_req)
  516. mux_switch = true;
  517. if (mux_switch) {
  518. if (clk_id_req != VA_CORE_CLK) {
  519. ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
  520. enable);
  521. if (ret < 0)
  522. goto err;
  523. }
  524. } else {
  525. ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
  526. if (ret < 0)
  527. goto err;
  528. }
  529. ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
  530. clk_id_req,
  531. enable);
  532. if (ret < 0)
  533. goto err;
  534. dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  535. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  536. enable);
  537. mutex_unlock(&priv->rsc_clk_lock);
  538. return 0;
  539. err:
  540. mutex_unlock(&priv->rsc_clk_lock);
  541. return ret;
  542. }
  543. EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
  544. static int bolero_clk_rsc_probe(struct platform_device *pdev)
  545. {
  546. int ret = 0, fs_gen_size, i, j;
  547. const char **clk_name_array;
  548. int clk_cnt;
  549. struct clk *clk;
  550. struct bolero_clk_rsc *priv = NULL;
  551. u32 muxsel = 0;
  552. priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
  553. GFP_KERNEL);
  554. if (!priv)
  555. return -ENOMEM;
  556. /* Get clk fs gen sequence from device tree */
  557. if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
  558. &fs_gen_size)) {
  559. dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
  560. __func__);
  561. ret = -EINVAL;
  562. goto err;
  563. }
  564. priv->num_fs_reg = fs_gen_size/(3 * sizeof(u32));
  565. priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
  566. if (!priv->fs_gen_seq) {
  567. ret = -ENOMEM;
  568. goto err;
  569. }
  570. dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
  571. /* Parse fs-gen-sequence */
  572. ret = of_property_read_u32_array(pdev->dev.of_node,
  573. "qcom,fs-gen-sequence",
  574. priv->fs_gen_seq,
  575. priv->num_fs_reg * 3);
  576. if (ret < 0) {
  577. dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
  578. __func__, ret);
  579. goto err;
  580. }
  581. /* Get clk details from device tree */
  582. clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
  583. if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
  584. dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
  585. __func__, clk_cnt);
  586. ret = -EINVAL;
  587. goto err;
  588. }
  589. clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
  590. GFP_KERNEL);
  591. if (!clk_name_array) {
  592. ret = -ENOMEM;
  593. goto err;
  594. }
  595. ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
  596. clk_name_array, clk_cnt);
  597. for (i = 0; i < MAX_CLK; i++) {
  598. priv->clk[i] = NULL;
  599. for (j = 0; j < clk_cnt; j++) {
  600. if (!strcmp(clk_src_name[i], clk_name_array[j])) {
  601. clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
  602. if (IS_ERR(clk)) {
  603. ret = PTR_ERR(clk);
  604. dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
  605. __func__, clk_src_name[i], ret);
  606. goto err;
  607. }
  608. priv->clk[i] = clk;
  609. dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
  610. __func__, clk_src_name[i]);
  611. }
  612. }
  613. }
  614. ret = of_property_read_u32(pdev->dev.of_node,
  615. "qcom,rx_mclk_mode_muxsel", &muxsel);
  616. if (ret) {
  617. dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
  618. __func__);
  619. } else {
  620. priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  621. if (!priv->rx_clk_muxsel) {
  622. dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
  623. __func__);
  624. return -ENOMEM;
  625. }
  626. }
  627. ret = of_property_read_u32(pdev->dev.of_node,
  628. "qcom,wsa_mclk_mode_muxsel", &muxsel);
  629. if (ret) {
  630. dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
  631. __func__);
  632. } else {
  633. priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  634. if (!priv->wsa_clk_muxsel) {
  635. dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
  636. __func__);
  637. return -ENOMEM;
  638. }
  639. }
  640. ret = of_property_read_u32(pdev->dev.of_node,
  641. "qcom,va_mclk_mode_muxsel", &muxsel);
  642. if (ret) {
  643. dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
  644. __func__);
  645. } else {
  646. priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  647. if (!priv->va_clk_muxsel) {
  648. dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
  649. __func__);
  650. return -ENOMEM;
  651. }
  652. }
  653. ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
  654. if (ret < 0) {
  655. dev_err(&pdev->dev, "%s: Failed to register cb %d",
  656. __func__, ret);
  657. goto err;
  658. }
  659. priv->dev = &pdev->dev;
  660. priv->dev_up = true;
  661. priv->dev_up_gfmux = true;
  662. mutex_init(&priv->rsc_clk_lock);
  663. mutex_init(&priv->fs_gen_lock);
  664. dev_set_drvdata(&pdev->dev, priv);
  665. err:
  666. return ret;
  667. }
  668. static int bolero_clk_rsc_remove(struct platform_device *pdev)
  669. {
  670. struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
  671. bolero_unregister_res_clk(&pdev->dev);
  672. of_platform_depopulate(&pdev->dev);
  673. if (!priv)
  674. return -EINVAL;
  675. mutex_destroy(&priv->rsc_clk_lock);
  676. mutex_destroy(&priv->fs_gen_lock);
  677. return 0;
  678. }
/* Device-tree match table: binds this driver to the rsc clk manager node */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);
/* Platform driver glue; probe/remove defined above */
static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		/* no manual bind/unbind via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
/*
 * Explicit init/exit entry points instead of module_init()/module_exit();
 * presumably invoked by the bolero core driver so registration order is
 * controlled there — confirm against the caller in bolero-cdc.
 */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}

void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
MODULE_DESCRIPTION("Bolero clock resource manager driver");
MODULE_LICENSE("GPL v2");