bolero-clk-rsc.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/of_platform.h>
  6. #include <linux/module.h>
  7. #include <linux/io.h>
  8. #include <linux/init.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/kernel.h>
  11. #include <linux/clk.h>
  12. #include <linux/clk-provider.h>
  13. #include "bolero-cdc.h"
  14. #include "bolero-clk-rsc.h"
  15. #define DRV_NAME "bolero-clk-rsc"
  16. #define BOLERO_CLK_NAME_LENGTH 30
  17. #define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)
/*
 * Clock consumer names, indexed by the clock IDs (TX/RX/WSA/VA core first,
 * then the matching NPL clocks at core ID + NPL_CLK_OFFSET). Looked up in
 * probe against the DT "clock-names" property.
 */
static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};
/* Private state of the Bolero clock resource manager device */
struct bolero_clk_rsc {
	struct device *dev;		/* this rsc clk platform device */
	struct mutex rsc_clk_lock;	/* guards clk_cnt[], dev_up flags, muxsel writes */
	struct mutex fs_gen_lock;	/* guards reg_seq_en_cnt / fs gen sequence */
	struct clk *clk[MAX_CLK];	/* clock handles; NULL if absent in DT */
	int clk_cnt[MAX_CLK];		/* per-clock enable refcounts */
	int reg_seq_en_cnt;		/* fs gen sequence enable refcount */
	int va_tx_clk_cnt;		/* VA users currently riding on TX_CORE_CLK */
	bool dev_up;			/* false while SSR is in progress */
	bool dev_up_gfmux;		/* true once GFMux is accessible after SSR */
	u32 num_fs_reg;			/* number of reg/mask pairs in fs_gen_seq */
	u32 *fs_gen_seq;		/* DT "qcom,fs-gen-sequence": reg, value pairs */
	int default_clk_id[MAX_CLK];	/* mux0 fallback clock per requested clock */
	struct regmap *regmap;		/* NOTE(review): unused here; fs gen path
					 * fetches regmap via dev_get_regmap() */
	char __iomem *rx_clk_muxsel;	/* ioremapped RX mclk mode muxsel register */
	char __iomem *wsa_clk_muxsel;	/* ioremapped WSA mclk mode muxsel register */
	char __iomem *va_clk_muxsel;	/* ioremapped VA mclk mode muxsel register */
};
  46. static int bolero_clk_rsc_cb(struct device *dev, u16 event)
  47. {
  48. struct bolero_clk_rsc *priv;
  49. if (!dev) {
  50. pr_err("%s: Invalid device pointer\n",
  51. __func__);
  52. return -EINVAL;
  53. }
  54. priv = dev_get_drvdata(dev);
  55. if (!priv) {
  56. pr_err("%s: Invalid clk rsc priviate data\n",
  57. __func__);
  58. return -EINVAL;
  59. }
  60. mutex_lock(&priv->rsc_clk_lock);
  61. if (event == BOLERO_MACRO_EVT_SSR_UP) {
  62. priv->dev_up = true;
  63. } else if (event == BOLERO_MACRO_EVT_SSR_DOWN) {
  64. priv->dev_up = false;
  65. priv->dev_up_gfmux = false;
  66. } else if (event == BOLERO_MACRO_EVT_SSR_GFMUX_UP) {
  67. priv->dev_up_gfmux = true;
  68. }
  69. mutex_unlock(&priv->rsc_clk_lock);
  70. return 0;
  71. }
  72. static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
  73. int clk_id)
  74. {
  75. switch (clk_id) {
  76. case RX_CORE_CLK:
  77. return priv->rx_clk_muxsel;
  78. case WSA_CORE_CLK:
  79. return priv->wsa_clk_muxsel;
  80. case VA_CORE_CLK:
  81. return priv->va_clk_muxsel;
  82. case TX_CORE_CLK:
  83. default:
  84. dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
  85. break;
  86. }
  87. return NULL;
  88. }
  89. int bolero_rsc_clk_reset(struct device *dev, int clk_id)
  90. {
  91. struct device *clk_dev = NULL;
  92. struct bolero_clk_rsc *priv = NULL;
  93. int count = 0;
  94. if (!dev) {
  95. pr_err("%s: dev is null %d\n", __func__);
  96. return -EINVAL;
  97. }
  98. if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
  99. pr_err("%s: Invalid clk_id: %d\n",
  100. __func__, clk_id);
  101. return -EINVAL;
  102. }
  103. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  104. if (!clk_dev) {
  105. pr_err("%s: Invalid rsc clk device\n", __func__);
  106. return -EINVAL;
  107. }
  108. priv = dev_get_drvdata(clk_dev);
  109. if (!priv) {
  110. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  111. return -EINVAL;
  112. }
  113. mutex_lock(&priv->rsc_clk_lock);
  114. while (__clk_is_enabled(priv->clk[clk_id])) {
  115. clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
  116. clk_disable_unprepare(priv->clk[clk_id]);
  117. count++;
  118. }
  119. dev_dbg(priv->dev,
  120. "%s: clock reset after ssr, count %d\n", __func__, count);
  121. trace_printk("%s: clock reset after ssr, count %d\n", __func__, count);
  122. while (count--) {
  123. clk_prepare_enable(priv->clk[clk_id]);
  124. clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
  125. }
  126. mutex_unlock(&priv->rsc_clk_lock);
  127. return 0;
  128. }
  129. EXPORT_SYMBOL(bolero_rsc_clk_reset);
  130. void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
  131. {
  132. struct device *clk_dev = NULL;
  133. struct bolero_clk_rsc *priv = NULL;
  134. int i = 0;
  135. if (!dev) {
  136. pr_err("%s: dev is null %d\n", __func__);
  137. return;
  138. }
  139. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  140. if (!clk_dev) {
  141. pr_err("%s: Invalid rsc clk device\n", __func__);
  142. return;
  143. }
  144. priv = dev_get_drvdata(clk_dev);
  145. if (!priv) {
  146. pr_err("%s: Invalid rsc clk private data\n", __func__);
  147. return;
  148. }
  149. mutex_lock(&priv->rsc_clk_lock);
  150. for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
  151. if (enable) {
  152. if (priv->clk[i])
  153. clk_prepare_enable(priv->clk[i]);
  154. if (priv->clk[i + NPL_CLK_OFFSET])
  155. clk_prepare_enable(
  156. priv->clk[i + NPL_CLK_OFFSET]);
  157. } else {
  158. if (priv->clk[i + NPL_CLK_OFFSET])
  159. clk_disable_unprepare(
  160. priv->clk[i + NPL_CLK_OFFSET]);
  161. if (priv->clk[i])
  162. clk_disable_unprepare(priv->clk[i]);
  163. }
  164. }
  165. mutex_unlock(&priv->rsc_clk_lock);
  166. return;
  167. }
  168. EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);
/*
 * bolero_clk_rsc_mux0_clk_request - refcounted enable/disable of a clock
 * in its default (mux0) routing, together with its optional NPL partner.
 *
 * @priv: clk rsc private data
 * @clk_id: core clock ID; clk_id + NPL_CLK_OFFSET is handled alongside
 * @enable: true to enable, false to disable
 *
 * Caller holds priv->rsc_clk_lock (see bolero_clk_rsc_request_clock).
 * Returns 0 on success or a negative clk_prepare_enable() error.
 */
static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	int ret = 0;

	if (enable) {
		/* Enable Requested Core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto done;
			}
			/* NPL clock is optional; enable only if present */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Unbalanced disable: clamp the count and bail out */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		/* Last user gone: turn off NPL first, then the core clock */
		if (priv->clk_cnt[clk_id] == 0) {
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}
	return ret;

err:
	/* NPL enable failed: undo the core clock enable */
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}
/*
 * bolero_clk_rsc_mux1_clk_request - refcounted enable/disable of a clock
 * routed through the alternate (mux1/GFMux) position.
 *
 * @priv: clk rsc private data
 * @clk_id: core clock ID to switch; must map to a muxsel register
 * @enable: true to enable, false to disable
 *
 * While flipping the GFMux, the default (mux0) clock is kept running so the
 * mux always has a live input; it is released again once the switch is done.
 * VA_CORE_CLK skips that bridge sequence (see the glitch workaround note
 * below). Caller holds priv->rsc_clk_lock.
 *
 * Returns 0 on success or a negative error code.
 */
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];
	u32 muxsel = 0;

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* Bridge on the default clock while switching the mux */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								true);
				if (ret < 0)
					goto done;
			}
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto err_clk;
			}
			/* NPL partner is optional */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			/*
			 * Temp SW workaround to address a glitch issue of
			 * VA GFMux instance responsible for switching from
			 * TX MCLK to VA MCLK. This configuration would be taken
			 * care in DSP itself
			 */
			if (clk_id != VA_CORE_CLK) {
				/* Only touch the mux when hardware is reachable */
				if (priv->dev_up_gfmux) {
					iowrite32(0x1, clk_muxsel);
					muxsel = ioread32(clk_muxsel);
					trace_printk("%s: muxsel value after enable: %d\n",
						     __func__, muxsel);
				}
				/* Mux switched: drop the bridging default clock */
				bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								false);
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Unbalanced disable: clamp the count and bail out */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Bridge on the default clock to switch the mux back */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, true);
				if (!ret) {
					/*
					 * Temp SW workaround to address a glitch issue
					 * of VA GFMux instance responsible for
					 * switching from TX MCLK to VA MCLK.
					 * This configuration would be taken
					 * care in DSP itself.
					 */
					if (priv->dev_up_gfmux) {
						iowrite32(0x0, clk_muxsel);
						muxsel = ioread32(clk_muxsel);
						trace_printk("%s: muxsel value after disable: %d\n",
							     __func__, muxsel);
					}
				}
			}
			/* Turn off NPL first, then the core clock */
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
			/* Release the bridging default clock if we held it */
			if (clk_id != VA_CORE_CLK) {
				if (!ret)
					bolero_clk_rsc_mux0_clk_request(priv,
							default_clk_id, false);
			}
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);
err_clk:
	if (clk_id != VA_CORE_CLK)
		bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
/*
 * bolero_clk_rsc_check_and_update_va_clk - reconcile VA clock sourcing with
 * concurrent TX usage.
 *
 * @priv: clk rsc private data
 * @mux_switch: true when the caller's request went through mux1
 * @clk_id: the clock ID the caller just requested
 * @enable: the caller's enable/disable direction
 *
 * VA can run either from its own VA_CORE_CLK (VA-only) or piggyback on
 * TX_CORE_CLK when a record path is active; va_tx_clk_cnt tracks how many
 * VA users are riding on TX. Caller holds priv->rsc_clk_lock.
 *
 * Returns 0 on success or a negative error code.
 */
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				/* TX already running: ride on it instead */
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			/* Migrate every VA user from VA_CORE_CLK onto TX */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			/* Migrate remaining VA users back onto VA_CORE_CLK */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}

err:
	return ret;
}
  397. /**
  398. * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
  399. * sequence
  400. *
  401. * @dev: Macro device pointer
  402. * @enable: enable or disable flag
  403. */
  404. void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
  405. {
  406. int i;
  407. struct regmap *regmap;
  408. struct device *clk_dev = NULL;
  409. struct bolero_clk_rsc *priv = NULL;
  410. if (!dev) {
  411. pr_err("%s: dev is null %d\n", __func__);
  412. return;
  413. }
  414. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  415. if (!clk_dev) {
  416. pr_err("%s: Invalid rsc clk device\n", __func__);
  417. return;
  418. }
  419. priv = dev_get_drvdata(clk_dev);
  420. if (!priv) {
  421. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  422. return;
  423. }
  424. regmap = dev_get_regmap(priv->dev->parent, NULL);
  425. if (!regmap) {
  426. pr_err("%s: regmap is null\n", __func__);
  427. return;
  428. }
  429. mutex_lock(&priv->fs_gen_lock);
  430. if (enable) {
  431. if (priv->reg_seq_en_cnt++ == 0) {
  432. for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
  433. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  434. __func__, priv->fs_gen_seq[i],
  435. priv->fs_gen_seq[i + 1]);
  436. regmap_update_bits(regmap,
  437. priv->fs_gen_seq[i],
  438. priv->fs_gen_seq[i + 1],
  439. priv->fs_gen_seq[i + 1]);
  440. }
  441. }
  442. } else {
  443. if (priv->reg_seq_en_cnt <= 0) {
  444. dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
  445. __func__, priv->reg_seq_en_cnt);
  446. priv->reg_seq_en_cnt = 0;
  447. mutex_unlock(&priv->fs_gen_lock);
  448. return;
  449. }
  450. if (--priv->reg_seq_en_cnt == 0) {
  451. for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
  452. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  453. __func__, priv->fs_gen_seq[i],
  454. priv->fs_gen_seq[i + 1]);
  455. regmap_update_bits(regmap, priv->fs_gen_seq[i],
  456. priv->fs_gen_seq[i + 1], 0x0);
  457. }
  458. }
  459. }
  460. mutex_unlock(&priv->fs_gen_lock);
  461. }
  462. EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
  463. /**
  464. * bolero_clk_rsc_request_clock - request for clock to
  465. * enable/disable
  466. *
  467. * @dev: Macro device pointer.
  468. * @default_clk_id: mux0 Core clock ID input.
  469. * @clk_id_req: Core clock ID requested to enable/disable
  470. * @enable: enable or disable clock flag
  471. *
  472. * Returns 0 on success or -EINVAL on error.
  473. */
  474. int bolero_clk_rsc_request_clock(struct device *dev,
  475. int default_clk_id,
  476. int clk_id_req,
  477. bool enable)
  478. {
  479. int ret = 0;
  480. struct device *clk_dev = NULL;
  481. struct bolero_clk_rsc *priv = NULL;
  482. bool mux_switch = false;
  483. if (!dev) {
  484. pr_err("%s: dev is null %d\n", __func__);
  485. return -EINVAL;
  486. }
  487. if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
  488. (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
  489. pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
  490. __func__, clk_id_req, default_clk_id);
  491. return -EINVAL;
  492. }
  493. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  494. if (!clk_dev) {
  495. pr_err("%s: Invalid rsc clk device\n", __func__);
  496. return -EINVAL;
  497. }
  498. priv = dev_get_drvdata(clk_dev);
  499. if (!priv) {
  500. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  501. return -EINVAL;
  502. }
  503. mutex_lock(&priv->rsc_clk_lock);
  504. if (!priv->dev_up && enable) {
  505. dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
  506. __func__);
  507. trace_printk("%s: SSR is in progress..\n", __func__);
  508. ret = -EINVAL;
  509. goto err;
  510. }
  511. priv->default_clk_id[clk_id_req] = default_clk_id;
  512. if (default_clk_id != clk_id_req)
  513. mux_switch = true;
  514. if (mux_switch) {
  515. if (clk_id_req != VA_CORE_CLK) {
  516. ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
  517. enable);
  518. if (ret < 0)
  519. goto err;
  520. }
  521. } else {
  522. ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
  523. if (ret < 0)
  524. goto err;
  525. }
  526. ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
  527. clk_id_req,
  528. enable);
  529. if (ret < 0)
  530. goto err;
  531. dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  532. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  533. enable);
  534. trace_printk("%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  535. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  536. enable);
  537. mutex_unlock(&priv->rsc_clk_lock);
  538. return 0;
  539. err:
  540. mutex_unlock(&priv->rsc_clk_lock);
  541. return ret;
  542. }
  543. EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
  544. static int bolero_clk_rsc_probe(struct platform_device *pdev)
  545. {
  546. int ret = 0, fs_gen_size, i, j;
  547. const char **clk_name_array;
  548. int clk_cnt;
  549. struct clk *clk;
  550. struct bolero_clk_rsc *priv = NULL;
  551. u32 muxsel = 0;
  552. priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
  553. GFP_KERNEL);
  554. if (!priv)
  555. return -ENOMEM;
  556. /* Get clk fs gen sequence from device tree */
  557. if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
  558. &fs_gen_size)) {
  559. dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
  560. __func__);
  561. ret = -EINVAL;
  562. goto err;
  563. }
  564. priv->num_fs_reg = fs_gen_size/(2 * sizeof(u32));
  565. priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
  566. if (!priv->fs_gen_seq) {
  567. ret = -ENOMEM;
  568. goto err;
  569. }
  570. dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
  571. /* Parse fs-gen-sequence */
  572. ret = of_property_read_u32_array(pdev->dev.of_node,
  573. "qcom,fs-gen-sequence",
  574. priv->fs_gen_seq,
  575. priv->num_fs_reg * 2);
  576. if (ret < 0) {
  577. dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
  578. __func__, ret);
  579. goto err;
  580. }
  581. /* Get clk details from device tree */
  582. clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
  583. if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
  584. dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
  585. __func__, clk_cnt);
  586. ret = -EINVAL;
  587. goto err;
  588. }
  589. clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
  590. GFP_KERNEL);
  591. if (!clk_name_array) {
  592. ret = -ENOMEM;
  593. goto err;
  594. }
  595. ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
  596. clk_name_array, clk_cnt);
  597. for (i = 0; i < MAX_CLK; i++) {
  598. priv->clk[i] = NULL;
  599. for (j = 0; j < clk_cnt; j++) {
  600. if (!strcmp(clk_src_name[i], clk_name_array[j])) {
  601. clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
  602. if (IS_ERR(clk)) {
  603. ret = PTR_ERR(clk);
  604. dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
  605. __func__, clk_src_name[i], ret);
  606. goto err;
  607. }
  608. priv->clk[i] = clk;
  609. dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
  610. __func__, clk_src_name[i]);
  611. }
  612. }
  613. }
  614. ret = of_property_read_u32(pdev->dev.of_node,
  615. "qcom,rx_mclk_mode_muxsel", &muxsel);
  616. if (ret) {
  617. dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
  618. __func__);
  619. } else {
  620. priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  621. if (!priv->rx_clk_muxsel) {
  622. dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
  623. __func__);
  624. return -ENOMEM;
  625. }
  626. }
  627. ret = of_property_read_u32(pdev->dev.of_node,
  628. "qcom,wsa_mclk_mode_muxsel", &muxsel);
  629. if (ret) {
  630. dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
  631. __func__);
  632. } else {
  633. priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  634. if (!priv->wsa_clk_muxsel) {
  635. dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
  636. __func__);
  637. return -ENOMEM;
  638. }
  639. }
  640. ret = of_property_read_u32(pdev->dev.of_node,
  641. "qcom,va_mclk_mode_muxsel", &muxsel);
  642. if (ret) {
  643. dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
  644. __func__);
  645. } else {
  646. priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  647. if (!priv->va_clk_muxsel) {
  648. dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
  649. __func__);
  650. return -ENOMEM;
  651. }
  652. }
  653. ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
  654. if (ret < 0) {
  655. dev_err(&pdev->dev, "%s: Failed to register cb %d",
  656. __func__, ret);
  657. goto err;
  658. }
  659. priv->dev = &pdev->dev;
  660. priv->dev_up = true;
  661. priv->dev_up_gfmux = true;
  662. mutex_init(&priv->rsc_clk_lock);
  663. mutex_init(&priv->fs_gen_lock);
  664. dev_set_drvdata(&pdev->dev, priv);
  665. err:
  666. return ret;
  667. }
  668. static int bolero_clk_rsc_remove(struct platform_device *pdev)
  669. {
  670. struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
  671. bolero_unregister_res_clk(&pdev->dev);
  672. of_platform_depopulate(&pdev->dev);
  673. if (!priv)
  674. return -EINVAL;
  675. mutex_destroy(&priv->rsc_clk_lock);
  676. mutex_destroy(&priv->fs_gen_lock);
  677. return 0;
  678. }
/* Device tree match table for the bolero clock resource manager */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);
/* Platform driver definition; bind attrs suppressed to avoid manual unbind */
static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
/* Register the driver; called from the bolero core (see bolero-cdc.h) */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}

/* Unregister the driver; called from the bolero core on teardown */
void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
MODULE_DESCRIPTION("Bolero clock resource manager driver");
MODULE_LICENSE("GPL v2");