bolero-clk-rsc.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/of_platform.h>
  6. #include <linux/module.h>
  7. #include <linux/io.h>
  8. #include <linux/init.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/kernel.h>
  11. #include <linux/clk.h>
  12. #include <linux/clk-provider.h>
  13. #include "bolero-cdc.h"
  14. #include "bolero-clk-rsc.h"
  15. #define DRV_NAME "bolero-clk-rsc"
  16. #define BOLERO_CLK_NAME_LENGTH 30
  17. #define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)
/*
 * Clock consumer names looked up from the DT "clock-names" property.
 * The order must match the clock ID enum: each core clock's NPL partner
 * sits exactly NPL_CLK_OFFSET entries after it.
 */
static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};
/* Private state for the Bolero clock resource manager. */
struct bolero_clk_rsc {
	struct device *dev;		/* this platform device */
	struct mutex rsc_clk_lock;	/* guards clk_cnt[], va_tx_clk_cnt, dev_up */
	struct mutex fs_gen_lock;	/* guards reg_seq_en_cnt and fs-gen writes */
	struct clk *clk[MAX_CLK];	/* core + NPL clock handles; entries may be NULL */
	int clk_cnt[MAX_CLK];		/* per-clock enable refcounts */
	int reg_seq_en_cnt;		/* fs-gen sequence enable refcount */
	int va_tx_clk_cnt;		/* VA usecases currently riding on TX_CORE_CLK */
	bool dev_up;			/* cleared on SSR_DOWN, set on SSR_UP */
	u32 num_fs_reg;			/* number of (register, value) fs-gen pairs */
	u32 *fs_gen_seq;		/* flattened reg/value pairs parsed from DT */
	int default_clk_id[MAX_CLK];	/* mux0 fallback clock per requested clock */
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;	/* ioremapped GFMux select registers */
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};
  45. static int bolero_clk_rsc_cb(struct device *dev, u16 event)
  46. {
  47. struct bolero_clk_rsc *priv;
  48. if (!dev) {
  49. pr_err("%s: Invalid device pointer\n",
  50. __func__);
  51. return -EINVAL;
  52. }
  53. priv = dev_get_drvdata(dev);
  54. if (!priv) {
  55. pr_err("%s: Invalid clk rsc priviate data\n",
  56. __func__);
  57. return -EINVAL;
  58. }
  59. mutex_lock(&priv->rsc_clk_lock);
  60. if (event == BOLERO_MACRO_EVT_SSR_UP)
  61. priv->dev_up = true;
  62. else if (event == BOLERO_MACRO_EVT_SSR_DOWN)
  63. priv->dev_up = false;
  64. mutex_unlock(&priv->rsc_clk_lock);
  65. return 0;
  66. }
  67. static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
  68. int clk_id)
  69. {
  70. switch (clk_id) {
  71. case RX_CORE_CLK:
  72. return priv->rx_clk_muxsel;
  73. case WSA_CORE_CLK:
  74. return priv->wsa_clk_muxsel;
  75. case VA_CORE_CLK:
  76. return priv->va_clk_muxsel;
  77. case TX_CORE_CLK:
  78. default:
  79. dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
  80. break;
  81. }
  82. return NULL;
  83. }
  84. int bolero_rsc_clk_reset(struct device *dev, int clk_id)
  85. {
  86. struct device *clk_dev = NULL;
  87. struct bolero_clk_rsc *priv = NULL;
  88. int count = 0;
  89. if (!dev) {
  90. pr_err("%s: dev is null %d\n", __func__);
  91. return -EINVAL;
  92. }
  93. if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
  94. pr_err("%s: Invalid clk_id: %d\n",
  95. __func__, clk_id);
  96. return -EINVAL;
  97. }
  98. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  99. if (!clk_dev) {
  100. pr_err("%s: Invalid rsc clk device\n", __func__);
  101. return -EINVAL;
  102. }
  103. priv = dev_get_drvdata(clk_dev);
  104. if (!priv) {
  105. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  106. return -EINVAL;
  107. }
  108. mutex_lock(&priv->rsc_clk_lock);
  109. while (__clk_is_enabled(priv->clk[clk_id])) {
  110. clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
  111. clk_disable_unprepare(priv->clk[clk_id]);
  112. count++;
  113. }
  114. dev_dbg(priv->dev,
  115. "%s: clock reset after ssr, count %d\n", __func__, count);
  116. trace_printk("%s: clock reset after ssr, count %d\n", __func__, count);
  117. while (count--) {
  118. clk_prepare_enable(priv->clk[clk_id]);
  119. clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
  120. }
  121. mutex_unlock(&priv->rsc_clk_lock);
  122. return 0;
  123. }
  124. EXPORT_SYMBOL(bolero_rsc_clk_reset);
  125. void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
  126. {
  127. struct device *clk_dev = NULL;
  128. struct bolero_clk_rsc *priv = NULL;
  129. int i = 0;
  130. if (!dev) {
  131. pr_err("%s: dev is null %d\n", __func__);
  132. return;
  133. }
  134. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  135. if (!clk_dev) {
  136. pr_err("%s: Invalid rsc clk device\n", __func__);
  137. return;
  138. }
  139. priv = dev_get_drvdata(clk_dev);
  140. if (!priv) {
  141. pr_err("%s: Invalid rsc clk private data\n", __func__);
  142. return;
  143. }
  144. mutex_lock(&priv->rsc_clk_lock);
  145. for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
  146. if (enable) {
  147. if (priv->clk[i])
  148. clk_prepare_enable(priv->clk[i]);
  149. if (priv->clk[i + NPL_CLK_OFFSET])
  150. clk_prepare_enable(
  151. priv->clk[i + NPL_CLK_OFFSET]);
  152. } else {
  153. if (priv->clk[i + NPL_CLK_OFFSET])
  154. clk_disable_unprepare(
  155. priv->clk[i + NPL_CLK_OFFSET]);
  156. if (priv->clk[i])
  157. clk_disable_unprepare(priv->clk[i]);
  158. }
  159. }
  160. mutex_unlock(&priv->rsc_clk_lock);
  161. return;
  162. }
  163. EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);
/*
 * bolero_clk_rsc_mux0_clk_request - refcounted enable/disable of a core
 * clock and its optional NPL partner on the default (mux0) path.
 * @priv: clk rsc private data
 * @clk_id: core clock index into priv->clk[]
 * @enable: true to take a reference, false to drop one
 *
 * The hardware clocks are only touched on the 0 -> 1 and 1 -> 0 refcount
 * transitions. Callers hold priv->rsc_clk_lock. Returns 0 on success or
 * the clk_prepare_enable() error.
 */
static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	int ret = 0;

	if (enable) {
		/* Enable Requested Core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
					__func__, clk_id);
				goto done;
			}
			/* NPL partner is optional; entry may be NULL */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						__func__,
						clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Unbalanced disable: clamp the count and bail out */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
				__func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Disable in reverse order: NPL first, then core */
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}
	return ret;

err:
	/* NPL enable failed: roll back the core clock enable */
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}
/*
 * bolero_clk_rsc_mux1_clk_request - refcounted enable/disable of a clock
 * that is routed through a GFMux.
 * @priv: clk rsc private data
 * @clk_id: mux1 core clock to toggle; must have a muxsel register
 * @enable: true to take a reference, false to drop one
 *
 * On the 0 -> 1 transition the default (mux0) clock is briefly enabled so
 * the GFMux input stays stable while the select register is written, then
 * released once the switch completes; the 1 -> 0 transition mirrors this
 * to switch back before the clock goes away. VA_CORE_CLK skips the mux
 * write entirely (see the workaround comments below). The NPL partner
 * clock, when present, is enabled after and disabled before its core
 * clock. Callers hold priv->rsc_clk_lock.
 */
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];
	u32 muxsel = 0;

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* Park on the default clock while switching the mux */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								true);
				if (ret < 0)
					goto done;
			}
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
					__func__, clk_id);
				goto err_clk;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						__func__,
						clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			/*
			 * Temp SW workaround to address a glitch issue of
			 * VA GFMux instance responsible for switching from
			 * TX MCLK to VA MCLK. This configuration would be taken
			 * care in DSP itself
			 */
			if (clk_id != VA_CORE_CLK) {
				iowrite32(0x1, clk_muxsel);
				/* Read back to confirm/flush the mux write */
				muxsel = ioread32(clk_muxsel);
				trace_printk("%s: muxsel value after enable: %d\n",
					__func__, muxsel);
				bolero_clk_rsc_mux0_clk_request(priv,
							default_clk_id,
							false);
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Unbalanced disable: clamp the count and bail out */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
				__func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/*
			 * Re-enable the default clock so the mux can be
			 * switched back before this clock is removed.
			 */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, true);
				if (!ret) {
					/*
					 * Temp SW workaround to address a glitch issue
					 * of VA GFMux instance responsible for
					 * switching from TX MCLK to VA MCLK.
					 * This configuration would be taken
					 * care in DSP itself.
					 */
					iowrite32(0x0, clk_muxsel);
					muxsel = ioread32(clk_muxsel);
					trace_printk("%s: muxsel value after disable: %d\n",
						__func__, muxsel);
				}
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
			/* Release the default clock only if we acquired it */
			if (clk_id != VA_CORE_CLK) {
				if (!ret)
					bolero_clk_rsc_mux0_clk_request(priv,
							default_clk_id, false);
			}
		}
	}
	return ret;

err_npl_clk:
	/* NPL enable failed: roll back the core clock enable */
	clk_disable_unprepare(priv->clk[clk_id]);
err_clk:
	/* Release the temporarily-parked default clock */
	if (clk_id != VA_CORE_CLK)
		bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
/*
 * bolero_clk_rsc_check_and_update_va_clk - keep VA clocking consistent with
 * the TX clock state.
 * @priv: clk rsc private data
 * @mux_switch: true when the request was routed via mux1 (clock != default)
 * @clk_id: the clock that was just requested
 * @enable: direction of that request
 *
 * When TX_CORE_CLK is running, VA usecases ride on it instead of
 * VA_CORE_CLK; priv->va_tx_clk_cnt tracks how many VA enables were
 * satisfied that way so they can be migrated back and forth as TX
 * comes and goes. Callers hold priv->rsc_clk_lock.
 */
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
							VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
							TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				/* VA rides on TX clk; remember the borrow */
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
							TX_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux1_clk_request(priv,
							VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
							VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
							TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
							VA_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux0_clk_request(priv,
							TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}

err:
	return ret;
}
  388. /**
  389. * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
  390. * sequence
  391. *
  392. * @dev: Macro device pointer
  393. * @enable: enable or disable flag
  394. */
  395. void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
  396. {
  397. int i;
  398. struct regmap *regmap;
  399. struct device *clk_dev = NULL;
  400. struct bolero_clk_rsc *priv = NULL;
  401. if (!dev) {
  402. pr_err("%s: dev is null %d\n", __func__);
  403. return;
  404. }
  405. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  406. if (!clk_dev) {
  407. pr_err("%s: Invalid rsc clk device\n", __func__);
  408. return;
  409. }
  410. priv = dev_get_drvdata(clk_dev);
  411. if (!priv) {
  412. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  413. return;
  414. }
  415. regmap = dev_get_regmap(priv->dev->parent, NULL);
  416. if (!regmap) {
  417. pr_err("%s: regmap is null\n", __func__);
  418. return;
  419. }
  420. mutex_lock(&priv->fs_gen_lock);
  421. if (enable) {
  422. if (priv->reg_seq_en_cnt++ == 0) {
  423. for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
  424. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  425. __func__, priv->fs_gen_seq[i],
  426. priv->fs_gen_seq[i + 1]);
  427. regmap_update_bits(regmap,
  428. priv->fs_gen_seq[i],
  429. priv->fs_gen_seq[i + 1],
  430. priv->fs_gen_seq[i + 1]);
  431. }
  432. }
  433. } else {
  434. if (priv->reg_seq_en_cnt <= 0) {
  435. dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
  436. __func__, priv->reg_seq_en_cnt);
  437. priv->reg_seq_en_cnt = 0;
  438. mutex_unlock(&priv->fs_gen_lock);
  439. return;
  440. }
  441. if (--priv->reg_seq_en_cnt == 0) {
  442. for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
  443. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  444. __func__, priv->fs_gen_seq[i],
  445. priv->fs_gen_seq[i + 1]);
  446. regmap_update_bits(regmap, priv->fs_gen_seq[i],
  447. priv->fs_gen_seq[i + 1], 0x0);
  448. }
  449. }
  450. }
  451. mutex_unlock(&priv->fs_gen_lock);
  452. }
  453. EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
  454. /**
  455. * bolero_clk_rsc_request_clock - request for clock to
  456. * enable/disable
  457. *
  458. * @dev: Macro device pointer.
  459. * @default_clk_id: mux0 Core clock ID input.
  460. * @clk_id_req: Core clock ID requested to enable/disable
  461. * @enable: enable or disable clock flag
  462. *
  463. * Returns 0 on success or -EINVAL on error.
  464. */
  465. int bolero_clk_rsc_request_clock(struct device *dev,
  466. int default_clk_id,
  467. int clk_id_req,
  468. bool enable)
  469. {
  470. int ret = 0;
  471. struct device *clk_dev = NULL;
  472. struct bolero_clk_rsc *priv = NULL;
  473. bool mux_switch = false;
  474. if (!dev) {
  475. pr_err("%s: dev is null %d\n", __func__);
  476. return -EINVAL;
  477. }
  478. if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
  479. (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
  480. pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
  481. __func__, clk_id_req, default_clk_id);
  482. return -EINVAL;
  483. }
  484. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  485. if (!clk_dev) {
  486. pr_err("%s: Invalid rsc clk device\n", __func__);
  487. return -EINVAL;
  488. }
  489. priv = dev_get_drvdata(clk_dev);
  490. if (!priv) {
  491. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  492. return -EINVAL;
  493. }
  494. mutex_lock(&priv->rsc_clk_lock);
  495. if (!priv->dev_up && enable) {
  496. dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
  497. __func__);
  498. trace_printk("%s: SSR is in progress..\n", __func__);
  499. ret = -EINVAL;
  500. goto err;
  501. }
  502. priv->default_clk_id[clk_id_req] = default_clk_id;
  503. if (default_clk_id != clk_id_req)
  504. mux_switch = true;
  505. if (mux_switch) {
  506. if (clk_id_req != VA_CORE_CLK) {
  507. ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
  508. enable);
  509. if (ret < 0)
  510. goto err;
  511. }
  512. } else {
  513. ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
  514. if (ret < 0)
  515. goto err;
  516. }
  517. ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
  518. clk_id_req,
  519. enable);
  520. if (ret < 0)
  521. goto err;
  522. dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  523. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  524. enable);
  525. trace_printk("%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  526. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  527. enable);
  528. mutex_unlock(&priv->rsc_clk_lock);
  529. return 0;
  530. err:
  531. mutex_unlock(&priv->rsc_clk_lock);
  532. return ret;
  533. }
  534. EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
  535. static int bolero_clk_rsc_probe(struct platform_device *pdev)
  536. {
  537. int ret = 0, fs_gen_size, i, j;
  538. const char **clk_name_array;
  539. int clk_cnt;
  540. struct clk *clk;
  541. struct bolero_clk_rsc *priv = NULL;
  542. u32 muxsel = 0;
  543. priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
  544. GFP_KERNEL);
  545. if (!priv)
  546. return -ENOMEM;
  547. /* Get clk fs gen sequence from device tree */
  548. if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
  549. &fs_gen_size)) {
  550. dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
  551. __func__);
  552. ret = -EINVAL;
  553. goto err;
  554. }
  555. priv->num_fs_reg = fs_gen_size/(2 * sizeof(u32));
  556. priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
  557. if (!priv->fs_gen_seq) {
  558. ret = -ENOMEM;
  559. goto err;
  560. }
  561. dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
  562. /* Parse fs-gen-sequence */
  563. ret = of_property_read_u32_array(pdev->dev.of_node,
  564. "qcom,fs-gen-sequence",
  565. priv->fs_gen_seq,
  566. priv->num_fs_reg * 2);
  567. if (ret < 0) {
  568. dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
  569. __func__, ret);
  570. goto err;
  571. }
  572. /* Get clk details from device tree */
  573. clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
  574. if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
  575. dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
  576. __func__, clk_cnt);
  577. ret = -EINVAL;
  578. goto err;
  579. }
  580. clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
  581. GFP_KERNEL);
  582. if (!clk_name_array) {
  583. ret = -ENOMEM;
  584. goto err;
  585. }
  586. ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
  587. clk_name_array, clk_cnt);
  588. for (i = 0; i < MAX_CLK; i++) {
  589. priv->clk[i] = NULL;
  590. for (j = 0; j < clk_cnt; j++) {
  591. if (!strcmp(clk_src_name[i], clk_name_array[j])) {
  592. clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
  593. if (IS_ERR(clk)) {
  594. ret = PTR_ERR(clk);
  595. dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
  596. __func__, clk_src_name[i], ret);
  597. goto err;
  598. }
  599. priv->clk[i] = clk;
  600. dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
  601. __func__, clk_src_name[i]);
  602. }
  603. }
  604. }
  605. ret = of_property_read_u32(pdev->dev.of_node,
  606. "qcom,rx_mclk_mode_muxsel", &muxsel);
  607. if (ret) {
  608. dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
  609. __func__);
  610. } else {
  611. priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  612. if (!priv->rx_clk_muxsel) {
  613. dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
  614. __func__);
  615. return -ENOMEM;
  616. }
  617. }
  618. ret = of_property_read_u32(pdev->dev.of_node,
  619. "qcom,wsa_mclk_mode_muxsel", &muxsel);
  620. if (ret) {
  621. dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
  622. __func__);
  623. } else {
  624. priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  625. if (!priv->wsa_clk_muxsel) {
  626. dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
  627. __func__);
  628. return -ENOMEM;
  629. }
  630. }
  631. ret = of_property_read_u32(pdev->dev.of_node,
  632. "qcom,va_mclk_mode_muxsel", &muxsel);
  633. if (ret) {
  634. dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
  635. __func__);
  636. } else {
  637. priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  638. if (!priv->va_clk_muxsel) {
  639. dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
  640. __func__);
  641. return -ENOMEM;
  642. }
  643. }
  644. ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
  645. if (ret < 0) {
  646. dev_err(&pdev->dev, "%s: Failed to register cb %d",
  647. __func__, ret);
  648. goto err;
  649. }
  650. priv->dev = &pdev->dev;
  651. priv->dev_up = true;
  652. mutex_init(&priv->rsc_clk_lock);
  653. mutex_init(&priv->fs_gen_lock);
  654. dev_set_drvdata(&pdev->dev, priv);
  655. err:
  656. return ret;
  657. }
  658. static int bolero_clk_rsc_remove(struct platform_device *pdev)
  659. {
  660. struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
  661. bolero_unregister_res_clk(&pdev->dev);
  662. of_platform_depopulate(&pdev->dev);
  663. if (!priv)
  664. return -EINVAL;
  665. mutex_destroy(&priv->rsc_clk_lock);
  666. mutex_destroy(&priv->fs_gen_lock);
  667. return 0;
  668. }
/* OF match table: binds this driver to the bolero clk rsc manager DT node. */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);
/* Platform driver glue; manual bind/unbind via sysfs is suppressed. */
static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
/* Entry point called by the bolero core: register the platform driver. */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}
/* Exit point called by the bolero core: unregister the platform driver. */
void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
MODULE_DESCRIPTION("Bolero clock resource manager driver");
MODULE_LICENSE("GPL v2");