bolero-clk-rsc.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/of_platform.h>
  6. #include <linux/module.h>
  7. #include <linux/io.h>
  8. #include <linux/init.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/kernel.h>
  11. #include <linux/clk.h>
  12. #include "bolero-cdc.h"
  13. #include "bolero-clk-rsc.h"
  14. #define DRV_NAME "bolero-clk-rsc"
  15. #define BOLERO_CLK_NAME_LENGTH 30
  16. #define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)
/*
 * Clock consumer names looked up from the "clock-names" DT property.
 * Index order must match the clock id enums from bolero-clk-rsc.h:
 * the four core clocks come first, followed by their NPL companions
 * at the same relative position (see NPL_CLK_OFFSET).
 */
static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};
/*
 * Per-device state for the Bolero clock resource manager.
 * All counters below are protected by rsc_clk_lock.
 */
struct bolero_clk_rsc {
	struct device *dev;			/* this platform device */
	struct mutex rsc_clk_lock;		/* guards counts and dev_up */
	struct clk *clk[MAX_CLK];		/* handles indexed by clock id; NULL if absent in DT */
	int clk_cnt[MAX_CLK];			/* per-clock enable refcount */
	int reg_seq_en_cnt;			/* fs-gen sequence enable refcount */
	int va_tx_clk_cnt;			/* VA requests currently satisfied by TX_CORE_CLK */
	bool dev_up;				/* false while subsystem restart (SSR) is in progress */
	u32 num_fs_reg;				/* number of (reg, value) pairs in fs_gen_seq */
	u32 *fs_gen_seq;			/* flattened (reg, value) pairs from DT */
	int default_clk_id[MAX_CLK];		/* mux0 fallback clock per requested clock id */
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;		/* ioremapped mux-select registers (may be NULL) */
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};
  43. static int bolero_clk_rsc_cb(struct device *dev, u16 event)
  44. {
  45. struct bolero_clk_rsc *priv;
  46. if (!dev) {
  47. pr_err("%s: Invalid device pointer\n",
  48. __func__);
  49. return -EINVAL;
  50. }
  51. priv = dev_get_drvdata(dev);
  52. if (!priv) {
  53. pr_err("%s: Invalid clk rsc priviate data\n",
  54. __func__);
  55. return -EINVAL;
  56. }
  57. mutex_lock(&priv->rsc_clk_lock);
  58. if (event == BOLERO_MACRO_EVT_SSR_UP)
  59. priv->dev_up = true;
  60. else if (event == BOLERO_MACRO_EVT_SSR_DOWN)
  61. priv->dev_up = false;
  62. mutex_unlock(&priv->rsc_clk_lock);
  63. return 0;
  64. }
  65. static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
  66. int clk_id)
  67. {
  68. switch (clk_id) {
  69. case RX_CORE_CLK:
  70. return priv->rx_clk_muxsel;
  71. case WSA_CORE_CLK:
  72. return priv->wsa_clk_muxsel;
  73. case VA_CORE_CLK:
  74. return priv->va_clk_muxsel;
  75. case TX_CORE_CLK:
  76. default:
  77. dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
  78. break;
  79. }
  80. return NULL;
  81. }
/*
 * bolero_clk_rsc_mux0_clk_request - enable/disable a clock at its default
 * mux position (mux0).
 *
 * Reference counted per clk_id: the hardware clock (and its optional NPL
 * companion at clk_id + NPL_CLK_OFFSET) is only touched on the 0->1 and
 * 1->0 transitions of clk_cnt[clk_id].
 *
 * Caller must hold priv->rsc_clk_lock (see bolero_clk_rsc_request_clock).
 * Returns 0 on success or a negative error from clk_prepare_enable().
 */
static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	int ret = 0;

	if (enable) {
		/* Enable Requested Core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto done;
			}
			/* NPL clock is optional; enable it alongside the core clock */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Guard against unbalanced disable; clamp the count at zero */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Disable NPL first, then the core clock (reverse of enable) */
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}
	return ret;

err:
	/* NPL enable failed: roll back the already-enabled core clock */
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}
/*
 * bolero_clk_rsc_mux1_clk_request - enable/disable a clock at the alternate
 * mux position (mux1), toggling the clock's mux-select register.
 *
 * On the 0->1 transition the requested clock (and optional NPL companion)
 * is enabled and the muxsel register is written to 1; on the 1->0
 * transition the muxsel is written back to 0 and the clocks are disabled.
 * In both directions the default (mux0) clock is briefly enabled around
 * the muxsel write — presumably the mux can only be switched while the
 * default source is running (TODO: confirm against HW programming guide).
 *
 * Caller must hold priv->rsc_clk_lock.
 * Returns 0 on success or a negative error.
 */
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* Keep the default clock running while switching the mux */
			ret = bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							      true);
			if (ret < 0)
				goto done;
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto err_clk;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			/* Switch mux to position 1, then drop the default clock */
			iowrite32(0x1, clk_muxsel);
			bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							false);
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Guard against unbalanced disable; clamp the count at zero */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Mirror of enable: default clock on, mux back to 0,
			 * requested clocks off, default clock off.
			 */
			bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							true);
			iowrite32(0x0, clk_muxsel);
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
			bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							false);
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);
err_clk:
	bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
/*
 * bolero_clk_rsc_check_and_update_va_clk - keep the VA macro clocked from
 * the cheapest source as use-cases come and go.
 *
 * When both TX and VA are active, VA is run from TX_CORE_CLK instead of
 * its own VA_CORE_CLK; va_tx_clk_cnt tracks how many VA requests are
 * currently being satisfied by TX_CORE_CLK so the transfer can be undone.
 *
 * Caller must hold priv->rsc_clk_lock.
 * Returns 0 on success or a negative error.
 */
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				/* TX already running: piggyback VA on TX clock */
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			/* Migrate every outstanding VA request over to TX */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				/* VA was riding on TX: release one TX reference */
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			/* Migrate all VA requests back from TX to VA clock */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, true);
				if (ret < 0)
					goto err;
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}
err:
	return ret;
}
  272. /**
  273. * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
  274. * sequence
  275. *
  276. * @dev: Macro device pointer
  277. * @enable: enable or disable flag
  278. */
  279. void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
  280. {
  281. int i;
  282. struct regmap *regmap;
  283. struct device *clk_dev = NULL;
  284. struct bolero_clk_rsc *priv = NULL;
  285. if (!dev) {
  286. pr_err("%s: dev is null %d\n", __func__);
  287. return;
  288. }
  289. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  290. if (!clk_dev) {
  291. pr_err("%s: Invalid rsc clk device\n", __func__);
  292. return;
  293. }
  294. priv = dev_get_drvdata(clk_dev);
  295. if (!priv) {
  296. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  297. return;
  298. }
  299. regmap = dev_get_regmap(priv->dev->parent, NULL);
  300. if (!regmap) {
  301. pr_err("%s: regmap is null\n", __func__);
  302. return;
  303. }
  304. if (enable) {
  305. if (priv->reg_seq_en_cnt++ == 0) {
  306. for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
  307. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  308. __func__, priv->fs_gen_seq[i],
  309. priv->fs_gen_seq[i + 1]);
  310. regmap_update_bits(regmap,
  311. priv->fs_gen_seq[i],
  312. priv->fs_gen_seq[i + 1],
  313. priv->fs_gen_seq[i + 1]);
  314. }
  315. }
  316. } else {
  317. if (priv->reg_seq_en_cnt <= 0) {
  318. dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
  319. __func__, priv->reg_seq_en_cnt);
  320. priv->reg_seq_en_cnt = 0;
  321. return;
  322. }
  323. if (--priv->reg_seq_en_cnt == 0) {
  324. for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
  325. dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
  326. __func__, priv->fs_gen_seq[i],
  327. priv->fs_gen_seq[i + 1]);
  328. regmap_update_bits(regmap, priv->fs_gen_seq[i],
  329. priv->fs_gen_seq[i + 1], 0x0);
  330. }
  331. }
  332. }
  333. }
  334. EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
  335. /**
  336. * bolero_clk_rsc_request_clock - request for clock to
  337. * enable/disable
  338. *
  339. * @dev: Macro device pointer.
  340. * @default_clk_id: mux0 Core clock ID input.
  341. * @clk_id_req: Core clock ID requested to enable/disable
  342. * @enable: enable or disable clock flag
  343. *
  344. * Returns 0 on success or -EINVAL on error.
  345. */
  346. int bolero_clk_rsc_request_clock(struct device *dev,
  347. int default_clk_id,
  348. int clk_id_req,
  349. bool enable)
  350. {
  351. int ret = 0;
  352. struct device *clk_dev = NULL;
  353. struct bolero_clk_rsc *priv = NULL;
  354. bool mux_switch = false;
  355. if (!dev) {
  356. pr_err("%s: dev is null %d\n", __func__);
  357. return -EINVAL;
  358. }
  359. if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
  360. (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
  361. pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
  362. __func__, clk_id_req, default_clk_id);
  363. return -EINVAL;
  364. }
  365. clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
  366. if (!clk_dev) {
  367. pr_err("%s: Invalid rsc clk device\n", __func__);
  368. return -EINVAL;
  369. }
  370. priv = dev_get_drvdata(clk_dev);
  371. if (!priv) {
  372. pr_err("%s: Invalid rsc clk priviate data\n", __func__);
  373. return -EINVAL;
  374. }
  375. mutex_lock(&priv->rsc_clk_lock);
  376. if (!priv->dev_up && enable) {
  377. dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
  378. __func__);
  379. ret = -EINVAL;
  380. goto err;
  381. }
  382. priv->default_clk_id[clk_id_req] = default_clk_id;
  383. if (default_clk_id != clk_id_req)
  384. mux_switch = true;
  385. if (mux_switch) {
  386. if (clk_id_req != VA_CORE_CLK) {
  387. ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
  388. enable);
  389. if (ret < 0)
  390. goto err;
  391. }
  392. } else {
  393. ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
  394. if (ret < 0)
  395. goto err;
  396. }
  397. ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
  398. clk_id_req,
  399. enable);
  400. if (ret < 0)
  401. goto err;
  402. dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
  403. __func__, priv->clk_cnt[clk_id_req], clk_id_req,
  404. enable);
  405. mutex_unlock(&priv->rsc_clk_lock);
  406. return 0;
  407. err:
  408. mutex_unlock(&priv->rsc_clk_lock);
  409. return ret;
  410. }
  411. EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
  412. static int bolero_clk_rsc_probe(struct platform_device *pdev)
  413. {
  414. int ret = 0, fs_gen_size, i, j;
  415. const char **clk_name_array;
  416. int clk_cnt;
  417. struct clk *clk;
  418. struct bolero_clk_rsc *priv = NULL;
  419. u32 muxsel = 0;
  420. priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
  421. GFP_KERNEL);
  422. if (!priv)
  423. return -ENOMEM;
  424. /* Get clk fs gen sequence from device tree */
  425. if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
  426. &fs_gen_size)) {
  427. dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
  428. __func__);
  429. ret = -EINVAL;
  430. goto err;
  431. }
  432. priv->num_fs_reg = fs_gen_size/(2 * sizeof(u32));
  433. priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
  434. if (!priv->fs_gen_seq) {
  435. ret = -ENOMEM;
  436. goto err;
  437. }
  438. dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
  439. /* Parse fs-gen-sequence */
  440. ret = of_property_read_u32_array(pdev->dev.of_node,
  441. "qcom,fs-gen-sequence",
  442. priv->fs_gen_seq,
  443. priv->num_fs_reg * 2);
  444. if (ret < 0) {
  445. dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
  446. __func__, ret);
  447. goto err;
  448. }
  449. /* Get clk details from device tree */
  450. clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
  451. if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
  452. dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
  453. __func__, clk_cnt);
  454. ret = -EINVAL;
  455. goto err;
  456. }
  457. clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
  458. GFP_KERNEL);
  459. if (!clk_name_array) {
  460. ret = -ENOMEM;
  461. goto err;
  462. }
  463. ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
  464. clk_name_array, clk_cnt);
  465. for (i = 0; i < MAX_CLK; i++) {
  466. priv->clk[i] = NULL;
  467. for (j = 0; j < clk_cnt; j++) {
  468. if (!strcmp(clk_src_name[i], clk_name_array[j])) {
  469. clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
  470. if (IS_ERR(clk)) {
  471. ret = PTR_ERR(clk);
  472. dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
  473. __func__, clk_src_name[i], ret);
  474. goto err;
  475. }
  476. priv->clk[i] = clk;
  477. dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
  478. __func__, clk_src_name[i]);
  479. }
  480. }
  481. }
  482. ret = of_property_read_u32(pdev->dev.of_node,
  483. "qcom,rx_mclk_mode_muxsel", &muxsel);
  484. if (ret) {
  485. dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
  486. __func__);
  487. } else {
  488. priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  489. if (!priv->rx_clk_muxsel) {
  490. dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
  491. __func__);
  492. return -ENOMEM;
  493. }
  494. }
  495. ret = of_property_read_u32(pdev->dev.of_node,
  496. "qcom,wsa_mclk_mode_muxsel", &muxsel);
  497. if (ret) {
  498. dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
  499. __func__);
  500. } else {
  501. priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  502. if (!priv->wsa_clk_muxsel) {
  503. dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
  504. __func__);
  505. return -ENOMEM;
  506. }
  507. }
  508. ret = of_property_read_u32(pdev->dev.of_node,
  509. "qcom,va_mclk_mode_muxsel", &muxsel);
  510. if (ret) {
  511. dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
  512. __func__);
  513. } else {
  514. priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
  515. if (!priv->va_clk_muxsel) {
  516. dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
  517. __func__);
  518. return -ENOMEM;
  519. }
  520. }
  521. ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
  522. if (ret < 0) {
  523. dev_err(&pdev->dev, "%s: Failed to register cb %d",
  524. __func__, ret);
  525. goto err;
  526. }
  527. priv->dev = &pdev->dev;
  528. priv->dev_up = true;
  529. mutex_init(&priv->rsc_clk_lock);
  530. dev_set_drvdata(&pdev->dev, priv);
  531. err:
  532. return ret;
  533. }
  534. static int bolero_clk_rsc_remove(struct platform_device *pdev)
  535. {
  536. struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
  537. bolero_unregister_res_clk(&pdev->dev);
  538. of_platform_depopulate(&pdev->dev);
  539. if (!priv)
  540. return -EINVAL;
  541. mutex_destroy(&priv->rsc_clk_lock);
  542. return 0;
  543. }
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);
/* Platform driver descriptor; bind attributes suppressed (managed device). */
static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
/* Register the platform driver; called from the bolero core module init. */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}
/* Unregister the platform driver; called from the bolero core module exit. */
void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
  567. MODULE_DESCRIPTION("Bolero clock resource manager driver");
  568. MODULE_LICENSE("GPL v2");