mmrm_clk_rsrc_mgr_sw.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/slab.h>
  6. #include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
  7. #include <linux/clk.h>
  8. #include <linux/clk/qcom.h>
  9. #include "mmrm_debug.h"
  10. #include "mmrm_clk_rsrc_mgr.h"
  11. #include "mmrm_fixedpoint.h"
  12. #define Q16_INT(q) ((q) >> 16)
  13. #define Q16_FRAC(q) ((((q) & 0xFFFF) * 100) >> 16)
  14. #define CLK_RATE_STEP 1000000
  15. #define NOTIFY_TIMEOUT 100000000
  16. static int mmrm_sw_update_freq(
  17. struct mmrm_sw_clk_mgr_info *sinfo, struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
  18. {
  19. int rc = 0;
  20. u32 i;
  21. struct mmrm_driver_data *drv_data = (struct mmrm_driver_data *)sinfo->driver_data;
  22. struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
  23. struct voltage_corner_set *cset = &cres->corner_set;
  24. long clk_val_min, clk_val_max, clk_val, clk_val_round;
  25. int voltage_corner;
  26. clk_val_min = clk_round_rate(tbl_entry->clk, 1);
  27. clk_val_max = clk_round_rate(tbl_entry->clk, ~0UL);
  28. d_mpr_h("%s: csid(0x%x): min_clk_rate(%llu) max_clk_rate(%llu)\n",
  29. __func__,
  30. tbl_entry->clk_src_id,
  31. clk_val_min,
  32. clk_val_max);
  33. /* init with min val */
  34. for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
  35. tbl_entry->freq[i] = clk_val_min;
  36. }
  37. /* step through rates */
  38. for (clk_val = clk_val_min; clk_val < clk_val_max; clk_val += CLK_RATE_STEP) {
  39. /* get next clk rate */
  40. clk_val_round = clk_round_rate(tbl_entry->clk, clk_val);
  41. if (clk_val_round > clk_val_min) {
  42. clk_val_min = clk_val_round;
  43. /* Get voltage corner */
  44. voltage_corner = qcom_clk_get_voltage(tbl_entry->clk, clk_val_round);
  45. if (voltage_corner < 0 || voltage_corner > mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_TURBO]) {
  46. break;
  47. }
  48. /* voltage corner is below svsl1 */
  49. if (voltage_corner < mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_LOW_SVS])
  50. voltage_corner = mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_LOW_SVS];
  51. /* match vdd level */
  52. for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
  53. if (voltage_corner == mmrm_sw_vdd_corner[i])
  54. break;
  55. }
  56. /* update freq */
  57. while (i < MMRM_VDD_LEVEL_MAX) {
  58. tbl_entry->freq[i++] = clk_val_round;
  59. }
  60. }
  61. }
  62. /* print results */
  63. for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
  64. d_mpr_h("%s: csid(0x%x) corner(%s) clk_rate(%llu)\n",
  65. __func__,
  66. tbl_entry->clk_src_id,
  67. cset->corner_tbl[i].name,
  68. tbl_entry->freq[i]);
  69. }
  70. return rc;
  71. }
  72. static void mmrm_sw_print_client_data(struct mmrm_sw_clk_mgr_info *sinfo,
  73. struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
  74. {
  75. struct mmrm_driver_data *drv_data = (struct mmrm_driver_data *)sinfo->driver_data;
  76. struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
  77. struct voltage_corner_set *cset = &cres->corner_set;
  78. u32 i, j;
  79. for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
  80. d_mpr_p("%s: csid(0x%x) corner(%s) dyn_pwr(%zu) leak_pwr(%zu)\n",
  81. __func__,
  82. tbl_entry->clk_src_id,
  83. cset->corner_tbl[i].name,
  84. tbl_entry->dyn_pwr[i],
  85. tbl_entry->leak_pwr[i]);
  86. for (j = 0; j < MMRM_VDD_LEVEL_MAX; j++) {
  87. d_mpr_p("%s: csid(0x%x) total_pwr(%zu) cur_ma(%zu)\n",
  88. __func__,
  89. tbl_entry->clk_src_id,
  90. (tbl_entry->dyn_pwr[i] + tbl_entry->leak_pwr[i]),
  91. tbl_entry->current_ma[i][j]);
  92. }
  93. }
  94. }
/*
 * mmrm_sw_update_curr() - compute per-level dynamic/leakage power and the
 * current_ma[i][j] matrix for a client from its nominal power numbers.
 *
 * Nominal power values come from the matching nom_clk_src_info entry (looked
 * up by clk_src_id).  For each vdd level i the nominal dynamic power is
 * frequency-scaled (freq[i]/freq[NOM]) and multiplied by the corner's dynamic
 * scaling factor; leakage power is scaled by the leakage factor only.
 * current_ma[i][j] is total power at level i divided by the voltage factor of
 * level j (the prospective aggregate level).
 *
 * All scaling is done in Q16 fixed point via the FP/fp_* helpers; the
 * nominal values and scaling factors are Q16-encoded u32s split with
 * Q16_INT/Q16_FRAC (fraction expressed in hundredths for FP()).
 *
 * Return: 0 on success, -EINVAL if the clock source has no nominal entry.
 */
static int mmrm_sw_update_curr(struct mmrm_sw_clk_mgr_info *sinfo,
	struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
{
	u32 i, j;
	struct mmrm_driver_data *drv_data = (struct mmrm_driver_data *)sinfo->driver_data;
	struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
	struct voltage_corner_set *cset = &cres->corner_set;
	u32 scaling_factor = 0, voltage_factor = 0;
	fp_t nom_dyn_pwr, nom_leak_pwr, dyn_sc, leak_sc,
		volt, dyn_pwr, leak_pwr, pwr_mw, nom_freq;
	u32 c;
	struct nom_clk_src_info *nom_tbl_entry = NULL;

	/* locate the nominal-power entry that matches this clock source */
	for (c = 0; c < sinfo->tot_clk_clients; c++) {
		if (tbl_entry->clk_src_id == sinfo->clk_client_tbl[c].clk_src_id) {
			nom_tbl_entry = &cres->nom_clk_set.clk_src_tbl[c];
			break;
		}
	}
	if (nom_tbl_entry == NULL) {
		d_mpr_h("%s: can't find 0x%x clock src ID\n",
			__func__,
			tbl_entry->clk_src_id);
		return -EINVAL;
	}

	/* unpack Q16 nominal power values into fixed-point */
	nom_dyn_pwr = FP(Q16_INT(nom_tbl_entry->nom_dyn_pwr),
		Q16_FRAC(nom_tbl_entry->nom_dyn_pwr), 100);
	nom_leak_pwr = FP(Q16_INT(nom_tbl_entry->nom_leak_pwr),
		Q16_FRAC(nom_tbl_entry->nom_leak_pwr), 100);
	nom_freq = tbl_entry->freq[MMRM_VDD_LEVEL_NOM];

	/* update power & current entries for all levels */
	for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
		scaling_factor = cset->corner_tbl[i].scaling_factor_dyn;
		dyn_sc = FP(
			Q16_INT(scaling_factor), Q16_FRAC(scaling_factor), 100);
		scaling_factor = cset->corner_tbl[i].scaling_factor_leak;
		leak_sc = FP(
			Q16_INT(scaling_factor), Q16_FRAC(scaling_factor), 100);

		/* Frequency scaling: nominal dyn power * freq[i] / nom_freq */
		pwr_mw = fp_mult(nom_dyn_pwr, tbl_entry->freq[i]);
		pwr_mw = fp_div(pwr_mw, nom_freq);

		/* Scaling factor */
		dyn_pwr = fp_mult(pwr_mw, dyn_sc);
		leak_pwr = fp_mult(nom_leak_pwr, leak_sc);
		tbl_entry->dyn_pwr[i] = fp_round(dyn_pwr);
		tbl_entry->leak_pwr[i] = fp_round(leak_pwr);

		/* current at level i, for each candidate aggregate level j */
		for (j = 0; j < MMRM_VDD_LEVEL_MAX; j++) {
			voltage_factor = cset->corner_tbl[j].volt_factor;
			volt = FP(Q16_INT(voltage_factor), Q16_FRAC(voltage_factor), 100);
			tbl_entry->current_ma[i][j] = fp_round(fp_div((dyn_pwr+leak_pwr), volt));
		}
	}
	mmrm_sw_print_client_data(sinfo, tbl_entry);
	return 0;
}
  149. static struct mmrm_client *mmrm_sw_clk_client_register(
  150. struct mmrm_clk_mgr *sw_clk_mgr,
  151. struct mmrm_clk_client_desc clk_desc,
  152. enum mmrm_client_priority priority,
  153. void *pvt_data,
  154. notifier_callback_fn_t not_fn_cb)
  155. {
  156. int rc = 0;
  157. struct mmrm_client *clk_client = NULL;
  158. struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
  159. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  160. u32 c = 0;
  161. u32 clk_client_src_id = 0;
  162. mutex_lock(&sw_clk_mgr->lock);
  163. /* check if entry is free in table */
  164. if (sinfo->tot_clk_clients == sinfo->enabled_clk_clients) {
  165. d_mpr_e("%s: no free entry to register a clk client\n",
  166. __func__);
  167. rc = -EINVAL;
  168. goto err_nofree_entry;
  169. }
  170. /* look for entry that matches domain and id */
  171. clk_client_src_id = (clk_desc.client_domain << 16 | clk_desc.client_id);
  172. for (c = 0; c < sinfo->tot_clk_clients; c++) {
  173. if (clk_client_src_id == sinfo->clk_client_tbl[c].clk_src_id)
  174. break;
  175. }
  176. /* entry not found */
  177. if (c == sinfo->tot_clk_clients) {
  178. d_mpr_e("%s: unknown clk client 0x%x\n",
  179. __func__, clk_client_src_id);
  180. rc = -EINVAL;
  181. goto err_nofree_entry;
  182. }
  183. tbl_entry = &sinfo->clk_client_tbl[c];
  184. /* entry already registered */
  185. if (tbl_entry->client) {
  186. if (msm_mmrm_allow_multiple_register) {
  187. tbl_entry->ref_count++;
  188. d_mpr_h("%s: client csid(0x%x) already registered ref:%d\n",
  189. __func__, tbl_entry->clk_src_id, tbl_entry->ref_count);
  190. clk_client = tbl_entry->client;
  191. mmrm_sw_print_client_data(sinfo, tbl_entry);
  192. goto exit_found;
  193. }
  194. d_mpr_e("%s: client csid(0x%x) already registered\n",
  195. __func__, tbl_entry->clk_src_id);
  196. rc = -EINVAL;
  197. goto err_already_registered;
  198. }
  199. /* populate the entry */
  200. clk_client = kzalloc(sizeof(*clk_client), GFP_KERNEL);
  201. if (!clk_client) {
  202. d_mpr_e("%s: failed to allocate memory for clk_client\n",
  203. __func__);
  204. rc = -ENOMEM;
  205. goto err_fail_alloc_clk_client;
  206. }
  207. clk_client->client_uid = c;
  208. clk_client->client_type = MMRM_CLIENT_CLOCK;
  209. tbl_entry->ref_count = 1;
  210. /* copy the entries provided by client */
  211. tbl_entry->client = clk_client;
  212. strlcpy(tbl_entry->name, clk_desc.name, MMRM_CLK_CLIENT_NAME_SIZE);
  213. tbl_entry->clk = clk_desc.clk;
  214. tbl_entry->pri = priority;
  215. tbl_entry->pvt_data = pvt_data;
  216. tbl_entry->notifier_cb_fn = not_fn_cb;
  217. /* print table entry */
  218. d_mpr_h("%s: csid(0x%x) name(%s) pri(%d) pvt(%p) notifier(%p)\n",
  219. __func__,
  220. tbl_entry->clk_src_id,
  221. tbl_entry->name,
  222. tbl_entry->pri,
  223. tbl_entry->pvt_data,
  224. tbl_entry->notifier_cb_fn);
  225. /* determine full range of clock freq */
  226. rc = mmrm_sw_update_freq(sinfo, tbl_entry);
  227. if (rc) {
  228. d_mpr_e("%s: csid(0x%x) failed to update freq\n",
  229. __func__, tbl_entry->clk_src_id);
  230. goto err_fail_update_entry;
  231. }
  232. /* calculate current & scale power for other levels */
  233. rc = mmrm_sw_update_curr(sinfo, tbl_entry);
  234. if (rc) {
  235. d_mpr_e("%s: csid(0x%x) failed to update current\n",
  236. __func__, tbl_entry->clk_src_id);
  237. goto err_fail_update_entry;
  238. }
  239. exit_found:
  240. mutex_unlock(&sw_clk_mgr->lock);
  241. return clk_client;
  242. err_fail_update_entry:
  243. kfree(clk_client);
  244. err_fail_alloc_clk_client:
  245. tbl_entry->client = NULL;
  246. tbl_entry->clk = NULL;
  247. tbl_entry->pri = 0x0;
  248. tbl_entry->pvt_data = NULL;
  249. tbl_entry->notifier_cb_fn = NULL;
  250. err_nofree_entry:
  251. err_already_registered:
  252. mutex_unlock(&sw_clk_mgr->lock);
  253. d_mpr_e("%s: error = %d\n", __func__, rc);
  254. return NULL;
  255. }
  256. static int mmrm_sw_clk_client_deregister(struct mmrm_clk_mgr *sw_clk_mgr,
  257. struct mmrm_client *client)
  258. {
  259. int rc = 0;
  260. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  261. struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
  262. /* validate the client ptr */
  263. if (!client) {
  264. d_mpr_e("%s: invalid client\n");
  265. rc = -EINVAL;
  266. goto err_invalid_client;
  267. }
  268. if (client->client_uid >= sinfo->tot_clk_clients) {
  269. d_mpr_e("%s: invalid client uid (%d)\n",
  270. __func__, client->client_uid);
  271. rc = -EINVAL;
  272. goto err_invalid_client;
  273. }
  274. mutex_lock(&sw_clk_mgr->lock);
  275. tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
  276. if (tbl_entry->ref_count > 0) {
  277. tbl_entry->ref_count--;
  278. }
  279. if (tbl_entry->ref_count == 0) {
  280. kfree(tbl_entry->client);
  281. tbl_entry->vdd_level = 0;
  282. tbl_entry->clk_rate = 0;
  283. tbl_entry->client = NULL;
  284. tbl_entry->clk = NULL;
  285. tbl_entry->pri = 0x0;
  286. tbl_entry->pvt_data = NULL;
  287. tbl_entry->notifier_cb_fn = NULL;
  288. }
  289. mutex_unlock(&sw_clk_mgr->lock);
  290. return rc;
  291. err_invalid_client:
  292. d_mpr_e("%s: error = %d\n", __func__, rc);
  293. return rc;
  294. }
  295. static int mmrm_sw_get_req_level(
  296. struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
  297. unsigned long clk_val, u32 *req_level)
  298. {
  299. int rc = 0;
  300. int voltage_corner;
  301. u32 level;
  302. /* get voltage corner */
  303. voltage_corner = qcom_clk_get_voltage(tbl_entry->clk, clk_val);
  304. if (voltage_corner < 0 || voltage_corner > mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_TURBO]) {
  305. d_mpr_e("%s: csid(0x%x): invalid voltage corner(%d) for clk rate(%llu)\n",
  306. __func__,
  307. tbl_entry->clk_src_id,
  308. voltage_corner,
  309. clk_val);
  310. rc = voltage_corner;
  311. goto err_invalid_corner;
  312. }
  313. /* voltage corner is below low svs */
  314. if (voltage_corner < mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_LOW_SVS]) {
  315. d_mpr_h("%s: csid(0x%x): lower voltage corner(%d)\n",
  316. __func__,
  317. tbl_entry->clk_src_id,
  318. voltage_corner);
  319. *req_level = MMRM_VDD_LEVEL_LOW_SVS;
  320. goto exit_no_err;
  321. }
  322. /* match vdd level */
  323. for (level = 0; level < MMRM_VDD_LEVEL_MAX; level++) {
  324. if (voltage_corner == mmrm_sw_vdd_corner[level])
  325. break;
  326. }
  327. if (level == MMRM_VDD_LEVEL_MAX) {
  328. d_mpr_e("%s: csid(0x%x): invalid voltage corner(%d) for clk rate(%llu)\n",
  329. __func__,
  330. tbl_entry->clk_src_id,
  331. voltage_corner,
  332. clk_val);
  333. rc = -EINVAL;
  334. goto err_invalid_corner;
  335. }
  336. *req_level = level;
  337. d_mpr_h("%s: req_level(%d)\n", __func__, level);
  338. exit_no_err:
  339. return rc;
  340. err_invalid_corner:
  341. return rc;
  342. }
  343. static int mmrm_sw_check_req_level(
  344. struct mmrm_sw_clk_mgr_info *sinfo,
  345. u32 clk_src_id, u32 req_level, u32 *adj_level)
  346. {
  347. int rc = 0;
  348. struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
  349. struct mmrm_sw_clk_client_tbl_entry *tbl_entry = NULL;
  350. struct mmrm_sw_clk_client_tbl_entry *next_max_entry = NULL;
  351. u32 c, level = req_level;
  352. if (req_level >= MMRM_VDD_LEVEL_MAX) {
  353. d_mpr_e("%s: invalid level %lu\n", __func__, req_level);
  354. rc = -EINVAL;
  355. goto err_invalid_level;
  356. }
  357. d_mpr_h("%s: csid(0x%x) level(%d) peak_data->aggreg_level(%d)\n",
  358. __func__, clk_src_id, level, peak_data->aggreg_level);
  359. /* req_level is rejected when another client has a higher level */
  360. if (req_level < peak_data->aggreg_level) {
  361. for (c = 0; c < sinfo->tot_clk_clients; c++) {
  362. tbl_entry = &sinfo->clk_client_tbl[c];
  363. if (IS_ERR_OR_NULL(tbl_entry->clk) || !tbl_entry->clk_rate ||
  364. (tbl_entry->clk_src_id == clk_src_id)) {
  365. continue;
  366. }
  367. if (tbl_entry->vdd_level == peak_data->aggreg_level) {
  368. break;
  369. }
  370. if ((tbl_entry->vdd_level < peak_data->aggreg_level)
  371. && (tbl_entry->vdd_level > req_level ))
  372. next_max_entry = tbl_entry;
  373. }
  374. /* reject req level */
  375. if (c < sinfo->tot_clk_clients) {
  376. level = peak_data->aggreg_level;
  377. } else if (!IS_ERR_OR_NULL(next_max_entry)
  378. && next_max_entry->vdd_level > req_level) {
  379. level = next_max_entry->vdd_level;
  380. }
  381. }
  382. *adj_level = level;
  383. d_mpr_h("%s: adj_level(%d)\n", __func__, level);
  384. return rc;
  385. err_invalid_level:
  386. return rc;
  387. }
  388. static int mmrm_sw_calculate_total_current(
  389. struct mmrm_sw_clk_mgr_info *sinfo,
  390. u32 req_level, u32 *total_cur)
  391. {
  392. int rc = 0;
  393. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  394. u32 c, sum_cur = 0;
  395. if (req_level >= MMRM_VDD_LEVEL_MAX) {
  396. d_mpr_e("%s: invalid level %lu\n", __func__, req_level);
  397. rc = -EINVAL;
  398. goto err_invalid_level;
  399. }
  400. /* calculate sum of values (scaled by volt) */
  401. for (c = 0; c < sinfo->tot_clk_clients; c++) {
  402. tbl_entry = &sinfo->clk_client_tbl[c];
  403. if (IS_ERR_OR_NULL(tbl_entry->clk) || !tbl_entry->clk_rate) {
  404. continue;
  405. }
  406. sum_cur += tbl_entry->current_ma[tbl_entry->vdd_level][req_level];
  407. }
  408. *total_cur = sum_cur;
  409. d_mpr_h("%s: total_cur(%d)\n", __func__, sum_cur);
  410. return rc;
  411. err_invalid_level:
  412. return rc;
  413. }
  414. static int mmrm_sw_throttle_low_priority_client(
  415. struct mmrm_sw_clk_mgr_info *sinfo, int *delta_cur)
  416. {
  417. int rc = 0, i;
  418. u64 start_ts = 0, end_ts = 0;
  419. bool found_client_throttle = false;
  420. struct mmrm_sw_clk_client_tbl_entry *tbl_entry_throttle_client;
  421. struct mmrm_client_notifier_data notifier_data;
  422. struct completion timeout;
  423. struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
  424. struct mmrm_sw_throttled_clients_data *tc_data;
  425. u32 now_cur_ma, min_cur_ma;
  426. long clk_min_level = MMRM_VDD_LEVEL_LOW_SVS;
  427. init_completion(&timeout);
  428. for (i = 0; i < sinfo->throttle_clients_data_length ; i++) {
  429. tbl_entry_throttle_client =
  430. &sinfo->clk_client_tbl[sinfo->throttle_clients_info[i].tbl_entry_id];
  431. if (!IS_ERR_OR_NULL(tbl_entry_throttle_client)) {
  432. now_cur_ma = tbl_entry_throttle_client->current_ma
  433. [tbl_entry_throttle_client->vdd_level]
  434. [peak_data->aggreg_level];
  435. min_cur_ma = tbl_entry_throttle_client->current_ma[clk_min_level]
  436. [peak_data->aggreg_level];
  437. d_mpr_h("%s:csid(0x%x) name(%s)\n",
  438. __func__, tbl_entry_throttle_client->clk_src_id,
  439. tbl_entry_throttle_client->name);
  440. d_mpr_h("%s:now_cur_ma(%llu) min_cur_ma(%llu) delta_cur(%d)\n",
  441. __func__, now_cur_ma, min_cur_ma, *delta_cur);
  442. if ((now_cur_ma > min_cur_ma)
  443. && (now_cur_ma - min_cur_ma > *delta_cur)) {
  444. found_client_throttle = true;
  445. d_mpr_h("%s: Throttle client csid(0x%x) name(%s)\n",
  446. __func__, tbl_entry_throttle_client->clk_src_id,
  447. tbl_entry_throttle_client->name);
  448. d_mpr_h("%s:now_cur_ma %llu-min_cur_ma %llu>delta_cur %d\n",
  449. __func__, now_cur_ma, min_cur_ma, *delta_cur);
  450. /* found client to throttle, break from here. */
  451. break;
  452. }
  453. }
  454. }
  455. /*Client to throttle is found, Throttle this client now to minimum clock rate*/
  456. if (found_client_throttle) {
  457. /* Setup notifier */
  458. notifier_data.cb_type = MMRM_CLIENT_RESOURCE_VALUE_CHANGE;
  459. notifier_data.cb_data.val_chng.old_val =
  460. tbl_entry_throttle_client->freq[tbl_entry_throttle_client->vdd_level];
  461. notifier_data.cb_data.val_chng.new_val =
  462. tbl_entry_throttle_client->freq[clk_min_level];
  463. notifier_data.pvt_data = tbl_entry_throttle_client->pvt_data;
  464. start_ts = ktime_get_ns();
  465. if (tbl_entry_throttle_client->notifier_cb_fn)
  466. rc = tbl_entry_throttle_client->notifier_cb_fn(&notifier_data);
  467. end_ts = ktime_get_ns();
  468. d_mpr_h("%s: Client notifier cbk processing time %llu ns\n",
  469. __func__, (end_ts - start_ts));
  470. if (rc) {
  471. d_mpr_e("%s: Client failed to send SUCCESS in callback(%d)\n",
  472. __func__, tbl_entry_throttle_client->clk_src_id);
  473. rc = -EINVAL;
  474. goto err_clk_set_fail;
  475. }
  476. if ((end_ts - start_ts) > NOTIFY_TIMEOUT)
  477. d_mpr_e("%s:Client notifier cbk took %llu ns more than timeout %llu ns\n",
  478. __func__, (end_ts - start_ts), NOTIFY_TIMEOUT);
  479. if (tbl_entry_throttle_client->reserve == false) {
  480. rc = clk_set_rate(tbl_entry_throttle_client->clk,
  481. tbl_entry_throttle_client->freq[clk_min_level]);
  482. if (rc) {
  483. d_mpr_e("%s: Failed to throttle the clk csid(%d)\n",
  484. __func__, tbl_entry_throttle_client->clk_src_id);
  485. rc = -EINVAL;
  486. goto err_clk_set_fail;
  487. }
  488. }
  489. d_mpr_h("%s: %s throttled to %llu\n",
  490. __func__, tbl_entry_throttle_client->name,
  491. tbl_entry_throttle_client->freq[clk_min_level]);
  492. *delta_cur -= now_cur_ma - min_cur_ma;
  493. /* Store this client for bookkeeping */
  494. tc_data = kzalloc(sizeof(*tc_data), GFP_KERNEL);
  495. if (IS_ERR_OR_NULL(tc_data)) {
  496. d_mpr_e("%s: Failed to allocate memory\n", __func__);
  497. return -ENOMEM;
  498. }
  499. tc_data->table_id = i;
  500. tc_data->delta_cu_ma = now_cur_ma - min_cur_ma;
  501. tc_data->prev_vdd_level = tbl_entry_throttle_client->vdd_level;
  502. // Add throttled client to list to access it later
  503. list_add_tail(&tc_data->list, &sinfo->throttled_clients);
  504. /* Store the throttled clock rate of client */
  505. tbl_entry_throttle_client->clk_rate =
  506. tbl_entry_throttle_client->freq[clk_min_level];
  507. /* Store the corner level of throttled client */
  508. tbl_entry_throttle_client->vdd_level = clk_min_level;
  509. /* Clearing the reserve flag */
  510. tbl_entry_throttle_client->reserve = false;
  511. }
  512. err_clk_set_fail:
  513. return rc;
  514. }
  515. static void mmrm_sw_dump_enabled_client_info(struct mmrm_sw_clk_mgr_info *sinfo)
  516. {
  517. u32 c;
  518. struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
  519. struct mmrm_sw_clk_client_tbl_entry *tbl_entry = NULL;
  520. for (c = 0; c < sinfo->tot_clk_clients; c++) {
  521. tbl_entry = &sinfo->clk_client_tbl[c];
  522. if (tbl_entry->clk_rate) {
  523. d_mpr_e("%s: csid(0x%x) clk_rate(%zu) vdd_level(%zu) cur_ma(%zu)\n",
  524. __func__,
  525. tbl_entry->clk_src_id,
  526. tbl_entry->clk_rate,
  527. tbl_entry->vdd_level,
  528. tbl_entry->current_ma[tbl_entry->vdd_level]
  529. [peak_data->aggreg_level]);
  530. }
  531. }
  532. if (peak_data) {
  533. d_mpr_e("%s: aggreg_val(%zu) aggreg_level(%zu)\n", __func__,
  534. peak_data->aggreg_val, peak_data->aggreg_level);
  535. }
  536. }
/*
 * mmrm_reinstate_throttled_client() - walk the throttled-clients list and,
 * for each entry whose saved current now fits under the peak threshold,
 * notify the client that it may return to its pre-throttle rate, then drop
 * the bookkeeping entry.
 *
 * Caller is expected to hold the clk mgr lock (callers in this file do).
 * Return: always 0; notifier failures are logged but not propagated.
 */
static int mmrm_reinstate_throttled_client(struct mmrm_sw_clk_mgr_info *sinfo) {
	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
	struct mmrm_sw_throttled_clients_data *iter, *safe_iter = NULL;
	struct mmrm_client_notifier_data notifier_data;
	struct mmrm_sw_clk_client_tbl_entry *re_entry_throttle_client;
	int rc = 0;
	u64 start_ts = 0, end_ts = 0;

	/* _safe variant: entries are deleted while iterating */
	list_for_each_entry_safe(iter, safe_iter, &sinfo->throttled_clients, list) {
		/* only reinstate if adding the saved current stays under threshold */
		if (!IS_ERR_OR_NULL(iter) && peak_data->aggreg_val +
			iter->delta_cu_ma <= peak_data->threshold) {
			d_mpr_h("%s: table_id = %d\n", __func__, iter->table_id);
			/* map the bookkeeping id back to the client table entry */
			re_entry_throttle_client =
				&sinfo->clk_client_tbl
				[sinfo->throttle_clients_info
				[iter->table_id].tbl_entry_id];
			if (!IS_ERR_OR_NULL(re_entry_throttle_client)) {
				d_mpr_h("%s:found throttled client name(%s) clsid (0x%x)\n",
					__func__, re_entry_throttle_client->name,
					re_entry_throttle_client->clk_src_id);
				/* tell the client it may go back to its old rate */
				notifier_data.cb_type = MMRM_CLIENT_RESOURCE_VALUE_CHANGE;
				notifier_data.cb_data.val_chng.old_val =
					re_entry_throttle_client->freq[MMRM_VDD_LEVEL_LOW_SVS];
				notifier_data.cb_data.val_chng.new_val =
					re_entry_throttle_client->freq[iter->prev_vdd_level];
				notifier_data.pvt_data = re_entry_throttle_client->pvt_data;
				start_ts = ktime_get_ns();
				if (re_entry_throttle_client->notifier_cb_fn) {
					rc = re_entry_throttle_client->notifier_cb_fn
						(&notifier_data);
					end_ts = ktime_get_ns();
					d_mpr_h("%s: Client notifier cbk processing time(%llu)ns\n",
						__func__, end_ts - start_ts);
					if (rc) {
						/* best-effort: log and continue */
						d_mpr_e("%s: Client notifier callback failed(%d)\n",
							__func__,
							re_entry_throttle_client->clk_src_id);
					}
					if ((end_ts - start_ts) > NOTIFY_TIMEOUT)
						d_mpr_e("%s: Client notifier took %llu ns\n",
							__func__, (end_ts - start_ts));
				}
				/* drop the bookkeeping entry for this client */
				list_del(&iter->list);
				kfree(iter);
			}
		}
	}
	return 0;
}
/*
 * mmrm_sw_check_peak_current() - validate a client's rate request against the
 * aggregate peak-current budget and update the aggregate on success.
 *
 * Steps: adjust the requested level against other clients, recompute the
 * aggregate current at the adjusted level if it changed, compute the delta
 * this request adds/removes, and either accept it, clamp the aggregate to
 * zero (when the delta drives it non-positive), or — on overshoot — try to
 * throttle a low-priority client (only for high-priority requesters with the
 * throttle feature enabled).
 *
 * Caller must hold the clk mgr lock.
 * Return: 0 on success, negative errno on invalid level or peak overshoot.
 */
static int mmrm_sw_check_peak_current(struct mmrm_sw_clk_mgr_info *sinfo,
	struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
	u32 req_level, u32 clk_val, u32 num_hw_blocks)
{
	int rc = 0;
	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
	u32 adj_level = req_level;
	u32 peak_cur = peak_data->aggreg_val;
	u32 old_cur = 0, new_cur = 0;
	int delta_cur;

	/* check the req level and adjust according to tbl entries */
	rc = mmrm_sw_check_req_level(sinfo, tbl_entry->clk_src_id, req_level, &adj_level);
	if (rc) {
		goto err_invalid_level;
	}

	/* recalculate aggregated current with adj level */
	if (adj_level != peak_data->aggreg_level) {
		rc = mmrm_sw_calculate_total_current(sinfo, adj_level, &peak_cur);
		if (rc) {
			goto err_invalid_level;
		}
	}

	/* calculate delta cur: old contribution only if currently running */
	if (tbl_entry->clk_rate) {
		old_cur = tbl_entry->current_ma[tbl_entry->vdd_level][adj_level];
	}
	/* new contribution only for a non-zero rate request */
	if (clk_val) {
		new_cur = tbl_entry->current_ma[req_level][adj_level] * num_hw_blocks;
	}
	delta_cur = (signed)new_cur - old_cur;

	/* negative value, update peak data (clamp aggregate at zero) */
	if ((signed)peak_cur + delta_cur <= 0) {
		peak_data->aggreg_val = 0;
		peak_data->aggreg_level = adj_level;
		goto exit_no_err;
	}

	/* peak overshoot, do not update peak data */
	if ((signed)peak_cur + delta_cur >= peak_data->threshold) {
		/* Find low prority client and throttle it*/
		if ((tbl_entry->pri == MMRM_CLIENT_PRIOR_HIGH)
			&& (msm_mmrm_enable_throttle_feature > 0)) {
			rc = mmrm_sw_throttle_low_priority_client(sinfo, &delta_cur);
			if (rc != 0) {
				d_mpr_e("%s: Failed to throttle the low priority client\n",
					__func__);
				mmrm_sw_dump_enabled_client_info(sinfo);
				goto err_peak_overshoot;
			}
		} else {
			d_mpr_e("%s: Client csid(0x%x) name(%s) can't request throtlling\n",
				__func__, tbl_entry->clk_src_id, tbl_entry->name);
			mmrm_sw_dump_enabled_client_info(sinfo);
			rc = -EINVAL;
			goto err_peak_overshoot;
		}
	}

	/* update peak data (delta_cur may have been reduced by throttling) */
	peak_data->aggreg_val = peak_cur + delta_cur;
	peak_data->aggreg_level = adj_level;
	/* headroom may now allow previously throttled clients to resume */
	mmrm_reinstate_throttled_client(sinfo);

exit_no_err:
	d_mpr_h("%s: aggreg_val(%lu) aggreg_level(%lu)\n",
		__func__,
		peak_data->aggreg_val,
		peak_data->aggreg_level);
	return rc;

err_invalid_level:
err_peak_overshoot:
	return rc;
}
  655. static bool mmrm_sw_is_valid_num_hw_block(struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
  656. struct mmrm_client_data *client_data)
  657. {
  658. bool rc = false;
  659. u32 num_hw_blocks = client_data->num_hw_blocks;
  660. if (num_hw_blocks == 1) {
  661. rc = true;
  662. } else if (tbl_entry->clk_src_id == 0x10025) { // CAM_CC_IFE_CSID_CLK_SRC
  663. if (num_hw_blocks > 1 && num_hw_blocks <= 3)
  664. rc = true;
  665. } else if ((tbl_entry->clk_src_id == 0x10040) || // CAM_CC_IFE_LITE_CLK_SRC 
  666. (tbl_entry->clk_src_id == 0x10043)) { // CAM_CC_IFE_LITE_CSID_CLK_SRC
  667. if (num_hw_blocks > 1 && num_hw_blocks <= 5)
  668. rc = true;
  669. } else if (tbl_entry->clk_src_id == 0x1004B) { // CAM_CC_JPEG_CLK_SRC
  670. if (num_hw_blocks > 1 && num_hw_blocks <= 2)
  671. rc = true;
  672. } else if (tbl_entry->clk_src_id == 0x10017) { // CAM_CC_CPHY_RX_CLK_SRC
  673. if (num_hw_blocks > 1 && num_hw_blocks <= 9)
  674. rc = true;
  675. }
  676. return rc;
  677. }
  678. static int mmrm_sw_clk_client_setval(struct mmrm_clk_mgr *sw_clk_mgr,
  679. struct mmrm_client *client,
  680. struct mmrm_client_data *client_data,
  681. unsigned long clk_val)
  682. {
  683. int rc = 0;
  684. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  685. struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
  686. bool req_reserve;
  687. u32 req_level;
  688. /* validate input params */
  689. if (!client) {
  690. d_mpr_e("%s: invalid client\n");
  691. rc = -EINVAL;
  692. goto err_invalid_client;
  693. }
  694. if (client->client_uid >= sinfo->tot_clk_clients) {
  695. d_mpr_e("%s: invalid client uid (%d)\n",
  696. __func__, client->client_uid);
  697. rc = -EINVAL;
  698. goto err_invalid_client;
  699. }
  700. if (!client_data) {
  701. d_mpr_e("%s: invalid client data\n", __func__);
  702. rc = -EINVAL;
  703. goto err_invalid_client_data;
  704. }
  705. /* get table entry */
  706. tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
  707. if (IS_ERR_OR_NULL(tbl_entry->clk)) {
  708. d_mpr_e("%s: clk src not registered\n");
  709. rc = -EINVAL;
  710. goto err_invalid_client;
  711. }
  712. d_mpr_h("%s: csid(0x%x) clk rate %llu\n",
  713. __func__, tbl_entry->clk_src_id, clk_val);
  714. /* Check if the requested clk rate is the same as the current clk rate.
  715. * When clk rates are the same, compare this with the current state.
  716. * Skip when duplicate calculations will be made.
  717. * --- current ---- requested --- action ---
  718. * a. reserve && req_reserve: skip
  719. * b. !reserve && !req_reserve: skip
  720. * c. !reserve && req_reserve: skip
  721. * d. reserve && !req_reserve: set clk rate
  722. */
  723. req_reserve = client_data->flags & MMRM_CLIENT_DATA_FLAG_RESERVE_ONLY;
  724. if (tbl_entry->clk_rate == clk_val &&
  725. tbl_entry->num_hw_blocks == client_data->num_hw_blocks) {
  726. d_mpr_h("%s: csid(0x%x) same as previous clk rate %llu\n",
  727. __func__, tbl_entry->clk_src_id, clk_val);
  728. /* a & b */
  729. if (tbl_entry->reserve == req_reserve)
  730. goto exit_no_err;
  731. /* c & d */
  732. mutex_lock(&sw_clk_mgr->lock);
  733. tbl_entry->reserve = req_reserve;
  734. mutex_unlock(&sw_clk_mgr->lock);
  735. /* skip or set clk rate */
  736. if (req_reserve)
  737. goto exit_no_err;
  738. else
  739. goto set_clk_rate;
  740. }
  741. /* get corresponding level */
  742. if (clk_val) {
  743. rc = mmrm_sw_get_req_level(tbl_entry, clk_val, &req_level);
  744. if (rc || req_level >= MMRM_VDD_LEVEL_MAX) {
  745. d_mpr_e("%s: csid(0x%x) unable to get level for clk rate %llu\n",
  746. __func__, tbl_entry->clk_src_id, clk_val);
  747. rc = -EINVAL;
  748. goto err_invalid_clk_val;
  749. }
  750. if (!mmrm_sw_is_valid_num_hw_block(tbl_entry, client_data)) {
  751. d_mpr_e("%s: csid(0x%x) num_hw_block:%d\n",
  752. __func__, tbl_entry->clk_src_id, client_data->num_hw_blocks);
  753. rc = -EINVAL;
  754. goto err_invalid_client_data;
  755. }
  756. } else {
  757. req_level = 0;
  758. }
  759. mutex_lock(&sw_clk_mgr->lock);
  760. /* check and update for peak current */
  761. rc = mmrm_sw_check_peak_current(sinfo, tbl_entry,
  762. req_level, clk_val, client_data->num_hw_blocks);
  763. if (rc) {
  764. d_mpr_e("%s: csid (0x%x) peak overshoot peak_cur(%lu)\n",
  765. __func__, tbl_entry->clk_src_id,
  766. sinfo->peak_cur_data.aggreg_val);
  767. mutex_unlock(&sw_clk_mgr->lock);
  768. goto err_peak_overshoot;
  769. }
  770. /* update table entry */
  771. tbl_entry->clk_rate = clk_val;
  772. tbl_entry->vdd_level = req_level;
  773. tbl_entry->reserve = req_reserve;
  774. tbl_entry->num_hw_blocks = client_data->num_hw_blocks;
  775. mutex_unlock(&sw_clk_mgr->lock);
  776. /* check reserve only flag (skip set clock rate) */
  777. if (req_reserve) {
  778. d_mpr_h("%s: csid(0x%x) skip setting clk rate\n",
  779. __func__, tbl_entry->clk_src_id);
  780. rc = 0;
  781. goto exit_no_err;
  782. }
  783. set_clk_rate:
  784. d_mpr_h("%s: csid(0x%x) setting clk rate %llu\n",
  785. __func__, tbl_entry->clk_src_id, clk_val);
  786. rc = clk_set_rate(tbl_entry->clk, clk_val);
  787. if (rc) {
  788. d_mpr_e("%s: csid(0x%x) failed to set clk rate %llu\n",
  789. __func__, tbl_entry->clk_src_id, clk_val);
  790. rc = -EINVAL;
  791. /* TBD: incase of failure clk_rate is invalid */
  792. goto err_clk_set_fail;
  793. }
  794. exit_no_err:
  795. d_mpr_h("%s: clk rate %lu set successfully for %s\n",
  796. __func__, clk_val, tbl_entry->name);
  797. return rc;
  798. err_invalid_client:
  799. err_invalid_client_data:
  800. err_invalid_clk_val:
  801. err_peak_overshoot:
  802. err_clk_set_fail:
  803. d_mpr_e("%s: error = %d\n", __func__, rc);
  804. return rc;
  805. }
  806. static int mmrm_sw_clk_client_setval_inrange(struct mmrm_clk_mgr *sw_clk_mgr,
  807. struct mmrm_client *client,
  808. struct mmrm_client_data *client_data,
  809. struct mmrm_client_res_value *val)
  810. {
  811. /* TBD: add support for set val in range */
  812. return mmrm_sw_clk_client_setval(sw_clk_mgr, client, client_data,
  813. val->cur);
  814. }
  815. static int mmrm_sw_clk_client_getval(struct mmrm_clk_mgr *sw_clk_mgr,
  816. struct mmrm_client *client,
  817. struct mmrm_client_res_value *val)
  818. {
  819. int rc = 0;
  820. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  821. struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
  822. /* validate input params */
  823. if (!client) {
  824. d_mpr_e("%s: invalid client\n");
  825. rc = -EINVAL;
  826. goto err_invalid_client;
  827. }
  828. if (client->client_uid >= sinfo->tot_clk_clients) {
  829. d_mpr_e("%s: invalid client uid (%d)\n",
  830. __func__, client->client_uid);
  831. rc = -EINVAL;
  832. goto err_invalid_client;
  833. }
  834. tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
  835. if (!tbl_entry->clk) {
  836. d_mpr_e("%s: clk src not registered\n");
  837. rc = -EINVAL;
  838. goto err_invalid_client;
  839. }
  840. /* return previously configured value */
  841. /* TBD: Identify the min & max values */
  842. val->min = tbl_entry->clk_rate;
  843. val->cur = tbl_entry->clk_rate;
  844. val->max = tbl_entry->clk_rate;
  845. return rc;
  846. err_invalid_client:
  847. d_mpr_e("%s: error = %d\n", __func__, rc);
  848. return rc;
  849. }
/*
 * SW clock manager client ops vtable; mmrm_init_sw_clk_mgr() installs it
 * on the clk mgr so the generic client API dispatches to the SW scheme.
 */
static struct mmrm_clk_mgr_client_ops clk_client_swops = {
	.clk_client_reg = mmrm_sw_clk_client_register,
	.clk_client_dereg = mmrm_sw_clk_client_deregister,
	.clk_client_setval = mmrm_sw_clk_client_setval,
	.clk_client_setval_inrange = mmrm_sw_clk_client_setval_inrange,
	.clk_client_getval = mmrm_sw_clk_client_getval,
};
  857. static int mmrm_sw_prepare_table(struct mmrm_clk_platform_resources *cres,
  858. struct mmrm_sw_clk_mgr_info *sinfo)
  859. {
  860. int rc = 0;
  861. u32 c;
  862. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  863. struct nom_clk_src_info *nom_tbl_entry;
  864. /* read all resource entries */
  865. for (c = 0; c < sinfo->tot_clk_clients; c++) {
  866. tbl_entry = &sinfo->clk_client_tbl[c];
  867. nom_tbl_entry = &cres->nom_clk_set.clk_src_tbl[c];
  868. tbl_entry->clk_src_id = (nom_tbl_entry->domain << 16 |
  869. nom_tbl_entry->clk_src_id);
  870. tbl_entry->dyn_pwr[MMRM_VDD_LEVEL_NOM] =
  871. nom_tbl_entry->nom_dyn_pwr;
  872. tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM] =
  873. nom_tbl_entry->nom_leak_pwr;
  874. d_mpr_h("%s: updating csid(0x%x) dyn_pwr(%d) leak_pwr(%d)\n",
  875. __func__,
  876. tbl_entry->clk_src_id,
  877. tbl_entry->dyn_pwr[MMRM_VDD_LEVEL_NOM],
  878. tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM]);
  879. }
  880. return rc;
  881. }
  882. int mmrm_init_sw_clk_mgr(void *driver_data)
  883. {
  884. int rc = 0, i, j;
  885. struct mmrm_driver_data *drv_data =
  886. (struct mmrm_driver_data *)driver_data;
  887. struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
  888. struct mmrm_sw_clk_mgr_info *sinfo = NULL;
  889. struct mmrm_clk_mgr *sw_clk_mgr = NULL;
  890. u32 tbl_size = 0;
  891. /* mmrm_sw_clk_mgr */
  892. sw_clk_mgr = kzalloc(sizeof(*sw_clk_mgr), GFP_KERNEL);
  893. if (!sw_clk_mgr) {
  894. d_mpr_e("%s: failed to allocate memory for sw_clk_mgr\n",
  895. __func__);
  896. rc = -ENOMEM;
  897. goto err_fail_sw_clk_mgr;
  898. }
  899. /* initialize the tables */
  900. tbl_size = sizeof(struct mmrm_sw_clk_client_tbl_entry) *
  901. cres->nom_clk_set.count;
  902. sinfo = &(sw_clk_mgr->data.sw_info);
  903. sinfo->driver_data = drv_data;
  904. sinfo->clk_client_tbl = kzalloc(tbl_size, GFP_KERNEL);
  905. if (!sinfo->clk_client_tbl) {
  906. d_mpr_e(
  907. "%s: failed to allocate memory for clk_client_tbl (%d)\n",
  908. __func__, cres->nom_clk_set.count);
  909. rc = -ENOMEM;
  910. goto err_fail_clk_tbl;
  911. }
  912. sinfo->tot_clk_clients = cres->nom_clk_set.count;
  913. sinfo->enabled_clk_clients = 0;
  914. INIT_LIST_HEAD(&sinfo->throttled_clients);
  915. /* prepare table entries */
  916. rc = mmrm_sw_prepare_table(cres, sinfo);
  917. if (rc) {
  918. d_mpr_e("%s: failed to prepare clk table\n", __func__);
  919. rc = -ENOMEM;
  920. goto err_fail_prep_tbl;
  921. }
  922. /* update the peak current threshold */
  923. sinfo->peak_cur_data.threshold = cres->threshold;
  924. sinfo->peak_cur_data.aggreg_val = 0;
  925. sinfo->peak_cur_data.aggreg_level = 0;
  926. sinfo->throttle_clients_data_length = cres->throttle_clients_data_length;
  927. for (i = 0; i < sinfo->throttle_clients_data_length; i++) {
  928. for (j = 0; j < sinfo->tot_clk_clients; j++) {
  929. if (sinfo->clk_client_tbl[j].clk_src_id
  930. == cres->clsid_threshold_clients[i]) {
  931. sinfo->throttle_clients_info[i].csid_throttle_client
  932. = cres->clsid_threshold_clients[i];
  933. sinfo->throttle_clients_info[i].tbl_entry_id = j;
  934. break;
  935. }
  936. }
  937. }
  938. /* initialize mutex for sw clk mgr */
  939. mutex_init(&sw_clk_mgr->lock);
  940. sw_clk_mgr->scheme = drv_data->clk_res.scheme;
  941. /* clk client operations */
  942. sw_clk_mgr->clk_client_ops = &clk_client_swops;
  943. drv_data->clk_mgr = sw_clk_mgr;
  944. return rc;
  945. err_fail_prep_tbl:
  946. kfree(sinfo->clk_client_tbl);
  947. err_fail_clk_tbl:
  948. kfree(sw_clk_mgr);
  949. drv_data->clk_mgr = NULL;
  950. err_fail_sw_clk_mgr:
  951. d_mpr_e("%s: error = %d\n", __func__, rc);
  952. return rc;
  953. }
  954. int mmrm_destroy_sw_clk_mgr(struct mmrm_clk_mgr *sw_clk_mgr)
  955. {
  956. int rc = 0;
  957. struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
  958. struct mmrm_sw_throttled_clients_data *iter, *safe_iter = NULL;
  959. list_for_each_entry_safe(iter, safe_iter, &sinfo->throttled_clients, list) {
  960. list_del(&iter->list);
  961. kfree(iter);
  962. }
  963. if (!sw_clk_mgr) {
  964. d_mpr_e("%s: sw_clk_mgr null\n", __func__);
  965. return -EINVAL;
  966. }
  967. kfree(sw_clk_mgr->data.sw_info.clk_client_tbl);
  968. mutex_destroy(&sw_clk_mgr->lock);
  969. kfree(sw_clk_mgr);
  970. return rc;
  971. }