mmrm_clk_rsrc_mgr_sw.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/slab.h>
  7. #include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
  8. #include <linux/clk.h>
  9. #include <linux/clk/qcom.h>
  10. #include "mmrm_debug.h"
  11. #include "mmrm_clk_rsrc_mgr.h"
  12. #include "mmrm_fixedpoint.h"
  13. #define Q16_INT(q) ((q) >> 16)
  14. #define Q16_FRAC(q) ((((q) & 0xFFFF) * 100) >> 16)
  15. #define CLK_RATE_STEP 1000000
  16. #define NOTIFY_TIMEOUT 100000000
  17. /* Max HW DRV Instances (inst 0-5)*/
  18. #define MAX_HW_DRV_INSTANCES 6
  19. /* Max HW DRV Instances (power states 0-4)*/
  20. #define MAX_POWER_STATES 5
  21. static int mmrm_sw_update_freq(
  22. struct mmrm_sw_clk_mgr_info *sinfo, struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
  23. {
  24. int rc = 0;
  25. u32 i;
  26. struct mmrm_driver_data *drv_data = (struct mmrm_driver_data *)sinfo->driver_data;
  27. struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
  28. struct voltage_corner_set *cset = &cres->corner_set;
  29. long clk_val_min, clk_val_max, clk_val, clk_val_round;
  30. int voltage_corner;
  31. clk_val_min = clk_round_rate(tbl_entry->clk, 1);
  32. clk_val_max = clk_round_rate(tbl_entry->clk, ~0UL);
  33. d_mpr_h("%s: csid(0x%x): min_clk_rate(%llu) max_clk_rate(%llu)\n",
  34. __func__,
  35. tbl_entry->clk_src_id,
  36. clk_val_min,
  37. clk_val_max);
  38. /* init with min val */
  39. for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
  40. tbl_entry->freq[i] = clk_val_min;
  41. }
  42. /* step through rates */
  43. for (clk_val = clk_val_min; clk_val < clk_val_max; clk_val += CLK_RATE_STEP) {
  44. /* get next clk rate */
  45. clk_val_round = clk_round_rate(tbl_entry->clk, clk_val);
  46. if (clk_val_round > clk_val_min) {
  47. clk_val_min = clk_val_round;
  48. /* Get voltage corner */
  49. voltage_corner = qcom_clk_get_voltage(tbl_entry->clk, clk_val_round);
  50. if (voltage_corner < 0 || voltage_corner > mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_TURBO]) {
  51. break;
  52. }
  53. /* voltage corner is below svsl1 */
  54. if (voltage_corner < mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_LOW_SVS])
  55. voltage_corner = mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_LOW_SVS];
  56. /* match vdd level */
  57. for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
  58. if (voltage_corner == mmrm_sw_vdd_corner[i])
  59. break;
  60. }
  61. /* update freq */
  62. while (i < MMRM_VDD_LEVEL_MAX) {
  63. tbl_entry->freq[i++] = clk_val_round;
  64. }
  65. }
  66. }
  67. /* print results */
  68. for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
  69. d_mpr_h("%s: csid(0x%x) corner(%s) clk_rate(%llu)\n",
  70. __func__,
  71. tbl_entry->clk_src_id,
  72. cset->corner_tbl[i].name,
  73. tbl_entry->freq[i]);
  74. }
  75. return rc;
  76. }
  77. static void mmrm_sw_print_client_data(struct mmrm_sw_clk_mgr_info *sinfo,
  78. struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
  79. {
  80. struct mmrm_driver_data *drv_data = (struct mmrm_driver_data *)sinfo->driver_data;
  81. struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
  82. struct voltage_corner_set *cset = &cres->corner_set;
  83. u32 i, j;
  84. for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
  85. d_mpr_p("%s: csid(0x%x) corner(%s) dyn_pwr(%zu) leak_pwr(%zu)\n",
  86. __func__,
  87. tbl_entry->clk_src_id,
  88. cset->corner_tbl[i].name,
  89. tbl_entry->dyn_pwr[i],
  90. tbl_entry->leak_pwr[i]);
  91. for (j = 0; j < MMRM_VDD_LEVEL_MAX; j++) {
  92. d_mpr_p("%s: csid(0x%x) total_pwr(%zu) cur_ma(%zu)\n",
  93. __func__,
  94. tbl_entry->clk_src_id,
  95. (tbl_entry->dyn_pwr[i] + tbl_entry->leak_pwr[i]),
  96. tbl_entry->current_ma[i][j]);
  97. }
  98. }
  99. }
  100. static void mmrm_sw_print_crm_table(struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
  101. {
  102. int i;
  103. if (!tbl_entry->is_crm_client)
  104. return;
  105. for (i = 0; i < tbl_entry->crm_client_tbl_size; i++)
  106. d_mpr_h("%s: csid(0x%x) client tbl idx %d val %llu\n",
  107. __func__, tbl_entry->clk_src_id,
  108. i, tbl_entry->crm_client_tbl[i]);
  109. d_mpr_h("%s: csid(0x%x) client tbl max rate (idx %d) : %llu\n",
  110. __func__, tbl_entry->clk_src_id, tbl_entry->max_rate_idx,
  111. tbl_entry->clk_rate);
  112. }
  113. static u64 mmrm_sw_get_max_crm_rate(
  114. struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
  115. struct mmrm_client_data *client_data, unsigned long new_clk_val,
  116. int *new_max_rate_idx)
  117. {
  118. u32 crm_max_rate, new_val_idx;
  119. crm_max_rate = tbl_entry->clk_rate;
  120. *new_max_rate_idx = tbl_entry->max_rate_idx;
  121. new_val_idx = (client_data->drv_type == MMRM_CRM_SW_DRV) ?
  122. (tbl_entry->crm_client_tbl_size - 1) : (tbl_entry->num_pwr_states *
  123. client_data->crm_drv_idx + client_data->pwr_st);
  124. if (new_clk_val > crm_max_rate) {
  125. crm_max_rate = new_clk_val;
  126. *new_max_rate_idx = new_val_idx;
  127. } else {
  128. /*
  129. * Get the new crm_max_rate from all SW/HW clients.
  130. * If the index with current max value is being updated with a lower value,
  131. * check if that index still has the max value or if another index has
  132. * the new max value.
  133. */
  134. if (new_val_idx == tbl_entry->max_rate_idx) {
  135. int i;
  136. crm_max_rate = 0;
  137. for (i = 0; i < tbl_entry->crm_client_tbl_size; i++) {
  138. if (i == tbl_entry->max_rate_idx)
  139. continue;
  140. if (tbl_entry->crm_client_tbl[i] > crm_max_rate) {
  141. crm_max_rate = tbl_entry->crm_client_tbl[i];
  142. *new_max_rate_idx = i;
  143. }
  144. }
  145. if (new_clk_val >= crm_max_rate) {
  146. /* New value at old max index is still the maximum value */
  147. crm_max_rate = new_clk_val;
  148. *new_max_rate_idx = tbl_entry->max_rate_idx;
  149. }
  150. }
  151. }
  152. d_mpr_h("%s: csid(0x%x) new clk rate(idx %d) = %llu, crm_max_rate(idx %d) = %llu\n",
  153. __func__, tbl_entry->clk_src_id, new_val_idx, new_clk_val,
  154. *new_max_rate_idx, crm_max_rate);
  155. return crm_max_rate;
  156. }
/*
 * mmrm_sw_update_curr() - derive per-vdd-level power and current tables for a
 * client from its nominal (NOM-corner) power figures.
 *
 * Looks up the client's nominal dynamic/leakage power in the platform's
 * nom_clk_set table (matched by clk_src_id), then for each vdd level i:
 *   dyn_pwr[i]  = nom_dyn_pwr * (freq[i] / nom_freq) * dyn_scaling_factor[i]
 *   leak_pwr[i] = nom_leak_pwr * leak_scaling_factor[i]
 * and for each aggregated level j:
 *   current_ma[i][j] = (dyn_pwr + leak_pwr) / volt_factor[j]
 * All arithmetic is Q16 fixed-point via the FP/fp_* helpers.
 *
 * @sinfo:     SW clock manager info (driver data, corner set, client table).
 * @tbl_entry: entry whose dyn_pwr/leak_pwr/current_ma tables are filled.
 *
 * Return: 0 on success, -EINVAL if the clock source id is not found in the
 * nominal clock set.
 */
static int mmrm_sw_update_curr(struct mmrm_sw_clk_mgr_info *sinfo,
	struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
{
	u32 i, j;
	struct mmrm_driver_data *drv_data = (struct mmrm_driver_data *)sinfo->driver_data;
	struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
	struct voltage_corner_set *cset = &cres->corner_set;
	u32 scaling_factor = 0, voltage_factor = 0;
	fp_t nom_dyn_pwr, nom_leak_pwr, dyn_sc, leak_sc,
		volt, dyn_pwr, leak_pwr, pwr_mw, nom_freq;
	u32 c;
	struct nom_clk_src_info *nom_tbl_entry = NULL;

	/* find the nominal power entry matching this client's clock source */
	for (c = 0; c < sinfo->tot_clk_clients; c++) {
		if (tbl_entry->clk_src_id == sinfo->clk_client_tbl[c].clk_src_id) {
			nom_tbl_entry = &cres->nom_clk_set.clk_src_tbl[c];
			break;
		}
	}
	if (nom_tbl_entry == NULL) {
		d_mpr_h("%s: can't find 0x%x clock src ID\n",
			__func__,
			tbl_entry->clk_src_id);
		return -EINVAL;
	}

	/* nominal powers are stored as Q16; convert to the local fp_t format */
	nom_dyn_pwr = FP(Q16_INT(nom_tbl_entry->nom_dyn_pwr),
		Q16_FRAC(nom_tbl_entry->nom_dyn_pwr), 100);
	nom_leak_pwr = FP(Q16_INT(nom_tbl_entry->nom_leak_pwr),
		Q16_FRAC(nom_tbl_entry->nom_leak_pwr), 100);
	nom_freq = tbl_entry->freq[MMRM_VDD_LEVEL_NOM];

	/* update power & current entries for all levels */
	for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
		scaling_factor = cset->corner_tbl[i].scaling_factor_dyn;
		dyn_sc = FP(
			Q16_INT(scaling_factor), Q16_FRAC(scaling_factor), 100);

		scaling_factor = cset->corner_tbl[i].scaling_factor_leak;
		leak_sc = FP(
			Q16_INT(scaling_factor), Q16_FRAC(scaling_factor), 100);

		/* Frequency scaling: dynamic power scales linearly with freq */
		pwr_mw = fp_mult(nom_dyn_pwr, tbl_entry->freq[i]);
		pwr_mw = fp_div(pwr_mw, nom_freq);

		/* Scaling factor */
		dyn_pwr = fp_mult(pwr_mw, dyn_sc);
		leak_pwr = fp_mult(nom_leak_pwr, leak_sc);

		tbl_entry->dyn_pwr[i] = fp_round(dyn_pwr);
		tbl_entry->leak_pwr[i] = fp_round(leak_pwr);

		/* current at each aggregated corner j: total power / voltage */
		for (j = 0; j < MMRM_VDD_LEVEL_MAX; j++) {
			voltage_factor = cset->corner_tbl[j].volt_factor;
			volt = FP(Q16_INT(voltage_factor), Q16_FRAC(voltage_factor), 100);

			tbl_entry->current_ma[i][j] = fp_round(fp_div((dyn_pwr+leak_pwr), volt));
		}
	}
	mmrm_sw_print_client_data(sinfo, tbl_entry);
	return 0;
}
/*
 * mmrm_sw_clk_client_register() - register a clock client with the SW clock
 * manager and return a newly allocated mmrm_client handle.
 *
 * Matches the (domain, id) pair from clk_desc against the pre-populated
 * client table, allocates and fills the handle, optionally sets up a CRM
 * rate-vote table for CRM-managed clients, and precomputes the client's
 * freq/power/current tables via mmrm_sw_update_freq()/mmrm_sw_update_curr().
 *
 * If the entry is already registered and msm_mmrm_allow_multiple_register is
 * set, the existing handle is returned with ref_count incremented.
 *
 * @sw_clk_mgr: SW clock manager (its lock serializes table access).
 * @clk_desc:   client description (domain/id, clk handle, name, CRM params).
 * @priority:   client priority stored in the table entry.
 * @pvt_data:   opaque client cookie passed back in notifier callbacks.
 * @not_fn_cb:  notifier callback invoked on resource value changes.
 *
 * Return: the client handle on success, NULL on any failure (error logged).
 */
static struct mmrm_client *mmrm_sw_clk_client_register(
	struct mmrm_clk_mgr *sw_clk_mgr,
	struct mmrm_clk_client_desc clk_desc,
	enum mmrm_client_priority priority,
	void *pvt_data,
	notifier_callback_fn_t not_fn_cb)
{
	int rc = 0;
	struct mmrm_client *clk_client = NULL;
	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
	u32 c = 0;
	u32 clk_client_src_id = 0;

	mutex_lock(&sw_clk_mgr->lock);

	/* check if entry is free in table */
	if (sinfo->tot_clk_clients == sinfo->enabled_clk_clients) {
		d_mpr_e("%s: no free entry to register a clk client\n",
			__func__);
		rc = -EINVAL;
		goto err_nofree_entry;
	}

	/* look for entry that matches domain and id */
	clk_client_src_id = (clk_desc.client_domain << 16 | clk_desc.client_id);
	for (c = 0; c < sinfo->tot_clk_clients; c++) {
		if (clk_client_src_id == sinfo->clk_client_tbl[c].clk_src_id)
			break;
	}

	/* entry not found */
	if (c == sinfo->tot_clk_clients) {
		d_mpr_e("%s: unknown clk client 0x%x\n",
			__func__, clk_client_src_id);
		rc = -EINVAL;
		goto err_nofree_entry;
	}

	tbl_entry = &sinfo->clk_client_tbl[c];

	/* entry already registered */
	if (tbl_entry->client) {
		if (msm_mmrm_allow_multiple_register) {
			tbl_entry->ref_count++;
			d_mpr_h("%s: client csid(0x%x) already registered ref:%d\n",
				__func__, tbl_entry->clk_src_id, tbl_entry->ref_count);
			clk_client = tbl_entry->client;
			mmrm_sw_print_client_data(sinfo, tbl_entry);
			goto exit_found;
		}

		d_mpr_e("%s: client csid(0x%x) already registered\n",
			__func__, tbl_entry->clk_src_id);
		rc = -EINVAL;
		goto err_already_registered;
	}

	/* populate the entry */
	clk_client = kzalloc(sizeof(*clk_client), GFP_KERNEL);
	if (!clk_client) {
		d_mpr_e("%s: failed to allocate memory for clk_client\n",
			__func__);
		rc = -ENOMEM;
		goto err_fail_alloc_clk_client;
	}

	/* client_uid doubles as the index into clk_client_tbl */
	clk_client->client_uid = c;
	clk_client->client_type = MMRM_CLIENT_CLOCK;
	tbl_entry->ref_count = 1;

	/* copy the entries provided by client */
	tbl_entry->client = clk_client;
	strlcpy(tbl_entry->name, clk_desc.name, MMRM_CLK_CLIENT_NAME_SIZE);
	tbl_entry->clk = clk_desc.clk;
	tbl_entry->pri = priority;
	tbl_entry->pvt_data = pvt_data;
	tbl_entry->notifier_cb_fn = not_fn_cb;

	/* bound-check CRM parameters before sizing the vote table */
	if (clk_desc.hw_drv_instances > MAX_HW_DRV_INSTANCES
		|| clk_desc.num_pwr_states > MAX_POWER_STATES) {
		d_mpr_e("%s: Invalid CRM data: HW DRV instances %d power states %d\n",
			__func__, clk_desc.hw_drv_instances, clk_desc.num_pwr_states);
		rc = -EINVAL;
		goto err_invalid_crm_data;
	}

	/* CRM-managed client: one slot per (HW DRV, power state) + 1 for SW DRV */
	if (clk_desc.hw_drv_instances > 0 && clk_desc.num_pwr_states > 0) {
		d_mpr_h("%s: CRM-managed clock client: HW DRV instances %d, power states %d\n",
			__func__, clk_desc.hw_drv_instances, clk_desc.num_pwr_states);
		tbl_entry->crm_client_tbl_size = clk_desc.hw_drv_instances *
			clk_desc.num_pwr_states + 1;
		tbl_entry->crm_client_tbl = kcalloc(tbl_entry->crm_client_tbl_size,
			sizeof(u64), GFP_KERNEL);
		if (!tbl_entry->crm_client_tbl) {
			d_mpr_e("%s: failed to allocate CRM client table\n", __func__);
			rc = -ENOMEM;
			goto err_fail_alloc_crm_tbl;
		}

		tbl_entry->is_crm_client = 1;
		tbl_entry->max_rate_idx = 0;
		tbl_entry->hw_drv_instances = clk_desc.hw_drv_instances;
		tbl_entry->num_pwr_states = clk_desc.num_pwr_states;
	}

	/* print table entry */
	d_mpr_h("%s: csid(0x%x) name(%s) pri(%d) pvt(%p) notifier(%p) hw_drv_instances(%d) num_pwr_states(%d)\n",
		__func__,
		tbl_entry->clk_src_id,
		tbl_entry->name,
		tbl_entry->pri,
		tbl_entry->pvt_data,
		tbl_entry->notifier_cb_fn,
		tbl_entry->hw_drv_instances,
		tbl_entry->num_pwr_states);

	/* determine full range of clock freq */
	rc = mmrm_sw_update_freq(sinfo, tbl_entry);
	if (rc) {
		d_mpr_e("%s: csid(0x%x) failed to update freq\n",
			__func__, tbl_entry->clk_src_id);
		goto err_fail_update_entry;
	}

	/* calculate current & scale power for other levels */
	rc = mmrm_sw_update_curr(sinfo, tbl_entry);
	if (rc) {
		d_mpr_e("%s: csid(0x%x) failed to update current\n",
			__func__, tbl_entry->clk_src_id);
		goto err_fail_update_entry;
	}

exit_found:
	mutex_unlock(&sw_clk_mgr->lock);
	return clk_client;

	/*
	 * Error unwinding: labels run in reverse order of setup so each path
	 * undoes exactly what was done before the failure.
	 * NOTE(review): tbl_entry->ref_count (set to 1 above) and tbl_entry->name
	 * are not reset on these paths — presumed benign since tbl_entry->client
	 * is NULLed, which is what the "already registered" check keys on; verify.
	 */
err_fail_update_entry:
	tbl_entry->is_crm_client = 0;
	tbl_entry->max_rate_idx = 0;
	tbl_entry->hw_drv_instances = 0;
	tbl_entry->num_pwr_states = 0;
	kfree(tbl_entry->crm_client_tbl);
	tbl_entry->crm_client_tbl = NULL;
err_fail_alloc_crm_tbl:
	tbl_entry->crm_client_tbl_size = 0;
err_invalid_crm_data:
	kfree(clk_client);
err_fail_alloc_clk_client:
	tbl_entry->client = NULL;
	tbl_entry->clk = NULL;
	tbl_entry->pri = 0x0;
	tbl_entry->pvt_data = NULL;
	tbl_entry->notifier_cb_fn = NULL;
err_nofree_entry:
err_already_registered:
	mutex_unlock(&sw_clk_mgr->lock);
	d_mpr_e("%s: error = %d\n", __func__, rc);
	return NULL;
}
  354. static int mmrm_sw_clk_client_deregister(struct mmrm_clk_mgr *sw_clk_mgr,
  355. struct mmrm_client *client)
  356. {
  357. int rc = 0;
  358. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  359. struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
  360. /* validate the client ptr */
  361. if (!client) {
  362. d_mpr_e("%s: invalid client\n", __func__);
  363. rc = -EINVAL;
  364. goto err_invalid_client;
  365. }
  366. if (client->client_uid >= sinfo->tot_clk_clients) {
  367. d_mpr_e("%s: invalid client uid (%d)\n",
  368. __func__, client->client_uid);
  369. rc = -EINVAL;
  370. goto err_invalid_client;
  371. }
  372. mutex_lock(&sw_clk_mgr->lock);
  373. tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
  374. if (tbl_entry->ref_count > 0) {
  375. tbl_entry->ref_count--;
  376. }
  377. if (tbl_entry->ref_count == 0) {
  378. kfree(tbl_entry->crm_client_tbl);
  379. tbl_entry->crm_client_tbl = NULL;
  380. kfree(tbl_entry->client);
  381. tbl_entry->vdd_level = 0;
  382. tbl_entry->clk_rate = 0;
  383. tbl_entry->client = NULL;
  384. tbl_entry->clk = NULL;
  385. tbl_entry->pri = 0x0;
  386. tbl_entry->pvt_data = NULL;
  387. tbl_entry->notifier_cb_fn = NULL;
  388. tbl_entry->is_crm_client = 0;
  389. tbl_entry->max_rate_idx = 0;
  390. tbl_entry->hw_drv_instances = 0;
  391. tbl_entry->num_pwr_states = 0;
  392. tbl_entry->crm_client_tbl_size = 0;
  393. }
  394. mutex_unlock(&sw_clk_mgr->lock);
  395. return rc;
  396. err_invalid_client:
  397. d_mpr_e("%s: error = %d\n", __func__, rc);
  398. return rc;
  399. }
  400. static int mmrm_sw_get_req_level(
  401. struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
  402. unsigned long clk_val, u32 *req_level)
  403. {
  404. int rc = 0;
  405. int voltage_corner;
  406. unsigned long clk_round_val = 0;
  407. u32 level;
  408. /*
  409. * Clients may set rates that are higher than max supported rate for a clock.
  410. * Round the rate to get the max supported corner.
  411. */
  412. clk_round_val = clk_round_rate(tbl_entry->clk, clk_val);
  413. /* get voltage corner */
  414. voltage_corner = qcom_clk_get_voltage(tbl_entry->clk, clk_round_val);
  415. if (voltage_corner < 0 || voltage_corner > mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_TURBO]) {
  416. d_mpr_e("%s: csid(0x%x): invalid voltage corner(%d) for rounded clk rate(%llu)\n",
  417. __func__,
  418. tbl_entry->clk_src_id,
  419. voltage_corner,
  420. clk_round_val);
  421. rc = voltage_corner;
  422. goto err_invalid_corner;
  423. }
  424. /* voltage corner is below low svs */
  425. if (voltage_corner < mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_LOW_SVS]) {
  426. d_mpr_h("%s: csid(0x%x): lower voltage corner(%d)\n",
  427. __func__,
  428. tbl_entry->clk_src_id,
  429. voltage_corner);
  430. *req_level = MMRM_VDD_LEVEL_LOW_SVS;
  431. goto exit_no_err;
  432. }
  433. /* match vdd level */
  434. for (level = 0; level < MMRM_VDD_LEVEL_MAX; level++) {
  435. if (voltage_corner == mmrm_sw_vdd_corner[level])
  436. break;
  437. }
  438. if (level == MMRM_VDD_LEVEL_MAX) {
  439. d_mpr_e("%s: csid(0x%x): invalid voltage corner(%d) for rounded clk rate(%llu)\n",
  440. __func__,
  441. tbl_entry->clk_src_id,
  442. voltage_corner,
  443. clk_round_val);
  444. rc = -EINVAL;
  445. goto err_invalid_corner;
  446. }
  447. *req_level = level;
  448. d_mpr_h("%s: req_level(%d)\n", __func__, level);
  449. exit_no_err:
  450. return rc;
  451. err_invalid_corner:
  452. return rc;
  453. }
  454. static int mmrm_sw_check_req_level(
  455. struct mmrm_sw_clk_mgr_info *sinfo,
  456. u32 clk_src_id, u32 req_level, u32 *adj_level)
  457. {
  458. int rc = 0;
  459. struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
  460. struct mmrm_sw_clk_client_tbl_entry *tbl_entry = NULL;
  461. struct mmrm_sw_clk_client_tbl_entry *next_max_entry = NULL;
  462. u32 c, level = req_level;
  463. if (req_level >= MMRM_VDD_LEVEL_MAX) {
  464. d_mpr_e("%s: invalid level %lu\n", __func__, req_level);
  465. rc = -EINVAL;
  466. goto err_invalid_level;
  467. }
  468. d_mpr_h("%s: csid(0x%x) level(%d) peak_data->aggreg_level(%d)\n",
  469. __func__, clk_src_id, level, peak_data->aggreg_level);
  470. /* req_level is rejected when another client has a higher level */
  471. if (req_level < peak_data->aggreg_level) {
  472. for (c = 0; c < sinfo->tot_clk_clients; c++) {
  473. tbl_entry = &sinfo->clk_client_tbl[c];
  474. if (IS_ERR_OR_NULL(tbl_entry->clk) || !tbl_entry->clk_rate ||
  475. (tbl_entry->clk_src_id == clk_src_id)) {
  476. continue;
  477. }
  478. if (tbl_entry->vdd_level == peak_data->aggreg_level) {
  479. break;
  480. }
  481. if ((tbl_entry->vdd_level < peak_data->aggreg_level)
  482. && (tbl_entry->vdd_level > req_level))
  483. next_max_entry = tbl_entry;
  484. }
  485. /* reject req level */
  486. if (c < sinfo->tot_clk_clients) {
  487. level = peak_data->aggreg_level;
  488. } else if (!IS_ERR_OR_NULL(next_max_entry)
  489. && next_max_entry->vdd_level > req_level) {
  490. level = next_max_entry->vdd_level;
  491. }
  492. }
  493. *adj_level = level;
  494. d_mpr_h("%s: adj_level(%d)\n", __func__, level);
  495. return rc;
  496. err_invalid_level:
  497. return rc;
  498. }
  499. static int mmrm_sw_calculate_total_current(
  500. struct mmrm_sw_clk_mgr_info *sinfo,
  501. u32 req_level, u32 *total_cur, struct mmrm_sw_clk_client_tbl_entry *tbl_entry_new)
  502. {
  503. int rc = 0;
  504. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  505. u32 c, sum_cur = 0;
  506. if (req_level >= MMRM_VDD_LEVEL_MAX) {
  507. d_mpr_e("%s: invalid level %lu\n", __func__, req_level);
  508. rc = -EINVAL;
  509. goto err_invalid_level;
  510. }
  511. /* calculate sum of values (scaled by volt) */
  512. for (c = 0; c < sinfo->tot_clk_clients; c++) {
  513. tbl_entry = &sinfo->clk_client_tbl[c];
  514. if (IS_ERR_OR_NULL(tbl_entry->clk) || !tbl_entry->clk_rate
  515. || (tbl_entry == tbl_entry_new)) {
  516. continue;
  517. }
  518. sum_cur += (tbl_entry->current_ma[tbl_entry->vdd_level][req_level]
  519. * tbl_entry->num_hw_blocks);
  520. }
  521. *total_cur = sum_cur;
  522. d_mpr_h("%s: total_cur(%lu)\n", __func__, *total_cur);
  523. return rc;
  524. err_invalid_level:
  525. return rc;
  526. }
  527. static int mmrm_sw_throttle_low_priority_client(
  528. struct mmrm_sw_clk_mgr_info *sinfo, int *delta_cur)
  529. {
  530. int rc = 0, i;
  531. u64 start_ts = 0, end_ts = 0;
  532. struct mmrm_sw_clk_client_tbl_entry *tbl_entry_throttle_client;
  533. struct mmrm_client_notifier_data notifier_data;
  534. struct completion timeout;
  535. struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
  536. struct mmrm_sw_throttled_clients_data *tc_data;
  537. u32 now_cur_ma, min_cur_ma;
  538. long clk_min_level = MMRM_VDD_LEVEL_LOW_SVS;
  539. init_completion(&timeout);
  540. for (i = 0; i < sinfo->throttle_clients_data_length ; i++) {
  541. tbl_entry_throttle_client =
  542. &sinfo->clk_client_tbl[sinfo->throttle_clients_info[i].tbl_entry_id];
  543. if (IS_ERR_OR_NULL(tbl_entry_throttle_client))
  544. continue;
  545. now_cur_ma = tbl_entry_throttle_client->current_ma
  546. [tbl_entry_throttle_client->vdd_level]
  547. [peak_data->aggreg_level];
  548. min_cur_ma = tbl_entry_throttle_client->current_ma[clk_min_level]
  549. [peak_data->aggreg_level];
  550. d_mpr_h("%s:csid(0x%x) name(%s)\n",
  551. __func__, tbl_entry_throttle_client->clk_src_id,
  552. tbl_entry_throttle_client->name);
  553. d_mpr_h("%s:now_cur_ma(%llu) min_cur_ma(%llu) delta_cur(%d)\n",
  554. __func__, now_cur_ma, min_cur_ma, *delta_cur);
  555. if ((now_cur_ma <= min_cur_ma) || (now_cur_ma - min_cur_ma <= *delta_cur))
  556. continue;
  557. d_mpr_h("%s: Throttle client csid(0x%x) name(%s)\n",
  558. __func__, tbl_entry_throttle_client->clk_src_id,
  559. tbl_entry_throttle_client->name);
  560. d_mpr_h("%s:now_cur_ma %llu-min_cur_ma %llu>delta_cur %d\n",
  561. __func__, now_cur_ma, min_cur_ma, *delta_cur);
  562. /* Setup notifier */
  563. notifier_data.cb_type = MMRM_CLIENT_RESOURCE_VALUE_CHANGE;
  564. notifier_data.cb_data.val_chng.old_val =
  565. tbl_entry_throttle_client->freq[tbl_entry_throttle_client->vdd_level];
  566. notifier_data.cb_data.val_chng.new_val =
  567. tbl_entry_throttle_client->freq[clk_min_level];
  568. notifier_data.pvt_data = tbl_entry_throttle_client->pvt_data;
  569. start_ts = ktime_get_ns();
  570. if (tbl_entry_throttle_client->notifier_cb_fn)
  571. rc = tbl_entry_throttle_client->notifier_cb_fn(&notifier_data);
  572. end_ts = ktime_get_ns();
  573. d_mpr_h("%s: Client notifier cbk processing time %llu ns\n",
  574. __func__, (end_ts - start_ts));
  575. if (rc) {
  576. d_mpr_e("%s: Client failed to send SUCCESS in callback(%d)\n",
  577. __func__, tbl_entry_throttle_client->clk_src_id);
  578. continue;
  579. }
  580. if ((end_ts - start_ts) > NOTIFY_TIMEOUT)
  581. d_mpr_e("%s:Client notifier cbk took %llu ns more than timeout %llu ns\n",
  582. __func__, (end_ts - start_ts), NOTIFY_TIMEOUT);
  583. if (tbl_entry_throttle_client->reserve == false) {
  584. rc = clk_set_rate(tbl_entry_throttle_client->clk,
  585. tbl_entry_throttle_client->freq[clk_min_level]);
  586. if (rc) {
  587. d_mpr_e("%s: Failed to throttle the clk csid(%d)\n",
  588. __func__, tbl_entry_throttle_client->clk_src_id);
  589. continue;
  590. }
  591. }
  592. d_mpr_h("%s: %s throttled to %llu\n",
  593. __func__, tbl_entry_throttle_client->name,
  594. tbl_entry_throttle_client->freq[clk_min_level]);
  595. *delta_cur -= now_cur_ma - min_cur_ma;
  596. /* Store this client for bookkeeping */
  597. tc_data = kzalloc(sizeof(*tc_data), GFP_KERNEL);
  598. if (IS_ERR_OR_NULL(tc_data)) {
  599. d_mpr_e("%s: Failed to allocate memory\n", __func__);
  600. return -ENOMEM;
  601. }
  602. tc_data->table_id = i;
  603. tc_data->delta_cu_ma = now_cur_ma - min_cur_ma;
  604. tc_data->prev_vdd_level = tbl_entry_throttle_client->vdd_level;
  605. // Add throttled client to list to access it later
  606. list_add_tail(&tc_data->list, &sinfo->throttled_clients);
  607. /* Store the throttled clock rate of client */
  608. tbl_entry_throttle_client->clk_rate =
  609. tbl_entry_throttle_client->freq[clk_min_level];
  610. /* Store the corner level of throttled client */
  611. tbl_entry_throttle_client->vdd_level = clk_min_level;
  612. /* Clearing the reserve flag */
  613. tbl_entry_throttle_client->reserve = false;
  614. break;
  615. }
  616. return rc;
  617. }
  618. static void mmrm_sw_dump_enabled_client_info(struct mmrm_sw_clk_mgr_info *sinfo)
  619. {
  620. u32 c;
  621. struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
  622. struct mmrm_sw_clk_client_tbl_entry *tbl_entry = NULL;
  623. for (c = 0; c < sinfo->tot_clk_clients; c++) {
  624. tbl_entry = &sinfo->clk_client_tbl[c];
  625. if (tbl_entry->clk_rate) {
  626. d_mpr_e("%s: csid(0x%x) clk_rate(%zu) vdd_level(%zu) cur_ma(%zu) num_hw_blocks(%zu)\n",
  627. __func__,
  628. tbl_entry->clk_src_id,
  629. tbl_entry->clk_rate,
  630. tbl_entry->vdd_level,
  631. tbl_entry->current_ma[tbl_entry->vdd_level]
  632. [peak_data->aggreg_level] * tbl_entry->num_hw_blocks,
  633. tbl_entry->num_hw_blocks);
  634. }
  635. }
  636. if (peak_data) {
  637. d_mpr_e("%s: aggreg_val(%zu) aggreg_level(%zu)\n", __func__,
  638. peak_data->aggreg_val, peak_data->aggreg_level);
  639. }
  640. }
/*
 * mmrm_reinstate_throttled_client() - restore previously throttled clients
 * once peak-current headroom allows it.
 *
 * Walks the throttled-clients bookkeeping list; for each record whose saved
 * current (delta_cu_ma) fits under the threshold when added back to the
 * aggregated value, the client is notified that its rate may return from
 * LOW_SVS to its pre-throttle level, and the record is removed and freed.
 *
 * @sinfo: SW clock manager info (throttled list, peak data, client table).
 *
 * Return: always 0 (notifier failures are logged but not propagated;
 * the local rc only feeds the log messages).
 *
 * NOTE(review): when the table lookup yields an IS_ERR_OR_NULL entry, the
 * record is neither reinstated nor removed from the list, so it is retried
 * on every call — presumed intentional; confirm.
 * NOTE(review): peak_data->aggreg_val is read but not increased here after
 * reinstating — presumably the client's next set-value request re-aggregates;
 * verify against the caller.
 */
static int mmrm_reinstate_throttled_client(struct mmrm_sw_clk_mgr_info *sinfo)
{
	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
	struct mmrm_sw_throttled_clients_data *iter, *safe_iter = NULL;
	struct mmrm_client_notifier_data notifier_data;
	struct mmrm_sw_clk_client_tbl_entry *re_entry_throttle_client;
	int rc = 0;
	u64 start_ts = 0, end_ts = 0;

	/* _safe variant: entries are deleted while iterating */
	list_for_each_entry_safe(iter, safe_iter, &sinfo->throttled_clients, list) {
		/* reinstate only if the restored current stays under threshold */
		if (!IS_ERR_OR_NULL(iter) && peak_data->aggreg_val +
			iter->delta_cu_ma <= peak_data->threshold) {
			d_mpr_h("%s: table_id = %d\n", __func__, iter->table_id);
			/* map the bookkeeping record back to its table entry */
			re_entry_throttle_client =
				&sinfo->clk_client_tbl
				[sinfo->throttle_clients_info
				[iter->table_id].tbl_entry_id];
			if (!IS_ERR_OR_NULL(re_entry_throttle_client)) {
				d_mpr_h("%s:found throttled client name(%s) clsid (0x%x)\n",
					__func__, re_entry_throttle_client->name,
					re_entry_throttle_client->clk_src_id);

				/* tell the client it may go back to its old rate */
				notifier_data.cb_type = MMRM_CLIENT_RESOURCE_VALUE_CHANGE;
				notifier_data.cb_data.val_chng.old_val =
					re_entry_throttle_client->freq[MMRM_VDD_LEVEL_LOW_SVS];
				notifier_data.cb_data.val_chng.new_val =
					re_entry_throttle_client->freq[iter->prev_vdd_level];
				notifier_data.pvt_data = re_entry_throttle_client->pvt_data;
				start_ts = ktime_get_ns();

				if (re_entry_throttle_client->notifier_cb_fn) {
					rc = re_entry_throttle_client->notifier_cb_fn
						(&notifier_data);
					end_ts = ktime_get_ns();
					d_mpr_h("%s: Client notifier cbk processing time(%llu)ns\n",
						__func__, end_ts - start_ts);
					if (rc) {
						d_mpr_e("%s: Client notifier callback failed(%d)\n",
							__func__,
							re_entry_throttle_client->clk_src_id);
					}
					if ((end_ts - start_ts) > NOTIFY_TIMEOUT)
						d_mpr_e("%s: Client notifier took %llu ns\n",
							__func__, (end_ts - start_ts));
				}
				/* drop the bookkeeping record */
				list_del(&iter->list);
				kfree(iter);
			}
		}
	}
	return 0;
}
  690. static int mmrm_sw_check_peak_current(struct mmrm_sw_clk_mgr_info *sinfo,
  691. struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
  692. u32 req_level, u32 clk_val, u32 num_hw_blocks)
  693. {
  694. int rc = 0;
  695. struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
  696. u32 adj_level = req_level;
  697. u32 peak_cur = peak_data->aggreg_val;
  698. u32 old_cur = 0, new_cur = 0;
  699. int delta_cur = 0;
  700. /* check the req level and adjust according to tbl entries */
  701. rc = mmrm_sw_check_req_level(sinfo, tbl_entry->clk_src_id, req_level, &adj_level);
  702. if (rc) {
  703. goto err_invalid_level;
  704. }
  705. /* calculate new cur val as per adj_val */
  706. if (clk_val)
  707. new_cur = tbl_entry->current_ma[req_level][adj_level] * num_hw_blocks;
  708. /* calculate old cur */
  709. if (tbl_entry->clk_rate) {
  710. //old_cur = tbl_entry->current_ma[tbl_entry->vdd_level][adj_level];
  711. old_cur = tbl_entry->current_ma[tbl_entry->vdd_level]
  712. [peak_data->aggreg_level] * tbl_entry->num_hw_blocks;
  713. }
  714. /* 1. adj_level increase: recalculated peak_cur other clients + new_cur
  715. * 2. adj_level decrease: recalculated peak_cur other clients + new_cur
  716. * 3. clk_val increase: aggreg_val + (new_cur - old_cur)
  717. * 4. clk_val decrease: aggreg_val + (new_cur - old_cur)
  718. * 5. clk_val 0: aggreg_val - old_cur
  719. */
  720. /* recalculate aggregated current with adj level */
  721. if (adj_level != peak_data->aggreg_level) {
  722. rc = mmrm_sw_calculate_total_current(sinfo, adj_level, &peak_cur, tbl_entry);
  723. if (rc) {
  724. goto err_invalid_level;
  725. }
  726. peak_cur += new_cur;
  727. } else {
  728. delta_cur = (signed int)new_cur - old_cur;
  729. }
  730. d_mpr_h("%s: csid (0x%x) peak_cur(%zu) new_cur(%zu) old_cur(%zu) delta_cur(%d)\n",
  731. __func__, tbl_entry->clk_src_id, peak_cur, new_cur, old_cur, delta_cur);
  732. /* negative value, update peak data */
  733. if ((signed)peak_cur + delta_cur <= 0) {
  734. peak_data->aggreg_val = 0;
  735. peak_data->aggreg_level = adj_level;
  736. goto exit_no_err;
  737. }
  738. /* peak overshoot, do not update peak data */
  739. if ((signed)peak_cur + delta_cur >= peak_data->threshold) {
  740. /* Find low prority client and throttle it*/
  741. if ((tbl_entry->pri == MMRM_CLIENT_PRIOR_HIGH)
  742. && (msm_mmrm_enable_throttle_feature > 0)) {
  743. rc = mmrm_sw_throttle_low_priority_client(sinfo, &delta_cur);
  744. if (rc != 0) {
  745. d_mpr_e("%s: Failed to throttle the low priority client\n",
  746. __func__);
  747. mmrm_sw_dump_enabled_client_info(sinfo);
  748. goto err_peak_overshoot;
  749. }
  750. } else {
  751. d_mpr_e("%s: Client csid(0x%x) name(%s) can't request throtlling\n",
  752. __func__, tbl_entry->clk_src_id, tbl_entry->name);
  753. mmrm_sw_dump_enabled_client_info(sinfo);
  754. rc = -EINVAL;
  755. goto err_peak_overshoot;
  756. }
  757. }
  758. /* update peak data */
  759. peak_data->aggreg_val = peak_cur + delta_cur;
  760. peak_data->aggreg_level = adj_level;
  761. mmrm_reinstate_throttled_client(sinfo);
  762. exit_no_err:
  763. d_mpr_h("%s: aggreg_val(%lu) aggreg_level(%lu)\n",
  764. __func__,
  765. peak_data->aggreg_val,
  766. peak_data->aggreg_level);
  767. return rc;
  768. err_invalid_level:
  769. err_peak_overshoot:
  770. return rc;
  771. }
/*
 * mmrm_sw_clk_client_setval() - request a clk rate for a registered client
 * after validating the request against the peak-current budget.
 * @sw_clk_mgr:  the sw clk manager instance
 * @client:      client handle obtained at registration (indexes the table)
 * @client_data: per-request data (flags, num_hw_blocks, CRM fields)
 * @clk_val:     requested clk rate (0 releases the client's vote)
 *
 * For CRM clients the aggregated max rate across all CRM votes is used for
 * level and peak-current checks instead of clk_val. Returns 0 on success,
 * negative errno on validation, peak-overshoot, or clk-set failure.
 */
static int mmrm_sw_clk_client_setval(struct mmrm_clk_mgr *sw_clk_mgr,
	struct mmrm_client *client,
	struct mmrm_client_data *client_data,
	unsigned long clk_val)
{
	int rc = 0;
	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
	bool req_reserve;
	u32 req_level;
	unsigned long crm_max_rate = 0;
	int max_rate_idx = 0;

	/* validate input params */
	if (!client) {
		d_mpr_e("%s: invalid client\n", __func__);
		rc = -EINVAL;
		goto err_invalid_client;
	}
	if (client->client_uid >= sinfo->tot_clk_clients) {
		d_mpr_e("%s: invalid client uid (%d)\n",
			__func__, client->client_uid);
		rc = -EINVAL;
		goto err_invalid_client;
	}
	if (!client_data) {
		d_mpr_e("%s: invalid client data\n", __func__);
		rc = -EINVAL;
		goto err_invalid_client_data;
	}

	/* get table entry */
	tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
	if (IS_ERR_OR_NULL(tbl_entry->clk)) {
		d_mpr_e("%s: clk src not registered\n", __func__);
		rc = -EINVAL;
		goto err_invalid_client;
	}
	d_mpr_h("%s: csid(0x%x) clk rate %llu\n",
		__func__, tbl_entry->clk_src_id, clk_val);

	/*
	 * CRM clients: validate the hw drv instance / power state indices,
	 * then compute the max rate across all of this source's CRM votes
	 * (that max drives level and peak-current checks below).
	 */
	if (tbl_entry->is_crm_client) {
		if (client_data->crm_drv_idx >= tbl_entry->hw_drv_instances ||
			client_data->pwr_st >= tbl_entry->num_pwr_states) {
			d_mpr_e("%s: invalid CRM data\n", __func__);
			rc = -EINVAL;
			goto err_invalid_client_data;
		}
		crm_max_rate = mmrm_sw_get_max_crm_rate(tbl_entry, client_data,
			clk_val, &max_rate_idx);
	}

	/*
	 * Check if the requested clk rate is the same as the current clk rate.
	 * When clk rates are the same, compare this with the current state.
	 * Skip when duplicate calculations will be made.
	 * CRM Clients: Always set the rate
	 * --- current ---- requested --- action ---
	 * a. reserve && req_reserve: skip
	 * b. !reserve && !req_reserve: skip
	 * c. !reserve && req_reserve: skip
	 * d. reserve && !req_reserve: set clk rate
	 */
	req_reserve = client_data->flags & MMRM_CLIENT_DATA_FLAG_RESERVE_ONLY;
	if (tbl_entry->clk_rate == clk_val &&
		tbl_entry->num_hw_blocks == client_data->num_hw_blocks &&
		tbl_entry->is_crm_client == false) {
		d_mpr_h("%s: csid(0x%x) same as previous clk rate %llu\n",
			__func__, tbl_entry->clk_src_id, clk_val);

		/* a & b */
		if (tbl_entry->reserve == req_reserve)
			goto exit_no_err;

		/* c & d */
		mutex_lock(&sw_clk_mgr->lock);
		tbl_entry->reserve = req_reserve;
		mutex_unlock(&sw_clk_mgr->lock);

		/* skip or set clk rate */
		if (req_reserve)
			goto exit_no_err;
		else
			goto set_clk_rate;
	}

	/* get corresponding vdd level for the requested rate */
	if (clk_val) {
		if (!tbl_entry->is_crm_client)
			rc = mmrm_sw_get_req_level(tbl_entry, clk_val, &req_level);
		else
			rc = mmrm_sw_get_req_level(tbl_entry, crm_max_rate, &req_level);
		if (rc || req_level >= MMRM_VDD_LEVEL_MAX) {
			d_mpr_e("%s: csid(0x%x) unable to get level for clk rate %llu crm_max_rate %llu\n",
				__func__, tbl_entry->clk_src_id, clk_val, crm_max_rate);
			rc = -EINVAL;
			goto err_invalid_clk_val;
		}
		/* num_hw_blocks must be within [1, max_num_hw_blocks] */
		if (!((client_data->num_hw_blocks >= 1) &&
			(client_data->num_hw_blocks <= tbl_entry->max_num_hw_blocks))) {
			d_mpr_e("%s: csid(0x%x) num_hw_block:%d\n",
				__func__, tbl_entry->clk_src_id, client_data->num_hw_blocks);
			rc = -EINVAL;
			goto err_invalid_client_data;
		}
	} else {
		/* releasing the vote: use the lowest level */
		req_level = 0;
	}

	mutex_lock(&sw_clk_mgr->lock);

	/* check and update for peak current (CRM clients vote crm_max_rate) */
	if (!tbl_entry->is_crm_client) {
		rc = mmrm_sw_check_peak_current(sinfo, tbl_entry,
			req_level, clk_val, client_data->num_hw_blocks);
	} else {
		rc = mmrm_sw_check_peak_current(sinfo, tbl_entry,
			req_level, crm_max_rate, client_data->num_hw_blocks);
	}
	if (rc) {
		d_mpr_e("%s: csid (0x%x) peak overshoot peak_cur(%lu)\n",
			__func__, tbl_entry->clk_src_id,
			sinfo->peak_cur_data.aggreg_val);
		mutex_unlock(&sw_clk_mgr->lock);
		goto err_peak_overshoot;
	}

	/* update table entry (still under the lock) */
	if (!tbl_entry->is_crm_client) {
		tbl_entry->clk_rate = clk_val;
	} else {
		/* remember which CRM vote currently defines the max rate */
		tbl_entry->max_rate_idx = max_rate_idx;
		tbl_entry->clk_rate = crm_max_rate;
		/* SW-DRV vote lives in the last slot; HW-DRV votes are
		 * indexed by (drv instance, power state) */
		if (client_data->drv_type == MMRM_CRM_SW_DRV)
			tbl_entry->crm_client_tbl[tbl_entry->crm_client_tbl_size - 1] = clk_val;
		else
			tbl_entry->crm_client_tbl[tbl_entry->num_pwr_states *
				client_data->crm_drv_idx +
				client_data->pwr_st] = clk_val;
		mmrm_sw_print_crm_table(tbl_entry);
	}
	tbl_entry->vdd_level = req_level;
	tbl_entry->reserve = req_reserve;
	tbl_entry->num_hw_blocks = client_data->num_hw_blocks;

	mutex_unlock(&sw_clk_mgr->lock);

	/* check reserve only flag (skip set clock rate) */
	if (req_reserve && !tbl_entry->is_crm_client) {
		d_mpr_h("%s: csid(0x%x) skip setting clk rate\n",
			__func__, tbl_entry->clk_src_id);
		rc = 0;
		goto exit_no_err;
	}

set_clk_rate:
	/* non-CRM and SW-DRV CRM clients use the plain clk API; HW-DRV CRM
	 * clients program the rate per (drv instance, power state) */
	if (!tbl_entry->is_crm_client || client_data->drv_type == MMRM_CRM_SW_DRV) {
		d_mpr_h("%s: csid(0x%x) setting clk rate %llu\n",
			__func__, tbl_entry->clk_src_id, clk_val);
		rc = clk_set_rate(tbl_entry->clk, clk_val);
		if (rc) {
			d_mpr_e("%s: csid(0x%x) failed to set clk rate %llu\n",
				__func__, tbl_entry->clk_src_id, clk_val);
			rc = -EINVAL;
			/* TBD: incase of failure clk_rate is invalid */
			goto err_clk_set_fail;
		}
	} else {
		d_mpr_h(
			"%s: csid(0x%x) setting clk rate %llu drv_type %u, crm_drv_idx %u, pwr_st %u\n",
			__func__, tbl_entry->clk_src_id, clk_val,
			CRM_HW_DRV, client_data->crm_drv_idx,
			client_data->pwr_st);
		rc = qcom_clk_crm_set_rate(tbl_entry->clk, CRM_HW_DRV,
			client_data->crm_drv_idx,
			client_data->pwr_st, clk_val);
		if (rc) {
			d_mpr_e("%s: csid(0x%x) failed to set clk rate %llu\n",
				__func__, tbl_entry->clk_src_id, clk_val);
			rc = -EINVAL;
			/* TBD: incase of failure clk_rate is invalid */
			goto err_clk_set_fail;
		}
	}

exit_no_err:
	d_mpr_h("%s: clk rate %lu set successfully for %s\n",
		__func__, clk_val, tbl_entry->name);
	return rc;

err_invalid_client:
err_invalid_client_data:
err_invalid_clk_val:
err_peak_overshoot:
err_clk_set_fail:
	d_mpr_e("%s: error = %d\n", __func__, rc);
	return rc;
}
  954. static int mmrm_sw_clk_client_setval_inrange(struct mmrm_clk_mgr *sw_clk_mgr,
  955. struct mmrm_client *client,
  956. struct mmrm_client_data *client_data,
  957. struct mmrm_client_res_value *val)
  958. {
  959. /* TBD: add support for set val in range */
  960. return mmrm_sw_clk_client_setval(sw_clk_mgr, client, client_data,
  961. val->cur);
  962. }
  963. static int mmrm_sw_clk_client_getval(struct mmrm_clk_mgr *sw_clk_mgr,
  964. struct mmrm_client *client,
  965. struct mmrm_client_res_value *val)
  966. {
  967. int rc = 0;
  968. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  969. struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
  970. /* validate input params */
  971. if (!client) {
  972. d_mpr_e("%s: invalid client\n", __func__);
  973. rc = -EINVAL;
  974. goto err_invalid_client;
  975. }
  976. if (client->client_uid >= sinfo->tot_clk_clients) {
  977. d_mpr_e("%s: invalid client uid (%d)\n",
  978. __func__, client->client_uid);
  979. rc = -EINVAL;
  980. goto err_invalid_client;
  981. }
  982. tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
  983. if (!tbl_entry->clk) {
  984. d_mpr_e("%s: clk src not registered\n", __func__);
  985. rc = -EINVAL;
  986. goto err_invalid_client;
  987. }
  988. /* return previously configured value */
  989. /* TBD: Identify the min & max values */
  990. val->min = tbl_entry->clk_rate;
  991. val->cur = tbl_entry->clk_rate;
  992. val->max = tbl_entry->clk_rate;
  993. return rc;
  994. err_invalid_client:
  995. d_mpr_e("%s: error = %d\n", __func__, rc);
  996. return rc;
  997. }
  998. static int mmrm_sw_clk_print_enabled_client_info(struct mmrm_clk_mgr *sw_clk_mgr,
  999. char *buf,
  1000. int sz)
  1001. {
  1002. u32 c, len;
  1003. u32 left_spaces = (u32)sz;
  1004. struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
  1005. struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
  1006. struct mmrm_sw_clk_client_tbl_entry *tbl_entry = NULL;
  1007. len = scnprintf(buf, left_spaces, " csid clk_rate vdd_level cur_ma num_hw_blocks\n");
  1008. left_spaces -= len;
  1009. buf += len;
  1010. if (sinfo != NULL && peak_data != NULL) {
  1011. for (c = 0; (c < sinfo->tot_clk_clients) && (left_spaces > 1); c++) {
  1012. tbl_entry = &sinfo->clk_client_tbl[c];
  1013. if ((tbl_entry != NULL) && (tbl_entry->clk_rate)) {
  1014. len = scnprintf(buf, left_spaces, "0x%x %zu %zu %zu %zu\n",
  1015. tbl_entry->clk_src_id,
  1016. tbl_entry->clk_rate,
  1017. tbl_entry->vdd_level,
  1018. tbl_entry->current_ma[tbl_entry->vdd_level]
  1019. [peak_data->aggreg_level] * tbl_entry->num_hw_blocks,
  1020. tbl_entry->num_hw_blocks);
  1021. left_spaces -= len;
  1022. buf += len;
  1023. }
  1024. }
  1025. if (left_spaces > 1) {
  1026. len = scnprintf(buf, left_spaces, "aggreg_val(%zu) aggreg_level(%zu)\n",
  1027. peak_data->aggreg_val, peak_data->aggreg_level);
  1028. left_spaces -= len;
  1029. }
  1030. }
  1031. return (sz - left_spaces);
  1032. }
/* Client operations for the software clk-manager scheme; installed into
 * mmrm_clk_mgr::clk_client_ops by mmrm_init_sw_clk_mgr(). */
static struct mmrm_clk_mgr_client_ops clk_client_swops = {
	.clk_client_reg = mmrm_sw_clk_client_register,
	.clk_client_dereg = mmrm_sw_clk_client_deregister,
	.clk_client_setval = mmrm_sw_clk_client_setval,
	.clk_client_setval_inrange = mmrm_sw_clk_client_setval_inrange,
	.clk_client_getval = mmrm_sw_clk_client_getval,
	.clk_print_enabled_client_info = mmrm_sw_clk_print_enabled_client_info,
};
  1041. static int mmrm_sw_prepare_table(struct mmrm_clk_platform_resources *cres,
  1042. struct mmrm_sw_clk_mgr_info *sinfo)
  1043. {
  1044. int rc = 0;
  1045. u32 c;
  1046. struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
  1047. struct nom_clk_src_info *nom_tbl_entry;
  1048. /* read all resource entries */
  1049. for (c = 0; c < sinfo->tot_clk_clients; c++) {
  1050. tbl_entry = &sinfo->clk_client_tbl[c];
  1051. nom_tbl_entry = &cres->nom_clk_set.clk_src_tbl[c];
  1052. tbl_entry->clk_src_id = (nom_tbl_entry->domain << 16 |
  1053. nom_tbl_entry->clk_src_id);
  1054. tbl_entry->dyn_pwr[MMRM_VDD_LEVEL_NOM] =
  1055. nom_tbl_entry->nom_dyn_pwr;
  1056. tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM] =
  1057. nom_tbl_entry->nom_leak_pwr;
  1058. tbl_entry->max_num_hw_blocks = nom_tbl_entry->num_hw_block;
  1059. d_mpr_h("%s: updating csid(0x%x) dyn_pwr(%d) leak_pwr(%d) num(%d)\n",
  1060. __func__,
  1061. tbl_entry->clk_src_id,
  1062. tbl_entry->dyn_pwr[MMRM_VDD_LEVEL_NOM],
  1063. tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM],
  1064. tbl_entry->num_hw_blocks);
  1065. }
  1066. return rc;
  1067. }
  1068. int mmrm_init_sw_clk_mgr(void *driver_data)
  1069. {
  1070. int rc = 0, i, j;
  1071. struct mmrm_driver_data *drv_data =
  1072. (struct mmrm_driver_data *)driver_data;
  1073. struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
  1074. struct mmrm_sw_clk_mgr_info *sinfo = NULL;
  1075. struct mmrm_clk_mgr *sw_clk_mgr = NULL;
  1076. u32 tbl_size = 0;
  1077. /* mmrm_sw_clk_mgr */
  1078. sw_clk_mgr = kzalloc(sizeof(*sw_clk_mgr), GFP_KERNEL);
  1079. if (!sw_clk_mgr) {
  1080. d_mpr_e("%s: failed to allocate memory for sw_clk_mgr\n",
  1081. __func__);
  1082. rc = -ENOMEM;
  1083. goto err_fail_sw_clk_mgr;
  1084. }
  1085. /* initialize the tables */
  1086. tbl_size = sizeof(struct mmrm_sw_clk_client_tbl_entry) *
  1087. cres->nom_clk_set.count;
  1088. sinfo = &(sw_clk_mgr->data.sw_info);
  1089. sinfo->driver_data = drv_data;
  1090. sinfo->clk_client_tbl = kzalloc(tbl_size, GFP_KERNEL);
  1091. if (!sinfo->clk_client_tbl) {
  1092. d_mpr_e(
  1093. "%s: failed to allocate memory for clk_client_tbl (%d)\n",
  1094. __func__, cres->nom_clk_set.count);
  1095. rc = -ENOMEM;
  1096. goto err_fail_clk_tbl;
  1097. }
  1098. sinfo->tot_clk_clients = cres->nom_clk_set.count;
  1099. sinfo->enabled_clk_clients = 0;
  1100. INIT_LIST_HEAD(&sinfo->throttled_clients);
  1101. /* prepare table entries */
  1102. rc = mmrm_sw_prepare_table(cres, sinfo);
  1103. if (rc) {
  1104. d_mpr_e("%s: failed to prepare clk table\n", __func__);
  1105. rc = -ENOMEM;
  1106. goto err_fail_prep_tbl;
  1107. }
  1108. /* update the peak current threshold */
  1109. sinfo->peak_cur_data.threshold = cres->peak_threshold;
  1110. sinfo->peak_cur_data.aggreg_val = 0;
  1111. sinfo->peak_cur_data.aggreg_level = 0;
  1112. sinfo->throttle_clients_data_length = cres->throttle_clients_data_length;
  1113. for (i = 0; i < sinfo->throttle_clients_data_length; i++) {
  1114. for (j = 0; j < sinfo->tot_clk_clients; j++) {
  1115. if (sinfo->clk_client_tbl[j].clk_src_id
  1116. == cres->clsid_threshold_clients[i]) {
  1117. sinfo->throttle_clients_info[i].csid_throttle_client
  1118. = cres->clsid_threshold_clients[i];
  1119. sinfo->throttle_clients_info[i].tbl_entry_id = j;
  1120. break;
  1121. }
  1122. }
  1123. }
  1124. /* initialize mutex for sw clk mgr */
  1125. mutex_init(&sw_clk_mgr->lock);
  1126. sw_clk_mgr->scheme = drv_data->clk_res.scheme;
  1127. /* clk client operations */
  1128. sw_clk_mgr->clk_client_ops = &clk_client_swops;
  1129. drv_data->clk_mgr = sw_clk_mgr;
  1130. return rc;
  1131. err_fail_prep_tbl:
  1132. kfree(sinfo->clk_client_tbl);
  1133. err_fail_clk_tbl:
  1134. kfree(sw_clk_mgr);
  1135. drv_data->clk_mgr = NULL;
  1136. err_fail_sw_clk_mgr:
  1137. d_mpr_e("%s: error = %d\n", __func__, rc);
  1138. return rc;
  1139. }
  1140. int mmrm_destroy_sw_clk_mgr(struct mmrm_clk_mgr *sw_clk_mgr)
  1141. {
  1142. int rc = 0;
  1143. struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
  1144. struct mmrm_sw_throttled_clients_data *iter, *safe_iter = NULL;
  1145. list_for_each_entry_safe(iter, safe_iter, &sinfo->throttled_clients, list) {
  1146. list_del(&iter->list);
  1147. kfree(iter);
  1148. }
  1149. if (!sw_clk_mgr) {
  1150. d_mpr_e("%s: sw_clk_mgr null\n", __func__);
  1151. return -EINVAL;
  1152. }
  1153. kfree(sw_clk_mgr->data.sw_info.clk_client_tbl);
  1154. mutex_destroy(&sw_clk_mgr->lock);
  1155. kfree(sw_clk_mgr);
  1156. return rc;
  1157. }