cvp_power.c

/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 */

#include "msm_cvp.h"
#include "cvp_power.h"
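
/*
 * find_max() - return the largest entry of @array.
 * @array: values to scan
 * @num: number of entries in @array
 */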
static inline unsigned long find_max(unsigned long *array, unsigned int num)
{
	unsigned int i;
	unsigned long max = 0;

	for (i = 0; i < num; i++)
		max = array[i] > max ? array[i] : max;

	return max;
}
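
/* True if the session has reported per-HW-block cycle estimates. */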
static bool is_subblock_profile_existed(struct msm_cvp_inst *inst)
{
	return (inst->prop.cycles[HFI_HW_OD] ||
		inst->prop.cycles[HFI_HW_MPU] ||
		inst->prop.cycles[HFI_HW_FDU] ||
		inst->prop.cycles[HFI_HW_ICA] ||
		inst->prop.cycles[HFI_HW_VADL] ||
		inst->prop.cycles[HFI_HW_TOF] ||
		inst->prop.cycles[HFI_HW_RGE] ||
		inst->prop.cycles[HFI_HW_XRA] ||
		inst->prop.cycles[HFI_HW_LSR]);
}
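
/* Printable names for each HW thread, used in the power debug logs. */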
static char hw_names[HFI_MAX_HW_THREADS][8] = {{"FDU"}, {"MPU"}, {"OD"}, {"ICA"},
						{"VADL"}, {"TOF"}, {"RGE"}, {"XRA"},
						{"LSR"}};
static void aggregate_power_update(struct msm_cvp_core *core,
	struct cvp_power_level *nrt_pwr,
	struct cvp_power_level *rt_pwr,
	unsigned int max_clk_rate)
{
	struct msm_cvp_inst *inst;
	int i, j;
	unsigned long blocks_sum[2][HFI_MAX_HW_THREADS] = {0};
	unsigned long fw_sum[2] = {0}, max_cycle[2] = {0}, op_max_cycle[2] = {0};
	unsigned long op_blocks_max[2][HFI_MAX_HW_THREADS] = {0};
	unsigned long op_fw_max[2] = {0}, bw_sum[2] = {0}, op_bw_max[2] = {0};
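
	/*
	 * Sum each session's per-block, firmware and DDR demand, keeping
	 * non-realtime (index 0) and realtime (index 1) sessions separate.
	 */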
	list_for_each_entry(inst, &core->instances, list) {
		if (inst->state == MSM_CVP_CORE_INVALID ||
			inst->state == MSM_CVP_CORE_UNINIT ||
			!is_subblock_profile_existed(inst))
			continue;
		if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
			/* Non-realtime sessions use index 0 */
			i = 0;
		} else {
			i = 1;
		}
		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
			if (inst->prop.cycles[j])
				dprintk(CVP_PWR, "pwrUpdate %s %u\n",
					hw_names[j], inst->prop.cycles[j]);
		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
			if (inst->prop.op_cycles[j])
				dprintk(CVP_PWR, "pwrUpdate_OP %s %u\n",
					hw_names[j], inst->prop.op_cycles[j]);
		dprintk(CVP_PWR, " fw %u fw_o %u\n", inst->prop.fw_cycles,
			inst->prop.fw_op_cycles);
		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
			blocks_sum[i][j] += inst->prop.cycles[j];
		fw_sum[i] += inst->prop.fw_cycles;
		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
			op_blocks_max[i][j] =
				(op_blocks_max[i][j] >= inst->prop.op_cycles[j]) ?
				op_blocks_max[i][j] : inst->prop.op_cycles[j];
		op_fw_max[i] =
			(op_fw_max[i] >= inst->prop.fw_op_cycles) ?
			op_fw_max[i] : inst->prop.fw_op_cycles;
		bw_sum[i] += inst->prop.ddr_bw;
		op_bw_max[i] =
			(op_bw_max[i] >= inst->prop.ddr_op_bw) ?
			op_bw_max[i] : inst->prop.ddr_op_bw;
		for (j = 0; j < HFI_MAX_HW_THREADS; j++) {
			if (inst->prop.fps[j])
				dprintk(CVP_PWR, "fps %s %d ", hw_names[j],
					inst->prop.fps[j]);
		}
	}
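
	/*
	 * For each priority class the clock demand is set by the busiest HW
	 * block; operational requests are capped at max_clk_rate, and the
	 * bandwidth vote takes the larger of the sum and the operational max.
	 */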
	for (i = 0; i < 2; i++) {
		max_cycle[i] = find_max(&blocks_sum[i][0], HFI_MAX_HW_THREADS);
		op_max_cycle[i] = find_max(&op_blocks_max[i][0], HFI_MAX_HW_THREADS);
		op_max_cycle[i] =
			(op_max_cycle[i] > max_clk_rate) ?
			max_clk_rate : op_max_cycle[i];
		bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
			bw_sum[i] : op_bw_max[i];
	}
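
	/* Fold index 0 into the non-realtime vote and index 1 into the realtime vote. */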
	nrt_pwr->core_sum += max_cycle[0];
	nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_max_cycle[0]) ?
		nrt_pwr->op_core_sum : op_max_cycle[0];
	nrt_pwr->bw_sum += bw_sum[0];
	rt_pwr->core_sum += max_cycle[1];
	rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_max_cycle[1]) ?
		rt_pwr->op_core_sum : op_max_cycle[1];
	rt_pwr->bw_sum += bw_sum[1];
}

/**
 * adjust_bw_freqs() - calculate the CVP clock rate and bandwidth required to
 * sustain the current use case.
 *
 * The bandwidth vote is best-effort: no error is returned if the requested
 * bandwidth exceeds the maximum limit.
 * The clock vote from non-realtime sessions is best-effort: no error is
 * returned if the aggregated session clock request exceeds the maximum limit.
 * The clock vote from realtime sessions is a hard request: if the aggregated
 * session clock request exceeds the maximum limit, the function returns an
 * error.
 *
 * Ensure the caller acquires clk_lock!
 */
static int adjust_bw_freqs(unsigned int max_bw, unsigned int min_bw)
{
	struct msm_cvp_core *core;
	struct iris_hfi_device *hdev;
	struct allowed_clock_rates_table *tbl = NULL;
	unsigned int tbl_size;
	unsigned int cvp_min_rate, cvp_max_rate;
	struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
	unsigned long tmp, core_sum, op_core_sum, bw_sum;
	int i;

	core = cvp_driver->cvp_core;
	hdev = core->dev_ops->hfi_device_data;
	tbl = core->resources.allowed_clks_tbl;
	tbl_size = core->resources.allowed_clks_tbl_size;
	cvp_min_rate = tbl[0].clock_rate;
	cvp_max_rate = tbl[tbl_size - 1].clock_rate;

	aggregate_power_update(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
	dprintk(CVP_PWR, "PwrUpdate nrt %u %u rt %u %u\n",
		nrt_pwr.core_sum, nrt_pwr.op_core_sum,
		rt_pwr.core_sum, rt_pwr.op_core_sum);

	if (rt_pwr.core_sum > cvp_max_rate) {
		dprintk(CVP_WARN, "%s clk vote out of range %lld\n",
			__func__, rt_pwr.core_sum);
		return -ENOTSUPP;
	}

	core_sum = rt_pwr.core_sum + nrt_pwr.core_sum;
	op_core_sum = (rt_pwr.op_core_sum >= nrt_pwr.op_core_sum) ?
		rt_pwr.op_core_sum : nrt_pwr.op_core_sum;
	core_sum = (core_sum >= op_core_sum) ?
		core_sum : op_core_sum;
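
	/*
	 * Clamp the aggregate request to the allowed range and round it up
	 * to the next entry in the allowed clock table.
	 */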
	if (core_sum > cvp_max_rate) {
		core_sum = cvp_max_rate;
	} else if (core_sum <= cvp_min_rate) {
		core_sum = cvp_min_rate;
	} else {
		for (i = 1; i < tbl_size; i++)
			if (core_sum <= tbl[i].clock_rate)
				break;
		core_sum = tbl[i].clock_rate;
	}
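
	/*
	 * The bandwidth vote is best-effort: scale the aggregate down by
	 * 1024 and clamp it to [min_bw, max_bw].
	 */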
	bw_sum = rt_pwr.bw_sum + nrt_pwr.bw_sum;
	bw_sum = bw_sum >> 10;
	bw_sum = (bw_sum > max_bw) ? max_bw : bw_sum;
	bw_sum = (bw_sum < min_bw) ? min_bw : bw_sum;

	dprintk(CVP_PWR, "%s %lu %lu\n", __func__,
		core_sum, bw_sum);

	tmp = core->curr_freq;
	core->curr_freq = core_sum;
	core->orig_core_sum = tmp;
	hdev->clk_freq = core->curr_freq;
	core->bw_sum = bw_sum;

	return 0;
}
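
/**
 * msm_cvp_update_power() - re-evaluate and apply the CVP clock and bus votes.
 * @inst: session that triggered the update
 *
 * Recomputes the aggregate clock rate and "cvp-ddr" bandwidth across all
 * sessions, then applies them through msm_cvp_set_clocks() and
 * msm_cvp_set_bw(). Returns 0 on success or a negative error code.
 */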
int msm_cvp_update_power(struct msm_cvp_inst *inst)
{
	int rc = 0;
	struct msm_cvp_core *core;
	struct msm_cvp_inst *s;
	struct bus_info *bus = NULL;
	struct clock_set *clocks;
	struct clock_info *cl;
	int bus_count = 0;
	unsigned int max_bw = 0, min_bw = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	core = inst->core;
	if (!core || core->state == CVP_CORE_UNINIT) {
		rc = -ECONNRESET;
		goto adjust_exit;
	}

	clocks = &core->resources.clock_set;
	cl = &clocks->clock_tbl[clocks->count - 1];
	if (!cl->has_scaling) {
		dprintk(CVP_ERR, "Cannot scale CVP clock\n");
		rc = -EINVAL;
		goto adjust_exit;
	}
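
	/*
	 * Locate the "cvp-ddr" bus; the minimum bandwidth vote is one tenth
	 * of that bus's maximum range.
	 */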
	for (bus_count = 0; bus_count < core->resources.bus_set.count; bus_count++) {
		if (!strcmp(core->resources.bus_set.bus_tbl[bus_count].name, "cvp-ddr")) {
			bus = &core->resources.bus_set.bus_tbl[bus_count];
			max_bw = bus->range[1];
			min_bw = max_bw / 10;
		}
	}
	if (!bus) {
		dprintk(CVP_ERR, "bus node is NULL for cvp-ddr\n");
		rc = -EINVAL;
		goto adjust_exit;
	}

	mutex_lock(&core->clk_lock);
	rc = adjust_bw_freqs(max_bw, min_bw);
	mutex_unlock(&core->clk_lock);
	if (rc)
		goto adjust_exit;
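
	/*
	 * Apply the new clock rate; on failure restore the previous rate
	 * saved in orig_core_sum before bailing out.
	 */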
	rc = msm_cvp_set_clocks(core);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to set clock rate %u %s: %d %s\n",
			core->curr_freq, cl->name, rc, __func__);
		core->curr_freq = core->orig_core_sum;
		goto adjust_exit;
	}

	rc = msm_cvp_set_bw(core, bus, core->bw_sum);

adjust_exit:
	cvp_put_inst(s);

	return rc;
}
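
/**
 * msm_cvp_get_hw_aggregate_cycles() - total cycles requested for one HW block.
 * @hwblk: HW thread to query
 *
 * Sums the cycle demand of @hwblk across all active sessions; the 64-bit sum
 * is truncated to 32 bits before it is returned.
 */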
unsigned int msm_cvp_get_hw_aggregate_cycles(enum hfi_hw_thread hwblk)
{
	struct msm_cvp_core *core;
	struct msm_cvp_inst *inst;
	unsigned long cycles_sum = 0;

	core = cvp_driver->cvp_core;
	if (!core) {
		dprintk(CVP_ERR, "%s: invalid core\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&core->clk_lock);
	list_for_each_entry(inst, &core->instances, list) {
		if (inst->state == MSM_CVP_CORE_INVALID ||
			inst->state == MSM_CVP_CORE_UNINIT ||
			!is_subblock_profile_existed(inst))
			continue;
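		/* Accumulate only the cycles reported for the requested HW block. */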
		switch (hwblk) {
		case HFI_HW_FDU:
		{
			cycles_sum += inst->prop.cycles[HFI_HW_FDU];
			break;
		}
		case HFI_HW_ICA:
		{
			cycles_sum += inst->prop.cycles[HFI_HW_ICA];
			break;
		}
		case HFI_HW_MPU:
		{
			cycles_sum += inst->prop.cycles[HFI_HW_MPU];
			break;
		}
		case HFI_HW_OD:
		{
			cycles_sum += inst->prop.cycles[HFI_HW_OD];
			break;
		}
		case HFI_HW_VADL:
		{
			cycles_sum += inst->prop.cycles[HFI_HW_VADL];
			break;
		}
		case HFI_HW_TOF:
		{
			cycles_sum += inst->prop.cycles[HFI_HW_TOF];
			break;
		}
		case HFI_HW_RGE:
		{
			cycles_sum += inst->prop.cycles[HFI_HW_RGE];
			break;
		}
		case HFI_HW_XRA:
		{
			cycles_sum += inst->prop.cycles[HFI_HW_XRA];
			break;
		}
		case HFI_HW_LSR:
		{
			cycles_sum += inst->prop.cycles[HFI_HW_LSR];
			break;
		}
		default:
			dprintk(CVP_ERR, "unrecognized hw block %d\n",
				hwblk);
			break;
		}
	}
	mutex_unlock(&core->clk_lock);
	cycles_sum = cycles_sum & 0xFFFFFFFF;

	return (unsigned int)cycles_sum;
}