  1. /* SPDX-License-Identifier: GPL-2.0-only
  2. *
  3. * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #include "msm_cvp.h"
  7. #include "cvp_power.h"
  8. static inline int find_max(unsigned long *array, unsigned int num)
  9. {
  10. int i, max = 0;
  11. for (i = 0; i < num; i++)
  12. max = array[i] > max ? array[i] : max;
  13. return max;
  14. }
  15. static bool is_subblock_profile_existed(struct msm_cvp_inst *inst)
  16. {
  17. return (inst->prop.cycles[HFI_HW_OD] ||
  18. inst->prop.cycles[HFI_HW_MPU] ||
  19. inst->prop.cycles[HFI_HW_FDU] ||
  20. inst->prop.cycles[HFI_HW_ICA] ||
  21. inst->prop.cycles[HFI_HW_VADL] ||
  22. inst->prop.cycles[HFI_HW_TOF] ||
  23. inst->prop.cycles[HFI_HW_RGE] ||
  24. inst->prop.cycles[HFI_HW_XRA] ||
  25. inst->prop.cycles[HFI_HW_LSR]);
  26. }
  27. static char hw_names[HFI_MAX_HW_THREADS][8] = {{"FDU"}, {"OD"}, {"MPU"}, {"ICA"},
  28. {"VADL"}, {"TOF"}, {"RGE"}, {"XRA"},
  29. {"LSR"}};
/*
 * aggregate_power_update() - fold the power demand of every active
 * session on @core into two aggregates: @nrt_pwr (non-realtime) and
 * @rt_pwr (realtime).
 *
 * Index [0] of each local [2]-array collects non-realtime sessions,
 * index [1] realtime ones. Average cycle counts and bandwidth are
 * summed across sessions; "operational" (op_*) values take the maximum
 * instead. core->dyn_clk.sum_fps[] is rebuilt from scratch as a side
 * effect. The per-class op cycle request is clamped to @max_clk_rate.
 *
 * NOTE(review): fw_sum[] and op_fw_max[] are accumulated but never
 * folded into the outputs -- dead computation or missing feature;
 * confirm intent.
 * NOTE(review): caller is expected to hold core->clk_lock while the
 * instance list and dyn_clk state are touched -- verify at call sites.
 */
static void aggregate_power_update(struct msm_cvp_core *core,
	struct cvp_power_level *nrt_pwr,
	struct cvp_power_level *rt_pwr,
	unsigned int max_clk_rate)
{
	struct msm_cvp_inst *inst;
	int i, j;
	/* [0] = non-realtime sessions, [1] = realtime sessions */
	unsigned long blocks_sum[2][HFI_MAX_HW_THREADS] = {0};
	unsigned long fw_sum[2] = {0}, max_cycle[2] = {0}, op_max_cycle[2] = {0};
	unsigned long op_blocks_max[2][HFI_MAX_HW_THREADS] = {0};
	unsigned long op_fw_max[2] = {0}, bw_sum[2] = {0}, op_bw_max[2] = {0};

	/* sum_fps[] is recomputed on every aggregation pass */
	for (j = 0; j < HFI_MAX_HW_THREADS; j++)
		core->dyn_clk.sum_fps[j] = 0;

	list_for_each_entry(inst, &core->instances, list) {
		/* Skip dead sessions and those without a sub-block profile */
		if (inst->state == MSM_CVP_CORE_INVALID ||
			inst->state == MSM_CVP_CORE_UNINIT ||
			!is_subblock_profile_existed(inst))
			continue;
		if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
			/* Non-realtime session use index 0 */
			i = 0;
		} else {
			i = 1;
		}
		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
			if (inst->prop.cycles[j])
				dprintk(CVP_PWR, "pwrUpdate %s %u\n",
					hw_names[j], inst->prop.cycles[j]);
		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
			if (inst->prop.op_cycles[j])
				dprintk(CVP_PWR, "pwrUpdate_OP %s %u\n",
					hw_names[j], inst->prop.op_cycles[j]);
		dprintk(CVP_PWR, " fw %u fw_o %u\n", inst->prop.fw_cycles,
			inst->prop.fw_op_cycles);
		/* Sum averages; operational values take the per-class max */
		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
			blocks_sum[i][j] += inst->prop.cycles[j];
		fw_sum[i] += inst->prop.fw_cycles;
		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
			op_blocks_max[i][j] =
				(op_blocks_max[i][j] >= inst->prop.op_cycles[j]) ?
				op_blocks_max[i][j] : inst->prop.op_cycles[j];
		op_fw_max[i] =
			(op_fw_max[i] >= inst->prop.fw_op_cycles) ?
			op_fw_max[i] : inst->prop.fw_op_cycles;
		bw_sum[i] += inst->prop.ddr_bw;
		op_bw_max[i] =
			(op_bw_max[i] >= inst->prop.ddr_op_bw) ?
			op_bw_max[i] : inst->prop.ddr_op_bw;
		/* Aggregate fps per HW thread across all sessions */
		for (j = 0; j < HFI_MAX_HW_THREADS; j++) {
			if (inst->prop.fps[j])
				dprintk(CVP_PWR, "fps %s %d ", hw_names[j],
					inst->prop.fps[j]);
			core->dyn_clk.sum_fps[j] += inst->prop.fps[j];
		}
		for (j = 0; j < HFI_MAX_HW_THREADS; j++)
			if (core->dyn_clk.sum_fps[j])
				dprintk(CVP_PWR, "sum_fps %s %d ", hw_names[j],
					core->dyn_clk.sum_fps[j]);
	}
	/* Reduce per-block arrays to one clock request per class */
	for (i = 0; i < 2; i++) {
		max_cycle[i] = find_max(&blocks_sum[i][0], HFI_MAX_HW_THREADS);
		op_max_cycle[i] = find_max(&op_blocks_max[i][0], HFI_MAX_HW_THREADS);
		/* Operational request cannot exceed the max clock rate */
		op_max_cycle[i] =
			(op_max_cycle[i] > max_clk_rate) ?
			max_clk_rate : op_max_cycle[i];
		/* Bandwidth: take the larger of summed vs. op max */
		bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
			bw_sum[i] : op_bw_max[i];
	}
	nrt_pwr->core_sum += max_cycle[0];
	nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_max_cycle[0]) ?
			nrt_pwr->op_core_sum : op_max_cycle[0];
	nrt_pwr->bw_sum += bw_sum[0];
	rt_pwr->core_sum += max_cycle[1];
	rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_max_cycle[1]) ?
			rt_pwr->op_core_sum : op_max_cycle[1];
	rt_pwr->bw_sum += bw_sum[1];
}
  107. /**
  108. * adjust_bw_freqs(): calculate CVP clock freq and bw required to sustain
  109. * required use case.
  110. * Bandwidth vote will be best-effort, not returning error if the request
  111. * b/w exceeds max limit.
  112. * Clock vote from non-realtime sessions will be best effort, not returning
* error if the aggregated session clock request exceeds max limit.
  114. * Clock vote from realtime session will be hard request. If aggregated
  115. * session clock request exceeds max limit, the function will return
  116. * error.
  117. *
  118. * Ensure caller acquires clk_lock!
  119. */
static int adjust_bw_freqs(void)
{
	struct msm_cvp_core *core;
	struct iris_hfi_device *hdev;
	struct bus_info *bus = NULL;
	struct clock_set *clocks;
	struct clock_info *cl;
	struct allowed_clock_rates_table *tbl = NULL;
	unsigned int tbl_size;
	unsigned int cvp_min_rate, cvp_max_rate, max_bw = 0, min_bw = 0;
	struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
	unsigned long tmp, core_sum, op_core_sum, bw_sum;
	int i, rc = 0, bus_count = 0;
	unsigned long ctrl_freq;

	core = cvp_driver->cvp_core;
	hdev = core->device->hfi_device_data;
	clocks = &core->resources.clock_set;
	/* The last clock_tbl entry is the scalable CVP core clock */
	cl = &clocks->clock_tbl[clocks->count - 1];
	tbl = core->resources.allowed_clks_tbl;
	tbl_size = core->resources.allowed_clks_tbl_size;
	/* allowed_clks_tbl assumed sorted ascending -- first/last = min/max */
	cvp_min_rate = tbl[0].clock_rate;
	cvp_max_rate = tbl[tbl_size - 1].clock_rate;

	/* Find the "cvp-ddr" bus; bw floor is 10% of its upper range */
	for (bus_count = 0; bus_count < core->resources.bus_set.count; bus_count++) {
		if (!strcmp(core->resources.bus_set.bus_tbl[bus_count].name, "cvp-ddr")) {
			bus = &core->resources.bus_set.bus_tbl[bus_count];
			max_bw = bus->range[1];
			min_bw = max_bw/10;
		}
	}
	if (!bus) {
		dprintk(CVP_ERR, "bus node is NULL for cvp-ddr\n");
		return -EINVAL;
	}

	aggregate_power_update(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
	/*
	 * NOTE(review): this line uses %u while the warning below uses
	 * %lld for the same field -- confirm the cvp_power_level member
	 * widths and make the specifiers consistent.
	 */
	dprintk(CVP_PWR, "PwrUpdate nrt %u %u rt %u %u\n",
		nrt_pwr.core_sum, nrt_pwr.op_core_sum,
		rt_pwr.core_sum, rt_pwr.op_core_sum);

	/* Realtime demand beyond HW capability is a hard failure */
	if (rt_pwr.core_sum > cvp_max_rate) {
		dprintk(CVP_WARN, "%s clk vote out of range %lld\n",
			__func__, rt_pwr.core_sum);
		return -ENOTSUPP;
	}

	/* Combined vote: sum of averages, floored by the operational max */
	core_sum = rt_pwr.core_sum + nrt_pwr.core_sum;
	op_core_sum = (rt_pwr.op_core_sum >= nrt_pwr.op_core_sum) ?
		rt_pwr.op_core_sum : nrt_pwr.op_core_sum;
	core_sum = (core_sum >= op_core_sum) ?
		core_sum : op_core_sum;
	/* Quantize up to the next allowed rate; clamp at table bounds */
	if (core_sum > cvp_max_rate) {
		core_sum = cvp_max_rate;
	} else if (core_sum <= cvp_min_rate) {
		core_sum = cvp_min_rate;
	} else {
		for (i = 1; i < tbl_size; i++)
			if (core_sum <= tbl[i].clock_rate)
				break;
		core_sum = tbl[i].clock_rate;
	}

	bw_sum = rt_pwr.bw_sum + nrt_pwr.bw_sum;
	/* >>10 divides by 1024 -- presumably a units conversion for the
	 * bus driver (bytes -> KB); confirm against msm_cvp_set_bw().
	 */
	bw_sum = bw_sum >> 10;
	/* Bandwidth vote is best-effort: clamp into [min_bw, max_bw] */
	bw_sum = (bw_sum > max_bw) ? max_bw : bw_sum;
	bw_sum = (bw_sum < min_bw) ? min_bw : bw_sum;

	dprintk(CVP_PWR, "%s %lld %lld\n", __func__,
		core_sum, bw_sum);
	if (!cl->has_scaling) {
		dprintk(CVP_ERR, "Cannot scale CVP clock\n");
		return -EINVAL;
	}

	/* Apply the clock vote; restore curr_freq on failure */
	tmp = core->curr_freq;
	core->curr_freq = core_sum;
	core->orig_core_sum = core_sum;
	rc = msm_cvp_set_clocks(core);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to set clock rate %u %s: %d %s\n",
			core_sum, cl->name, rc, __func__);
		core->curr_freq = tmp;
		return rc;
	}

	/*
	 * Arm dynamic clocking: control limits are 1.5x the configured
	 * frequency divided by each thread's aggregate fps (0 disarms).
	 */
	ctrl_freq = (core->curr_freq*3)>>1;
	core->dyn_clk.conf_freq = core->curr_freq;
	for (i = 0; i < HFI_MAX_HW_THREADS; ++i) {
		core->dyn_clk.hi_ctrl_lim[i] = core->dyn_clk.sum_fps[i] ?
			ctrl_freq/core->dyn_clk.sum_fps[i] : 0;
		core->dyn_clk.lo_ctrl_lim[i] =
			core->dyn_clk.hi_ctrl_lim[i];
	}

	hdev->clk_freq = core->curr_freq;
	/* Bandwidth vote last; its rc is returned to the caller */
	rc = msm_cvp_set_bw(bus, bw_sum);
	return rc;
}
  210. int msm_cvp_update_power(struct msm_cvp_inst *inst)
  211. {
  212. int rc = 0;
  213. struct msm_cvp_core *core;
  214. struct msm_cvp_inst *s;
  215. if (!inst) {
  216. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  217. return -EINVAL;
  218. }
  219. s = cvp_get_inst_validate(inst->core, inst);
  220. if (!s)
  221. return -ECONNRESET;
  222. core = inst->core;
  223. mutex_lock(&core->clk_lock);
  224. rc = adjust_bw_freqs();
  225. mutex_unlock(&core->clk_lock);
  226. cvp_put_inst(s);
  227. return rc;
  228. }
/*
 * cvp_readjust_clock() - dynamically rescale the CVP core clock for HW
 * thread @i based on the measured windowed-average busy cycles
 * @avg_cycles.
 *
 * Does nothing unless the average crossed the thread's hi/lo control
 * limit (and the clock is not already at the corresponding bound). The
 * new frequency is quantized to the allowed-rates table, never dropped
 * below the statically voted orig_core_sum, and the control limits are
 * recomputed around the new operating point on success.
 *
 * Caller must hold core->clk_lock (see cvp_check_clock()).
 */
static int cvp_readjust_clock(struct msm_cvp_core *core,
	u32 avg_cycles, enum hfi_hw_thread i)
{
	int rc = 0;
	struct allowed_clock_rates_table *tbl = NULL;
	unsigned int tbl_size = 0;
	unsigned int cvp_min_rate = 0, cvp_max_rate = 0;
	unsigned long tmp = core->curr_freq;	/* saved for rollback */
	unsigned long lo_freq = 0;
	u32 j;

	tbl = core->resources.allowed_clks_tbl;
	tbl_size = core->resources.allowed_clks_tbl_size;
	cvp_min_rate = tbl[0].clock_rate;
	cvp_max_rate = tbl[tbl_size - 1].clock_rate;

	/*
	 * Only act when above the hi limit (and not already at max) or
	 * at/below the lo limit (and not already at min).
	 */
	if (!((avg_cycles > core->dyn_clk.hi_ctrl_lim[i] &&
		core->curr_freq != cvp_max_rate) ||
		(avg_cycles <= core->dyn_clk.lo_ctrl_lim[i] &&
		core->curr_freq != cvp_min_rate))) {
		return rc;
	}

	/*
	 * Target freq = avg_cycles * fps * 2/3 -- the inverse of the
	 * 1.5x margin used when the control limits were derived.
	 */
	core->curr_freq = ((avg_cycles * core->dyn_clk.sum_fps[i]) << 1)/3;
	dprintk(CVP_PWR,
		"%s - cycles tot %u, avg %u. sum_fps %u, cur_freq %u\n",
		__func__,
		core->dyn_clk.cycle[i].total,
		avg_cycles,
		core->dyn_clk.sum_fps[i],
		core->curr_freq);

	/* Quantize to the table; lo_freq tracks the next step down */
	if (core->curr_freq > cvp_max_rate) {
		core->curr_freq = cvp_max_rate;
		lo_freq = (tbl_size > 1) ?
			tbl[tbl_size - 2].clock_rate :
			cvp_min_rate;
	} else if (core->curr_freq <= cvp_min_rate) {
		core->curr_freq = cvp_min_rate;
		lo_freq = cvp_min_rate;
	} else {
		for (j = 1; j < tbl_size; j++)
			if (core->curr_freq <= tbl[j].clock_rate)
				break;
		core->curr_freq = tbl[j].clock_rate;
		lo_freq = tbl[j-1].clock_rate;
	}

	/* Never undercut the statically aggregated session demand */
	if (core->orig_core_sum > core->curr_freq) {
		dprintk(CVP_PWR,
			"%s - %d - Cancel readjust, core %u, freq %u\n",
			__func__, i, core->orig_core_sum, core->curr_freq);
		core->curr_freq = tmp;
		return rc;
	}

	dprintk(CVP_PWR,
		"%s:%d - %d - Readjust to %u\n",
		__func__, __LINE__, i, core->curr_freq);
	rc = msm_cvp_set_clocks(core);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to set clock rate %u: %d %s\n",
			core->curr_freq, rc, __func__);
		/* Roll back the cached frequency on failure */
		core->curr_freq = tmp;
	} else {
		/*
		 * Recompute the control limits around the new operating
		 * point; the low reference never drops below the
		 * statically configured frequency.
		 */
		lo_freq = (lo_freq < core->dyn_clk.conf_freq) ?
			core->dyn_clk.conf_freq : lo_freq;
		core->dyn_clk.hi_ctrl_lim[i] = core->dyn_clk.sum_fps[i] ?
			((core->curr_freq*3)>>1)/core->dyn_clk.sum_fps[i] : 0;
		core->dyn_clk.lo_ctrl_lim[i] =
			core->dyn_clk.sum_fps[i] ?
			((lo_freq*3)>>1)/core->dyn_clk.sum_fps[i] : 0;
		dprintk(CVP_PWR,
			"%s - Readjust clk to %u. New lim [%d] hi %u lo %u\n",
			__func__, core->curr_freq, i,
			core->dyn_clk.hi_ctrl_lim[i],
			core->dyn_clk.lo_ctrl_lim[i]);
	}

	return rc;
}
/*
 * cvp_check_clock() - feed one frame's cycle statistics from message
 * header @hdr into each HW thread's moving-average window and, once a
 * window is full, trigger cvp_readjust_clock() with the average.
 *
 * Threads whose hi_ctrl_lim is 0 (dynamic clocking disarmed, i.e. no
 * aggregate fps at vote time) are skipped.
 *
 * Return: 0, or the error from the last cvp_readjust_clock() call.
 */
int cvp_check_clock(struct msm_cvp_inst *inst,
	struct cvp_hfi_msg_session_hdr_ext *hdr)
{
	int rc = 0;
	u32 i, j;
	u32 hw_cycles[HFI_MAX_HW_THREADS] = {0};
	u32 fw_cycles = 0;
	struct msm_cvp_core *core = inst->core;

	/* Total FW cycles and per-thread HW cycles over all activations */
	for (i = 0; i < HFI_MAX_HW_ACTIVATIONS_PER_FRAME; ++i)
		fw_cycles += hdr->fw_cycles[i];
	for (i = 0; i < HFI_MAX_HW_THREADS; ++i)
		for (j = 0; j < HFI_MAX_HW_ACTIVATIONS_PER_FRAME; ++j)
			hw_cycles[i] += hdr->hw_cycles[i][j];

	/*
	 * NOTE(review): label order here (FDU/MPU/ODU/ICA) disagrees with
	 * hw_names[] (FDU/OD/MPU/ICA) -- confirm which mapping is right.
	 */
	dprintk(CVP_PWR, "%s - cycles fw %u. FDU %d MPU %d ODU %d ICA %d\n",
		__func__, fw_cycles, hw_cycles[0],
		hw_cycles[1], hw_cycles[2], hw_cycles[3]);

	mutex_lock(&core->clk_lock);
	for (i = 0; i < HFI_MAX_HW_THREADS; ++i) {
		dprintk(CVP_PWR, "%s - %d: hw_cycles %u, tens_thresh %u\n",
			__func__, i, hw_cycles[i],
			core->dyn_clk.hi_ctrl_lim[i]);
		if (core->dyn_clk.hi_ctrl_lim[i]) {
			/*
			 * Grow the window until full; afterwards retire
			 * the oldest sample from the running total.
			 */
			if (core->dyn_clk.cycle[i].size < CVP_CYCLE_STAT_SIZE)
				core->dyn_clk.cycle[i].size++;
			else
				core->dyn_clk.cycle[i].total -=
					core->dyn_clk.cycle[i].busy[
					core->dyn_clk.cycle[i].idx];
			if (hw_cycles[i]) {
				/* Sample = this thread's HW + FW cycles */
				core->dyn_clk.cycle[i].busy[
					core->dyn_clk.cycle[i].idx]
					= hw_cycles[i] + fw_cycles;
				core->dyn_clk.cycle[i].total
					+= hw_cycles[i] + fw_cycles;
				dprintk(CVP_PWR,
					"%s: busy (hw + fw) cycles = %u\n",
					__func__,
					core->dyn_clk.cycle[i].busy[
					core->dyn_clk.cycle[i].idx]);
				dprintk(CVP_PWR, "total cycles %u\n",
					core->dyn_clk.cycle[i].total);
			} else {
				/*
				 * No per-thread data: fall back to the
				 * header's overall busy_cycles.
				 */
				core->dyn_clk.cycle[i].busy[
					core->dyn_clk.cycle[i].idx] =
					hdr->busy_cycles;
				core->dyn_clk.cycle[i].total +=
					hdr->busy_cycles;
				dprintk(CVP_PWR,
					"%s - busy cycles = %u total %u\n",
					__func__,
					core->dyn_clk.cycle[i].busy[
					core->dyn_clk.cycle[i].idx],
					core->dyn_clk.cycle[i].total);
			}

			/* Advance the circular-buffer write index */
			core->dyn_clk.cycle[i].idx =
				(core->dyn_clk.cycle[i].idx ==
				CVP_CYCLE_STAT_SIZE-1) ?
				0 : core->dyn_clk.cycle[i].idx+1;

			dprintk(CVP_PWR, "%s - %d: size %u, tens_thresh %u\n",
				__func__, i, core->dyn_clk.cycle[i].size,
				core->dyn_clk.hi_ctrl_lim[i]);
			if (core->dyn_clk.cycle[i].size == CVP_CYCLE_STAT_SIZE
				&& core->dyn_clk.hi_ctrl_lim[i] != 0) {
				/*
				 * total>>3 == total/8: assumes
				 * CVP_CYCLE_STAT_SIZE == 8 -- confirm.
				 */
				u32 avg_cycles =
					core->dyn_clk.cycle[i].total>>3;

				rc = cvp_readjust_clock(core,
					avg_cycles,
					i);
			}
		}
	}
	mutex_unlock(&core->clk_lock);
	return rc;
}
  378. unsigned int msm_cvp_get_hw_aggregate_cycles(enum hfi_hw_thread hwblk)
  379. {
  380. struct msm_cvp_core *core;
  381. struct msm_cvp_inst *inst;
  382. unsigned long cycles_sum = 0;
  383. core = cvp_driver->cvp_core;
  384. if (!core) {
  385. dprintk(CVP_ERR, "%s: invalid core\n", __func__);
  386. return -EINVAL;
  387. }
  388. mutex_lock(&core->clk_lock);
  389. list_for_each_entry(inst, &core->instances, list) {
  390. if (inst->state == MSM_CVP_CORE_INVALID ||
  391. inst->state == MSM_CVP_CORE_UNINIT ||
  392. !is_subblock_profile_existed(inst))
  393. continue;
  394. switch (hwblk) {
  395. case HFI_HW_FDU:
  396. {
  397. cycles_sum += inst->prop.cycles[HFI_HW_FDU];
  398. break;
  399. }
  400. case HFI_HW_ICA:
  401. {
  402. cycles_sum += inst->prop.cycles[HFI_HW_ICA];
  403. break;
  404. }
  405. case HFI_HW_MPU:
  406. {
  407. cycles_sum += inst->prop.cycles[HFI_HW_MPU];
  408. break;
  409. }
  410. case HFI_HW_OD:
  411. {
  412. cycles_sum += inst->prop.cycles[HFI_HW_OD];
  413. break;
  414. }
  415. case HFI_HW_VADL:
  416. {
  417. cycles_sum += inst->prop.cycles[HFI_HW_VADL];
  418. break;
  419. }
  420. case HFI_HW_TOF:
  421. {
  422. cycles_sum += inst->prop.cycles[HFI_HW_TOF];
  423. break;
  424. }
  425. case HFI_HW_RGE:
  426. {
  427. cycles_sum += inst->prop.cycles[HFI_HW_RGE];
  428. break;
  429. }
  430. case HFI_HW_XRA:
  431. {
  432. cycles_sum += inst->prop.cycles[HFI_HW_XRA];
  433. break;
  434. }
  435. case HFI_HW_LSR:
  436. {
  437. cycles_sum += inst->prop.cycles[HFI_HW_LSR];
  438. break;
  439. }
  440. default:
  441. dprintk(CVP_ERR, "unrecognized hw block %d\n",
  442. hwblk);
  443. break;
  444. }
  445. }
  446. mutex_unlock(&core->clk_lock);
  447. cycles_sum = cycles_sum&0xFFFFFFFF;
  448. return (unsigned int)cycles_sum;
  449. }
  450. bool check_clock_required(struct msm_cvp_inst *inst,
  451. struct eva_kmd_hfi_packet *hdr)
  452. {
  453. struct cvp_hfi_msg_session_hdr_ext *ehdr =
  454. (struct cvp_hfi_msg_session_hdr_ext *)hdr;
  455. bool clock_check = false;
  456. if (!msm_cvp_dcvs_disable &&
  457. ehdr->packet_type == HFI_MSG_SESSION_CVP_FD) {
  458. if (ehdr->size == sizeof(struct cvp_hfi_msg_session_hdr_ext)
  459. + sizeof(struct cvp_hfi_buf_type)) {
  460. struct msm_cvp_core *core = inst->core;
  461. dprintk(CVP_PWR, "busy cycle %d, total %d\n",
  462. ehdr->busy_cycles, ehdr->total_cycles);
  463. if (core->dyn_clk.sum_fps[HFI_HW_FDU] ||
  464. core->dyn_clk.sum_fps[HFI_HW_MPU] ||
  465. core->dyn_clk.sum_fps[HFI_HW_OD] ||
  466. core->dyn_clk.sum_fps[HFI_HW_ICA]) {
  467. clock_check = true;
  468. }
  469. } else {
  470. dprintk(CVP_WARN, "dcvs is disabled, %d != %d + %d\n",
  471. ehdr->size, sizeof(struct cvp_hfi_msg_session_hdr_ext),
  472. sizeof(struct cvp_hfi_buf_type));
  473. }
  474. }
  475. return clock_check;
  476. }