kgsl_pwrscale.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/devfreq_cooling.h>
  7. #include <linux/slab.h>
  8. #include "kgsl_bus.h"
  9. #include "kgsl_device.h"
  10. #include "kgsl_pwrscale.h"
  11. #include "kgsl_trace.h"
/* Tunables handed to the msm-adreno-tz devfreq governor via private_data. */
static struct devfreq_msm_adreno_tz_data adreno_tz_data = {
        .bus = {
                .max = 350,
                .floating = true,
        },
        .mod_percent = 100,
};

/* Work handlers; devfreq suspend/resume/update must run from process context */
static void do_devfreq_suspend(struct work_struct *work);
static void do_devfreq_resume(struct work_struct *work);
static void do_devfreq_notify(struct work_struct *work);

/*
 * These variables are used to keep the latest data
 * returned by kgsl_devfreq_get_dev_status
 */
static struct xstats last_xstats;
static struct devfreq_dev_status last_status = { .private_data = &last_xstats };
  28. /*
  29. * kgsl_pwrscale_fast_bus_hint - enable fast_bus_hint feature in
  30. * adreno_tz governer
  31. * @on: boolean flag to ON/OFF fast_bus_hint
  32. *
  33. * Called when fast_bus_hint feature should be enabled.
  34. */
  35. void kgsl_pwrscale_fast_bus_hint(bool on)
  36. {
  37. adreno_tz_data.fast_bus_hint = on;
  38. }
  39. /*
  40. * kgsl_pwrscale_sleep - notify governor that device is going off
  41. * @device: The device
  42. *
  43. * Called shortly after all pending work is completed.
  44. */
  45. void kgsl_pwrscale_sleep(struct kgsl_device *device)
  46. {
  47. if (!device->pwrscale.enabled)
  48. return;
  49. device->pwrscale.on_time = 0;
  50. /* to call devfreq_suspend_device() from a kernel thread */
  51. queue_work(device->pwrscale.devfreq_wq,
  52. &device->pwrscale.devfreq_suspend_ws);
  53. }
  54. /*
  55. * kgsl_pwrscale_wake - notify governor that device is going on
  56. * @device: The device
  57. *
  58. * Called when the device is returning to an active state.
  59. */
  60. void kgsl_pwrscale_wake(struct kgsl_device *device)
  61. {
  62. struct kgsl_power_stats stats;
  63. struct kgsl_pwrscale *psc = &device->pwrscale;
  64. if (!device->pwrscale.enabled)
  65. return;
  66. /* clear old stats before waking */
  67. memset(&psc->accum_stats, 0, sizeof(psc->accum_stats));
  68. memset(&last_xstats, 0, sizeof(last_xstats));
  69. /* and any hw activity from waking up*/
  70. device->ftbl->power_stats(device, &stats);
  71. psc->time = ktime_get();
  72. psc->next_governor_call = ktime_add_us(psc->time,
  73. KGSL_GOVERNOR_CALL_INTERVAL);
  74. /* to call devfreq_resume_device() from a kernel thread */
  75. queue_work(psc->devfreq_wq, &psc->devfreq_resume_ws);
  76. }
  77. /*
  78. * kgsl_pwrscale_busy - update pwrscale state for new work
  79. * @device: The device
  80. *
  81. * Called when new work is submitted to the device.
  82. * This function must be called with the device mutex locked.
  83. */
  84. void kgsl_pwrscale_busy(struct kgsl_device *device)
  85. {
  86. if (!device->pwrscale.enabled)
  87. return;
  88. if (device->pwrscale.on_time == 0)
  89. device->pwrscale.on_time = ktime_to_us(ktime_get());
  90. }
  91. /**
  92. * kgsl_pwrscale_update_stats() - update device busy statistics
  93. * @device: The device
  94. *
  95. * Read hardware busy counters and accumulate the results.
  96. */
  97. void kgsl_pwrscale_update_stats(struct kgsl_device *device)
  98. {
  99. struct kgsl_pwrctrl *pwrctrl = &device->pwrctrl;
  100. struct kgsl_pwrscale *psc = &device->pwrscale;
  101. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  102. return;
  103. if (!psc->enabled)
  104. return;
  105. if (device->state == KGSL_STATE_ACTIVE) {
  106. struct kgsl_power_stats stats;
  107. ktime_t cur_time = ktime_get();
  108. device->ftbl->power_stats(device, &stats);
  109. device->pwrscale.accum_stats.busy_time += stats.busy_time;
  110. device->pwrscale.accum_stats.ram_time += stats.ram_time;
  111. device->pwrscale.accum_stats.ram_wait += stats.ram_wait;
  112. pwrctrl->clock_times[pwrctrl->active_pwrlevel] +=
  113. stats.busy_time;
  114. pwrctrl->time_in_pwrlevel[pwrctrl->active_pwrlevel] +=
  115. ktime_us_delta(cur_time, pwrctrl->last_stat_updated);
  116. pwrctrl->last_stat_updated = cur_time;
  117. }
  118. }
  119. /**
  120. * kgsl_pwrscale_update() - update device busy statistics
  121. * @device: The device
  122. *
  123. * If enough time has passed schedule the next call to devfreq
  124. * get_dev_status.
  125. */
  126. void kgsl_pwrscale_update(struct kgsl_device *device)
  127. {
  128. ktime_t t;
  129. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  130. return;
  131. if (!device->pwrscale.enabled)
  132. return;
  133. t = ktime_get();
  134. if (ktime_compare(t, device->pwrscale.next_governor_call) < 0)
  135. return;
  136. device->pwrscale.next_governor_call = ktime_add_us(t,
  137. KGSL_GOVERNOR_CALL_INTERVAL);
  138. /* to call update_devfreq() from a kernel thread */
  139. if (device->state != KGSL_STATE_SLUMBER)
  140. queue_work(device->pwrscale.devfreq_wq,
  141. &device->pwrscale.devfreq_notify_ws);
  142. }
  143. /*
  144. * kgsl_pwrscale_disable - temporarily disable the governor
  145. * @device: The device
  146. * @turbo: Indicates if pwrlevel should be forced to turbo
  147. *
  148. * Temporarily disable the governor, to prevent interference
  149. * with profiling tools that expect a fixed clock frequency.
  150. * This function must be called with the device mutex locked.
  151. */
  152. void kgsl_pwrscale_disable(struct kgsl_device *device, bool turbo)
  153. {
  154. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  155. return;
  156. if (device->pwrscale.devfreqptr)
  157. queue_work(device->pwrscale.devfreq_wq,
  158. &device->pwrscale.devfreq_suspend_ws);
  159. device->pwrscale.enabled = false;
  160. if (turbo)
  161. kgsl_pwrctrl_pwrlevel_change(device, 0);
  162. }
  163. /*
  164. * kgsl_pwrscale_enable - re-enable the governor
  165. * @device: The device
  166. *
  167. * Reenable the governor after a kgsl_pwrscale_disable() call.
  168. * This function must be called with the device mutex locked.
  169. */
  170. void kgsl_pwrscale_enable(struct kgsl_device *device)
  171. {
  172. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  173. return;
  174. if (device->pwrscale.devfreqptr) {
  175. queue_work(device->pwrscale.devfreq_wq,
  176. &device->pwrscale.devfreq_resume_ws);
  177. device->pwrscale.enabled = true;
  178. } else {
  179. /*
  180. * Don't enable it if devfreq is not set and let the device
  181. * run at default level;
  182. */
  183. kgsl_pwrctrl_pwrlevel_change(device,
  184. device->pwrctrl.default_pwrlevel);
  185. device->pwrscale.enabled = false;
  186. }
  187. }
  188. /*
  189. * kgsl_devfreq_target - devfreq_dev_profile.target callback
  190. * @dev: see devfreq.h
  191. * @freq: see devfreq.h
  192. * @flags: see devfreq.h
  193. *
  194. * This is a devfreq callback function for dcvs recommendations and
  195. * thermal constraints. If any thermal constraints are present,
  196. * devfreq adjusts the gpu frequency range to cap the max frequency
  197. * thereby not recommending anything above the constraint.
  198. * This function expects the device mutex to be unlocked.
  199. */
  200. int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
  201. {
  202. struct kgsl_device *device = dev_get_drvdata(dev);
  203. struct kgsl_pwrctrl *pwr;
  204. int level;
  205. unsigned int i;
  206. unsigned long cur_freq, rec_freq;
  207. struct kgsl_pwrscale *pwrscale = &device->pwrscale;
  208. if (device == NULL)
  209. return -ENODEV;
  210. if (freq == NULL)
  211. return -EINVAL;
  212. if (!pwrscale->devfreq_enabled) {
  213. /*
  214. * When we try to use performance governor, this function
  215. * will called by devfreq driver, while adding governor using
  216. * devfreq_add_device.
  217. * To add and start performance governor successfully during
  218. * probe, return 0 when we reach here. pwrscale->enabled will
  219. * be set to true after successfully starting the governor.
  220. */
  221. if (!pwrscale->enabled)
  222. return 0;
  223. return -EPROTO;
  224. }
  225. pwr = &device->pwrctrl;
  226. rec_freq = *freq;
  227. mutex_lock(&device->mutex);
  228. cur_freq = kgsl_pwrctrl_active_freq(pwr);
  229. level = pwr->active_pwrlevel;
  230. /* If the governor recommends a new frequency, update it here */
  231. if (rec_freq != cur_freq) {
  232. for (i = 0; i < pwr->num_pwrlevels; i++)
  233. if (rec_freq == pwr->pwrlevels[i].gpu_freq) {
  234. level = i;
  235. break;
  236. }
  237. if (level != pwr->active_pwrlevel)
  238. kgsl_pwrctrl_pwrlevel_change(device, level);
  239. }
  240. *freq = kgsl_pwrctrl_active_freq(pwr);
  241. mutex_unlock(&device->mutex);
  242. return 0;
  243. }
/*
 * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback
 * @dev: see devfreq.h
 * @stat: status record filled in by this function
 *
 * Reports accumulated busy/total time for the window since the previous
 * call, and caches a copy of the result (plus RAM/bus counters) for the
 * busmon governor to re-use.  Resets the accumulators afterwards.
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_get_dev_status(struct device *dev,
                struct devfreq_dev_status *stat)
{
        struct kgsl_device *device = dev_get_drvdata(dev);
        struct kgsl_pwrctrl *pwrctrl;
        struct kgsl_pwrscale *pwrscale;
        ktime_t tmp1, tmp2;

        if (device == NULL)
                return -ENODEV;
        if (stat == NULL)
                return -EINVAL;
        if (!device->pwrscale.devfreq_enabled)
                return -EPROTO;

        pwrscale = &device->pwrscale;
        pwrctrl = &device->pwrctrl;

        mutex_lock(&device->mutex);

        /* Window-start stamp taken before the (possibly slow) counter read */
        tmp1 = ktime_get();
        /*
         * If the GPU clock is on grab the latest power counter
         * values. Otherwise the most recent ACTIVE values will
         * already be stored in accum_stats.
         */
        kgsl_pwrscale_update_stats(device);

        tmp2 = ktime_get();
        stat->total_time = ktime_us_delta(tmp2, pwrscale->time);
        /*
         * Next window starts at tmp1, so the time spent inside
         * kgsl_pwrscale_update_stats() is charged to the next sample.
         */
        pwrscale->time = tmp1;

        stat->busy_time = pwrscale->accum_stats.busy_time;
        stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);
        stat->private_data = &device->active_context_count;

        /*
         * keep the latest devfreq_dev_status values
         * and vbif counters data
         * to be (re)used by kgsl_busmon_get_dev_status()
         */
        if (pwrctrl->bus_control) {
                struct kgsl_pwrlevel *pwrlevel;
                struct xstats *last_b =
                        (struct xstats *)last_status.private_data;

                last_status.total_time = stat->total_time;
                last_status.busy_time = stat->busy_time;
                last_status.current_frequency = stat->current_frequency;

                last_b->ram_time = device->pwrscale.accum_stats.ram_time;
                last_b->ram_wait = device->pwrscale.accum_stats.ram_wait;
                last_b->buslevel = device->pwrctrl.cur_dcvs_buslevel;

                pwrlevel = &pwrctrl->pwrlevels[pwrctrl->min_pwrlevel];
                last_b->gpu_minfreq = pwrlevel->gpu_freq;
        }

        kgsl_pwrctrl_busy_time(device, stat->total_time, stat->busy_time);
        trace_kgsl_pwrstats(device, stat->total_time,
                &pwrscale->accum_stats, device->active_context_count);

        /* Stats were consumed; restart accumulation for the next window */
        memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));

        mutex_unlock(&device->mutex);

        return 0;
}
  306. /*
  307. * kgsl_devfreq_get_cur_freq - devfreq_dev_profile.get_cur_freq callback
  308. * @dev: see devfreq.h
  309. * @freq: see devfreq.h
  310. * @flags: see devfreq.h
  311. *
  312. * This function expects the device mutex to be unlocked.
  313. */
  314. int kgsl_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
  315. {
  316. struct kgsl_device *device = dev_get_drvdata(dev);
  317. struct kgsl_pwrscale *pwrscale = &device->pwrscale;
  318. if (device == NULL)
  319. return -ENODEV;
  320. if (freq == NULL)
  321. return -EINVAL;
  322. if (!pwrscale->devfreq_enabled) {
  323. /*
  324. * When we try to use performance governor, this function
  325. * will called by devfreq driver, while adding governor using
  326. * devfreq_add_device.
  327. * To add and start performance governor successfully during
  328. * probe, return 0 when we reach here. pwrscale->enabled will
  329. * be set to true after successfully starting the governor.
  330. */
  331. if (!pwrscale->enabled)
  332. return 0;
  333. return -EPROTO;
  334. }
  335. mutex_lock(&device->mutex);
  336. *freq = kgsl_pwrctrl_active_freq(&device->pwrctrl);
  337. mutex_unlock(&device->mutex);
  338. return 0;
  339. }
  340. /*
  341. * kgsl_busmon_get_dev_status - devfreq_dev_profile.get_dev_status callback
  342. * @dev: see devfreq.h
  343. * @freq: see devfreq.h
  344. * @flags: see devfreq.h
  345. *
  346. * This function expects the device mutex to be unlocked.
  347. */
  348. int kgsl_busmon_get_dev_status(struct device *dev,
  349. struct devfreq_dev_status *stat)
  350. {
  351. struct xstats *b;
  352. struct kgsl_device *device = dev_get_drvdata(dev);
  353. if (!device->pwrscale.devfreq_enabled)
  354. return -EPROTO;
  355. stat->total_time = last_status.total_time;
  356. stat->busy_time = last_status.busy_time;
  357. stat->current_frequency = last_status.current_frequency;
  358. if (stat->private_data) {
  359. struct xstats *last_b =
  360. (struct xstats *)last_status.private_data;
  361. b = (struct xstats *)stat->private_data;
  362. b->ram_time = last_b->ram_time;
  363. b->ram_wait = last_b->ram_wait;
  364. b->buslevel = last_b->buslevel;
  365. b->gpu_minfreq = last_b->gpu_minfreq;
  366. }
  367. return 0;
  368. }
  369. static int _read_hint(u32 flags)
  370. {
  371. switch (flags) {
  372. case BUSMON_FLAG_FAST_HINT:
  373. return 1;
  374. case BUSMON_FLAG_SUPER_FAST_HINT:
  375. return 2;
  376. case BUSMON_FLAG_SLOW_HINT:
  377. return -1;
  378. default:
  379. return 0;
  380. }
  381. }
/*
 * kgsl_busmon_target - devfreq_dev_profile.target callback
 * @dev: see devfreq.h
 * @freq: GPU frequency the bus governor based its recommendation on
 * @flags: see devfreq.h
 *
 * Applies the bus governor's hint (stored in bus_profile.flag) as a
 * delta on pwr->bus_mod, clamps the result to the current power
 * level's bus range, and issues a new bus vote if either the IB
 * modifier or the AB bandwidth changed.
 * This function expects the device mutex to be unlocked.
 */
int kgsl_busmon_target(struct device *dev, unsigned long *freq, u32 flags)
{
        struct kgsl_device *device = dev_get_drvdata(dev);
        struct kgsl_pwrctrl *pwr;
        struct kgsl_pwrlevel *pwr_level;
        int level, b;
        u32 bus_flag;
        unsigned long ab_mbytes;

        if (device == NULL)
                return -ENODEV;
        if (freq == NULL)
                return -EINVAL;
        if (!device->pwrscale.enabled)
                return 0;
        if (!device->pwrscale.devfreq_enabled)
                return -EPROTO;

        pwr = &device->pwrctrl;

        /* Nothing to do when bus scaling is not under software control */
        if (!pwr->bus_control)
                return 0;

        mutex_lock(&device->mutex);

        level = pwr->active_pwrlevel;
        pwr_level = &pwr->pwrlevels[level];
        /* Consume the pending hint; it applies to this vote only */
        bus_flag = device->pwrscale.bus_profile.flag;
        device->pwrscale.bus_profile.flag = 0;
        ab_mbytes = device->pwrscale.bus_profile.ab_mbytes;

        /*
         * Bus devfreq governor has calculated its recomendations
         * when gpu was running with *freq frequency.
         * If the gpu frequency is different now it's better to
         * ignore the call
         */
        if (pwr_level->gpu_freq != *freq) {
                mutex_unlock(&device->mutex);
                return 0;
        }

        b = pwr->bus_mod;
        pwr->bus_mod += _read_hint(bus_flag);

        /* trim calculated change to fit range */
        if (pwr_level->bus_freq + pwr->bus_mod < pwr_level->bus_min)
                pwr->bus_mod = -(pwr_level->bus_freq - pwr_level->bus_min);
        else if (pwr_level->bus_freq + pwr->bus_mod > pwr_level->bus_max)
                pwr->bus_mod = pwr_level->bus_max - pwr_level->bus_freq;

        /* Update bus vote if AB or IB is modified */
        if ((pwr->bus_mod != b) || (pwr->bus_ab_mbytes != ab_mbytes)) {
                pwr->bus_percent_ab = device->pwrscale.bus_profile.percent_ab;
                /*
                 * When gpu is thermally throttled to its lowest power level,
                 * drop GPU's AB vote as a last resort to lower CX voltage and
                 * to prevent thermal reset.
                 * Ignore this check when only single power level in use to
                 * avoid setting default AB vote in normal situations too.
                 */
                if (pwr->thermal_pwrlevel != pwr->num_pwrlevels - 1 ||
                                pwr->num_pwrlevels == 1)
                        pwr->bus_ab_mbytes = ab_mbytes;
                else
                        pwr->bus_ab_mbytes = 0;
                kgsl_bus_update(device, KGSL_BUS_VOTE_ON);
        }

        mutex_unlock(&device->mutex);
        return 0;
}
/*
 * kgsl_busmon_get_cur_freq - devfreq_dev_profile.get_cur_freq callback
 * @dev: see devfreq.h
 * @freq: see devfreq.h
 *
 * Intentional no-op stub: the busmon devfreq device has no meaningful
 * "current frequency" of its own; always reports success.
 */
int kgsl_busmon_get_cur_freq(struct device *dev, unsigned long *freq)
{
        return 0;
}
/*
 * Empty release callback for the embedded busmon device.  The struct
 * device lives inside struct kgsl_pwrscale, so there is nothing to
 * free, but the driver core requires a release function.
 */
static void busmon_dev_release(struct device *dev)
{
}
/*
 * pwrscale_busmon_create - register the bus-monitor devfreq device
 * @device: The KGSL device
 * @pdev: Parent platform device
 * @table: Frequency table shared with the GPU devfreq profile
 *
 * Registers the embedded "kgsl-busmon" device, builds its OPP table from
 * the GPU power levels, installs the gpubw governor and adds the devfreq
 * device.  Failures are logged and rolled back but not propagated: bus
 * scaling is treated as optional.
 */
static void pwrscale_busmon_create(struct kgsl_device *device,
                struct platform_device *pdev, unsigned long *table)
{
        struct kgsl_pwrctrl *pwr = &device->pwrctrl;
        struct kgsl_pwrscale *pwrscale = &device->pwrscale;
        struct device *dev = &pwrscale->busmondev;
        struct msm_busmon_extended_profile *bus_profile;
        struct devfreq *bus_devfreq;
        int i, ret;

        bus_profile = &pwrscale->bus_profile;
        bus_profile->private_data = &adreno_tz_data;
        bus_profile->profile.target = kgsl_busmon_target;
        bus_profile->profile.get_dev_status = kgsl_busmon_get_dev_status;
        bus_profile->profile.get_cur_freq = kgsl_busmon_get_cur_freq;
        bus_profile->profile.max_state = pwr->num_pwrlevels;
        bus_profile->profile.freq_table = table;

        dev->parent = &pdev->dev;
        dev->release = busmon_dev_release;
        dev_set_name(dev, "kgsl-busmon");
        dev_set_drvdata(dev, device);

        /* On registration failure the device ref must still be dropped */
        if (device_register(dev)) {
                put_device(dev);
                return;
        }

        /* Build out the OPP table for the busmon device */
        for (i = 0; i < pwr->num_pwrlevels; i++) {
                /* Skip slumber levels with a zero frequency */
                if (!pwr->pwrlevels[i].gpu_freq)
                        continue;

                dev_pm_opp_add(dev, pwr->pwrlevels[i].gpu_freq, 0);
        }

        ret = devfreq_gpubw_init();
        if (ret) {
                dev_err(&pdev->dev, "Failed to add busmon governor: %d\n", ret);
                dev_pm_opp_remove_all_dynamic(dev);
                device_unregister(dev);
                return;
        }

        bus_devfreq = devfreq_add_device(dev, &pwrscale->bus_profile.profile,
                "gpubw_mon", NULL);

        if (IS_ERR_OR_NULL(bus_devfreq)) {
                dev_err(&pdev->dev, "Bus scaling not enabled\n");
                /* Unwind in reverse order of setup */
                devfreq_gpubw_exit();
                dev_pm_opp_remove_all_dynamic(dev);
                device_unregister(dev);
                return;
        }

        pwrscale->bus_devfreq = bus_devfreq;
}
  507. static void pwrscale_of_get_ca_target_pwrlevel(struct kgsl_device *device,
  508. struct device_node *node)
  509. {
  510. u32 pwrlevel = 1;
  511. of_property_read_u32(node, "qcom,ca-target-pwrlevel", &pwrlevel);
  512. if (pwrlevel >= device->pwrctrl.num_pwrlevels)
  513. pwrlevel = 1;
  514. device->pwrscale.ctxt_aware_target_pwrlevel = pwrlevel;
  515. }
/*
 * pwrscale_of_ca_aware - read the context-aware DCVS properties from DT
 * @device: The KGSL device
 *
 * Populates ctxt_aware_enable, ctxt_aware_busy_penalty and
 * ctxt_aware_target_pwrlevel from the parent node, selecting the
 * speed-bin-specific child of "qcom,gpu-pwrlevel-bins" when present.
 */
static void pwrscale_of_ca_aware(struct kgsl_device *device)
{
        struct kgsl_pwrscale *pwrscale = &device->pwrscale;
        struct device_node *parent = device->pdev->dev.of_node;
        struct device_node *node, *child;

        pwrscale->ctxt_aware_enable =
                of_property_read_bool(parent, "qcom,enable-ca-jump");

        if (!pwrscale->ctxt_aware_enable)
                return;

        /* Defaults used when the DT properties are absent */
        pwrscale->ctxt_aware_busy_penalty = 12000;
        of_property_read_u32(parent, "qcom,ca-busy-penalty",
                &pwrscale->ctxt_aware_busy_penalty);

        pwrscale->ctxt_aware_target_pwrlevel = 1;
        node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");

        if (node == NULL) {
                /* No per-bin table; read the property from the parent node */
                pwrscale_of_get_ca_target_pwrlevel(device, parent);
                return;
        }

        for_each_child_of_node(node, child) {
                u32 bin;

                if (of_property_read_u32(child, "qcom,speed-bin", &bin))
                        continue;

                if (bin == device->speed_bin) {
                        pwrscale_of_get_ca_target_pwrlevel(device, child);
                        /* Drop the ref the iterator holds before breaking */
                        of_node_put(child);
                        break;
                }
        }

        of_node_put(node);
}
  547. /*
  548. * thermal_max_notifier_call - Callback function registered to receive qos max
  549. * frequency events.
  550. * @nb: The notifier block
  551. * @val: Max frequency value in KHz for GPU
  552. *
  553. * The function subscribes to GPU max frequency change and updates thermal
  554. * power level accordingly.
  555. */
  556. static int thermal_max_notifier_call(struct notifier_block *nb, unsigned long val, void *data)
  557. {
  558. struct kgsl_pwrctrl *pwr = container_of(nb, struct kgsl_pwrctrl, nb_max);
  559. struct kgsl_device *device = container_of(pwr, struct kgsl_device, pwrctrl);
  560. u32 max_freq = val * 1000;
  561. int level;
  562. if (!device->pwrscale.devfreq_enabled)
  563. return NOTIFY_DONE;
  564. for (level = pwr->num_pwrlevels - 1; level >= 0; level--) {
  565. /* get nearest power level with a maximum delta of 5MHz */
  566. if (abs(pwr->pwrlevels[level].gpu_freq - max_freq) < 5000000)
  567. break;
  568. }
  569. if (level < 0)
  570. return NOTIFY_DONE;
  571. if (level == pwr->thermal_pwrlevel)
  572. return NOTIFY_OK;
  573. trace_kgsl_thermal_constraint(max_freq);
  574. pwr->thermal_pwrlevel = level;
  575. mutex_lock(&device->mutex);
  576. /* Update the current level using the new limit */
  577. kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel);
  578. mutex_unlock(&device->mutex);
  579. return NOTIFY_OK;
  580. }
  581. int kgsl_pwrscale_init(struct kgsl_device *device, struct platform_device *pdev,
  582. const char *governor)
  583. {
  584. struct kgsl_pwrscale *pwrscale = &device->pwrscale;
  585. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  586. struct devfreq *devfreq;
  587. struct msm_adreno_extended_profile *gpu_profile;
  588. int i, ret;
  589. gpu_profile = &pwrscale->gpu_profile;
  590. gpu_profile->private_data = &adreno_tz_data;
  591. gpu_profile->profile.target = kgsl_devfreq_target;
  592. gpu_profile->profile.get_dev_status = kgsl_devfreq_get_dev_status;
  593. gpu_profile->profile.get_cur_freq = kgsl_devfreq_get_cur_freq;
  594. gpu_profile->profile.initial_freq =
  595. pwr->pwrlevels[pwr->default_pwrlevel].gpu_freq;
  596. gpu_profile->profile.polling_ms = 10;
  597. pwrscale_of_ca_aware(device);
  598. for (i = 0; i < pwr->num_pwrlevels; i++)
  599. pwrscale->freq_table[i] = pwr->pwrlevels[i].gpu_freq;
  600. /*
  601. * Max_state is the number of valid power levels.
  602. * The valid power levels range from 0 - (max_state - 1)
  603. */
  604. gpu_profile->profile.max_state = pwr->num_pwrlevels;
  605. /* link storage array to the devfreq profile pointer */
  606. gpu_profile->profile.freq_table = pwrscale->freq_table;
  607. /* if there is only 1 freq, no point in running a governor */
  608. if (gpu_profile->profile.max_state == 1)
  609. governor = "performance";
  610. /* initialize msm-adreno-tz governor specific data here */
  611. adreno_tz_data.disable_busy_time_burst =
  612. of_property_read_bool(pdev->dev.of_node,
  613. "qcom,disable-busy-time-burst");
  614. if (pwrscale->ctxt_aware_enable) {
  615. adreno_tz_data.ctxt_aware_enable = pwrscale->ctxt_aware_enable;
  616. adreno_tz_data.bin.ctxt_aware_target_pwrlevel =
  617. pwrscale->ctxt_aware_target_pwrlevel;
  618. adreno_tz_data.bin.ctxt_aware_busy_penalty =
  619. pwrscale->ctxt_aware_busy_penalty;
  620. }
  621. /*
  622. * If there is a separate GX power rail, allow
  623. * independent modification to its voltage through
  624. * the bus bandwidth vote.
  625. */
  626. if (pwr->bus_control) {
  627. adreno_tz_data.bus.num = pwr->ddr_table_count;
  628. adreno_tz_data.bus.ib_kbps = pwr->ddr_table;
  629. adreno_tz_data.bus.width = pwr->bus_width;
  630. if (!kgsl_of_property_read_ddrtype(device->pdev->dev.of_node,
  631. "qcom,bus-accesses", &adreno_tz_data.bus.max))
  632. adreno_tz_data.bus.floating = false;
  633. }
  634. pwrscale->devfreq_wq = create_freezable_workqueue("kgsl_devfreq_wq");
  635. if (!pwrscale->devfreq_wq) {
  636. dev_err(device->dev, "Failed to allocate kgsl devfreq workqueue\n");
  637. device->pwrscale.enabled = false;
  638. return -ENOMEM;
  639. }
  640. ret = msm_adreno_tz_init();
  641. if (ret) {
  642. dev_err(device->dev, "Failed to add adreno tz governor: %d\n", ret);
  643. device->pwrscale.enabled = false;
  644. return ret;
  645. }
  646. pwr->nb_max.notifier_call = thermal_max_notifier_call;
  647. ret = dev_pm_qos_add_notifier(&pdev->dev, &pwr->nb_max, DEV_PM_QOS_MAX_FREQUENCY);
  648. if (ret) {
  649. dev_err(device->dev, "Unable to register notifier call for thermal: %d\n", ret);
  650. device->pwrscale.enabled = false;
  651. msm_adreno_tz_exit();
  652. return ret;
  653. }
  654. devfreq = devfreq_add_device(&pdev->dev, &gpu_profile->profile,
  655. governor, &adreno_tz_data);
  656. if (IS_ERR_OR_NULL(devfreq)) {
  657. device->pwrscale.enabled = false;
  658. msm_adreno_tz_exit();
  659. return IS_ERR(devfreq) ? PTR_ERR(devfreq) : -EINVAL;
  660. }
  661. pwrscale->enabled = true;
  662. pwrscale->devfreqptr = devfreq;
  663. pwrscale->cooling_dev = of_devfreq_cooling_register(pdev->dev.of_node,
  664. devfreq);
  665. if (IS_ERR(pwrscale->cooling_dev))
  666. pwrscale->cooling_dev = NULL;
  667. if (adreno_tz_data.bus.num)
  668. pwrscale_busmon_create(device, pdev, pwrscale->freq_table);
  669. WARN_ON(sysfs_create_link(&device->dev->kobj,
  670. &devfreq->dev.kobj, "devfreq"));
  671. INIT_WORK(&pwrscale->devfreq_suspend_ws, do_devfreq_suspend);
  672. INIT_WORK(&pwrscale->devfreq_resume_ws, do_devfreq_resume);
  673. INIT_WORK(&pwrscale->devfreq_notify_ws, do_devfreq_notify);
  674. pwrscale->next_governor_call = ktime_add_us(ktime_get(),
  675. KGSL_GOVERNOR_CALL_INTERVAL);
  676. return 0;
  677. }
/*
 * kgsl_pwrscale_close - clean up pwrscale
 * @device: the device
 *
 * Tears down the busmon devfreq device first (if present), then the GPU
 * devfreq device, workqueue, QoS notifier and the tz governor, in the
 * reverse order of kgsl_pwrscale_init().  Returns early with no cleanup
 * when devfreqptr was never set (init failed or never ran).
 * This function should be called with the device mutex locked.
 */
void kgsl_pwrscale_close(struct kgsl_device *device)
{
        struct kgsl_pwrscale *pwrscale;
        struct kgsl_pwrctrl *pwr;

        pwr = &device->pwrctrl;
        pwrscale = &device->pwrscale;

        /* Busmon is optional; unwind it in reverse order of creation */
        if (pwrscale->bus_devfreq) {
                devfreq_remove_device(pwrscale->bus_devfreq);
                pwrscale->bus_devfreq = NULL;
                devfreq_gpubw_exit();
                dev_pm_opp_remove_all_dynamic(&pwrscale->busmondev);
                device_unregister(&pwrscale->busmondev);
        }

        if (!pwrscale->devfreqptr)
                return;

        if (pwrscale->cooling_dev)
                devfreq_cooling_unregister(pwrscale->cooling_dev);

        /* Drain pending suspend/resume/notify work before destroying */
        if (pwrscale->devfreq_wq) {
                flush_workqueue(pwrscale->devfreq_wq);
                destroy_workqueue(pwrscale->devfreq_wq);
                pwrscale->devfreq_wq = NULL;
        }

        devfreq_remove_device(device->pwrscale.devfreqptr);
        device->pwrscale.devfreqptr = NULL;
        dev_pm_qos_remove_notifier(&device->pdev->dev, &pwr->nb_max, DEV_PM_QOS_MAX_FREQUENCY);
        msm_adreno_tz_exit();
}
  711. static void do_devfreq_suspend(struct work_struct *work)
  712. {
  713. struct kgsl_pwrscale *pwrscale = container_of(work,
  714. struct kgsl_pwrscale, devfreq_suspend_ws);
  715. devfreq_suspend_device(pwrscale->devfreqptr);
  716. devfreq_suspend_device(pwrscale->bus_devfreq);
  717. }
  718. static void do_devfreq_resume(struct work_struct *work)
  719. {
  720. struct kgsl_pwrscale *pwrscale = container_of(work,
  721. struct kgsl_pwrscale, devfreq_resume_ws);
  722. devfreq_resume_device(pwrscale->devfreqptr);
  723. devfreq_resume_device(pwrscale->bus_devfreq);
  724. }
  725. static void do_devfreq_notify(struct work_struct *work)
  726. {
  727. struct kgsl_pwrscale *pwrscale = container_of(work,
  728. struct kgsl_pwrscale, devfreq_notify_ws);
  729. mutex_lock(&pwrscale->devfreqptr->lock);
  730. update_devfreq(pwrscale->devfreqptr);
  731. mutex_unlock(&pwrscale->devfreqptr->lock);
  732. if (pwrscale->bus_devfreq) {
  733. mutex_lock(&pwrscale->bus_devfreq->lock);
  734. update_devfreq(pwrscale->bus_devfreq);
  735. mutex_unlock(&pwrscale->bus_devfreq->lock);
  736. }
  737. }