kgsl_pwrctrl.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/clk/qcom.h>
#include <linux/interconnect.h>
#include <linux/iopoll.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/msm_kgsl.h>
#include <soc/qcom/dcvs.h>

#include "kgsl_device.h"
#include "kgsl_bus.h"
#include "kgsl_pwrscale.h"
#include "kgsl_sysfs.h"
#include "kgsl_trace.h"
#include "kgsl_util.h"

#define UPDATE_BUSY_VAL		1000000

#define KGSL_MAX_BUSLEVELS	20
/*
 * Order matters here: the driver indexes this array by fixed position
 * (e.g. grp_clks[0] and grp_clks[6] in kgsl_pwrctrl_init()), so new
 * entries must go on the end.
 */
static const char * const clocks[KGSL_MAX_CLKS] = {
	"src_clk",
	"core_clk",
	"iface_clk",
	"mem_clk",
	"mem_iface_clk",
	"alt_mem_iface_clk",
	"rbbmtimer_clk",
	"gtcu_clk",
	"gtbu_clk",
	"gtcu_iface_clk",
	"alwayson_clk",
	"isense_clk",
	"rbcpr_clk",
	"iref_clk",
	"gmu_clk",
	"ahb_clk",
	"smmu_vote",
	"apb_pclk",
};
static void kgsl_pwrctrl_clk(struct kgsl_device *device, bool state,
	int requested_state);
static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, bool state);
static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level);
static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
	const char *name);
static void _gpu_clk_prepare_enable(struct kgsl_device *device,
	struct clk *clk, const char *name);
static void _bimc_clk_prepare_enable(struct kgsl_device *device,
	struct clk *clk, const char *name);
/**
 * _adjust_pwrlevel() - Given a requested power level, do bounds checking on
 * the constraints and return the nearest possible level
 * @pwr: Pointer to the kgsl_pwrctrl struct
 * @level: Requested level
 * @pwrc: Pointer to the power constraint to be applied
 *
 * Apply thermal and max/min limits first. Then force the level with a
 * constraint if one exists.
 */
static unsigned int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level,
					struct kgsl_pwr_constraint *pwrc)
{
	unsigned int max_pwrlevel = max_t(unsigned int, pwr->thermal_pwrlevel,
		pwr->max_pwrlevel);
	unsigned int min_pwrlevel = min_t(unsigned int,
		pwr->thermal_pwrlevel_floor,
		pwr->min_pwrlevel);

	/* Ensure that max/min pwrlevels are within thermal max/min limits */
	max_pwrlevel = min_t(unsigned int, max_pwrlevel,
		pwr->thermal_pwrlevel_floor);
	min_pwrlevel = max_t(unsigned int, min_pwrlevel,
		pwr->thermal_pwrlevel);

	switch (pwrc->type) {
	case KGSL_CONSTRAINT_PWRLEVEL: {
		switch (pwrc->sub_type) {
		case KGSL_CONSTRAINT_PWR_MAX:
			return max_pwrlevel;
		case KGSL_CONSTRAINT_PWR_MIN:
			return min_pwrlevel;
		default:
			break;
		}
	}
	break;
	}

	if (level < max_pwrlevel)
		return max_pwrlevel;
	if (level > min_pwrlevel)
		return min_pwrlevel;

	return level;
}
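/*
 * Worked example for _adjust_pwrlevel() (values assumed for illustration):
 * level 0 is the fastest. With thermal_pwrlevel = 2 and max_pwrlevel = 1,
 * the effective max resolves to 2, so a requested level of 0 is clamped to
 * 2; a request numerically above min_pwrlevel is likewise pulled back to
 * min_pwrlevel. Note that the "max" level is the numerically smaller index.
 */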
/**
 * kgsl_pwrctrl_pwrlevel_change_settings() - Program h/w during powerlevel
 * transitions
 * @device: Pointer to the kgsl_device struct
 * @post: flag that is false before the clk_rate change and true after it
 */
static void kgsl_pwrctrl_pwrlevel_change_settings(struct kgsl_device *device,
					bool post)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	unsigned int old = pwr->previous_pwrlevel;
	unsigned int new = pwr->active_pwrlevel;

	if (device->state != KGSL_STATE_ACTIVE)
		return;
	if (old == new)
		return;

	device->ftbl->pwrlevel_change_settings(device, old, new, post);
}
/**
 * kgsl_pwrctrl_adjust_pwrlevel() - Adjust the power level if
 * required by thermal, max/min, constraints, etc
 * @device: Pointer to the kgsl_device struct
 * @new_level: Requested powerlevel, an index into the pwrlevel array
 */
unsigned int kgsl_pwrctrl_adjust_pwrlevel(struct kgsl_device *device,
	unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	unsigned int old_level = pwr->active_pwrlevel;
	bool reset = false;

	/* If a pwr constraint has expired, remove it */
	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
		(time_after(jiffies, pwr->constraint.expires))) {
		struct kgsl_context *context = kgsl_context_get(device,
			pwr->constraint.owner_id);

		/* We couldn't get a reference, so clear the constraint */
		if (!context) {
			reset = true;
			goto done;
		}

		/*
		 * If the last timestamp that set the constraint has retired,
		 * clear the constraint
		 */
		if (kgsl_check_timestamp(device, context,
			pwr->constraint.owner_timestamp)) {
			reset = true;
			kgsl_context_put(context);
			goto done;
		}

		/*
		 * Increase the timeout to keep the constraint at least until
		 * the timestamp retires
		 */
		pwr->constraint.expires = jiffies +
			msecs_to_jiffies(device->pwrctrl.interval_timeout);

		kgsl_context_put(context);
	}

done:
	if (reset) {
		/* Trace the constraint being un-set by the driver */
		trace_kgsl_constraint(device, pwr->constraint.type,
			old_level, 0);
		/* Invalidate the constraint set */
		pwr->constraint.expires = 0;
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
	}

	/*
	 * Adjust the power level if required by thermal, max/min,
	 * constraints, etc
	 */
	return _adjust_pwrlevel(pwr, new_level, &pwr->constraint);
}
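/*
 * Timing sketch (values assumed): with interval_timeout = 80 ms, a
 * constraint whose owning timestamp has not yet retired has its expiry
 * pushed out another 80 ms on every pass through the expiry check above,
 * so it survives until shortly after the workload that set it completes.
 */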
/**
 * kgsl_pwrctrl_pwrlevel_change() - Validate and change power levels
 * @device: Pointer to the kgsl_device struct
 * @new_level: Requested powerlevel, an index into the pwrlevel array
 *
 * Check that any power level constraints are still valid. Update the
 * requested level according to any thermal, max/min, or power constraints.
 * If a new GPU level is going to be set, update the bus to that level's
 * default value. Do not change the bus if a constraint keeps the new
 * level at the current level. Set the new GPU frequency.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrlevel *pwrlevel;
	unsigned int old_level = pwr->active_pwrlevel;

	new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level);

	if (new_level == old_level)
		return;

	kgsl_pwrscale_update_stats(device);

	/*
	 * Set the active and previous powerlevel first in case the clocks are
	 * off - if we don't do this then the pwrlevel change won't take effect
	 * when the clocks come back
	 */
	pwr->active_pwrlevel = new_level;
	pwr->previous_pwrlevel = old_level;

	/*
	 * If the bus is running faster than its default level and the GPU
	 * frequency is moving down, keep the DDR at a relatively high level.
	 */
	if (pwr->bus_mod < 0 || new_level < old_level) {
		pwr->bus_mod = 0;
		pwr->bus_percent_ab = 0;
	}

	/*
	 * Update the bus before the GPU clock to prevent underrun during
	 * frequency increases.
	 */
	if (new_level < old_level)
		kgsl_bus_update(device, KGSL_BUS_VOTE_ON);

	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];

	/* Change register settings if any BEFORE pwrlevel change */
	kgsl_pwrctrl_pwrlevel_change_settings(device, 0);
	device->ftbl->gpu_clock_set(device, pwr->active_pwrlevel);
	_isense_clk_set_rate(pwr, pwr->active_pwrlevel);

	trace_kgsl_pwrlevel(device,
		pwr->active_pwrlevel, pwrlevel->gpu_freq,
		pwr->previous_pwrlevel,
		pwr->pwrlevels[old_level].gpu_freq);

	trace_gpu_frequency(pwrlevel->gpu_freq / 1000, 0);

	/* Update the bus after the GPU clock decreases. */
	if (new_level > old_level)
		kgsl_bus_update(device, KGSL_BUS_VOTE_ON);

	/*
	 * Some targets do not support the bandwidth requirement of the GPU
	 * at TURBO; for such targets we need to set the GPU-BIMC interface
	 * clocks to TURBO directly whenever the GPU runs at TURBO. The TURBO
	 * frequency of gfx-bimc needs to be defined in the target device
	 * tree.
	 */
	if (pwr->gpu_bimc_int_clk) {
		if (pwr->active_pwrlevel == 0 &&
				!pwr->gpu_bimc_interface_enabled) {
			kgsl_pwrctrl_clk_set_rate(pwr->gpu_bimc_int_clk,
					pwr->gpu_bimc_int_clk_freq,
					"bimc_gpu_clk");
			_bimc_clk_prepare_enable(device,
					pwr->gpu_bimc_int_clk,
					"bimc_gpu_clk");
			pwr->gpu_bimc_interface_enabled = true;
		} else if (pwr->previous_pwrlevel == 0 &&
				pwr->gpu_bimc_interface_enabled) {
			clk_disable_unprepare(pwr->gpu_bimc_int_clk);
			pwr->gpu_bimc_interface_enabled = false;
		}
	}

	/* Change register settings if any AFTER pwrlevel change */
	kgsl_pwrctrl_pwrlevel_change_settings(device, 1);
}
void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
			struct kgsl_pwr_constraint *pwrc, uint32_t id, u32 ts)
{
	unsigned int constraint;
	struct kgsl_pwr_constraint *pwrc_old;

	if (device == NULL || pwrc == NULL)
		return;

	constraint = _adjust_pwrlevel(&device->pwrctrl,
		device->pwrctrl.active_pwrlevel, pwrc);
	pwrc_old = &device->pwrctrl.constraint;

	/*
	 * If a constraint is already set, replace it only with a faster
	 * one. If the requested constraint is the same as the current one,
	 * just update ownership and timestamp.
	 */
	if ((pwrc_old->type == KGSL_CONSTRAINT_NONE) ||
		(pwrc_old->sub_type == KGSL_CONSTRAINT_PWR_MIN &&
		pwrc->sub_type == KGSL_CONSTRAINT_PWR_MAX)) {
		pwrc_old->type = pwrc->type;
		pwrc_old->sub_type = pwrc->sub_type;
		pwrc_old->owner_id = id;
		pwrc_old->expires = jiffies +
			msecs_to_jiffies(device->pwrctrl.interval_timeout);
		pwrc_old->owner_timestamp = ts;
		kgsl_pwrctrl_pwrlevel_change(device, constraint);
		/* Trace the constraint being set by the driver */
		trace_kgsl_constraint(device, pwrc_old->type, constraint, 1);
	} else if ((pwrc_old->type == pwrc->type) &&
		(pwrc_old->sub_type == pwrc->sub_type)) {
		pwrc_old->owner_id = id;
		pwrc_old->owner_timestamp = ts;
		pwrc_old->expires = jiffies +
			msecs_to_jiffies(device->pwrctrl.interval_timeout);
	}
}
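/*
 * Illustration of the replacement policy above (scenario assumed): if a
 * PWR_MIN constraint is active and a context requests PWR_MAX, the MAX
 * request wins and triggers a pwrlevel change; a second identical request
 * from another context only transfers ownership and refreshes the expiry,
 * with no additional pwrlevel change.
 */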
static int kgsl_pwrctrl_set_thermal_limit(struct kgsl_device *device,
		u32 level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int ret = -EINVAL;

	if (level >= pwr->num_pwrlevels)
		level = pwr->num_pwrlevels - 1;

	/* dev_pm_qos frequency requests are expressed in kHz, hence / 1000 */
	if (dev_pm_qos_request_active(&pwr->sysfs_thermal_req))
		ret = dev_pm_qos_update_request(&pwr->sysfs_thermal_req,
			(pwr->pwrlevels[level].gpu_freq / 1000));

	return (ret < 0) ? ret : 0;
}
static ssize_t thermal_pwrlevel_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	int ret;
	u32 level;

	ret = kstrtou32(buf, 0, &level);
	if (ret)
		return ret;

	ret = kgsl_pwrctrl_set_thermal_limit(device, level);
	if (ret)
		return ret;

	return count;
}

static ssize_t thermal_pwrlevel_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
}

static ssize_t max_pwrlevel_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int ret;
	unsigned int level = 0;

	ret = kstrtou32(buf, 0, &level);
	if (ret)
		return ret;

	mutex_lock(&device->mutex);

	/* You can't set a maximum power level lower than the minimum */
	if (level > pwr->min_pwrlevel)
		level = pwr->min_pwrlevel;

	pwr->max_pwrlevel = level;

	/* Update the current level using the new limit */
	kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel);
	mutex_unlock(&device->mutex);

	return count;
}
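/*
 * Example usage from user space (sysfs path assumed; on most targets the
 * device node is /sys/class/kgsl/kgsl-3d0):
 *
 *   echo 2 > /sys/class/kgsl/kgsl-3d0/max_pwrlevel
 *   cat /sys/class/kgsl/kgsl-3d0/max_pwrlevel
 */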
static ssize_t max_pwrlevel_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	return scnprintf(buf, PAGE_SIZE, "%u\n", pwr->max_pwrlevel);
}

static void kgsl_pwrctrl_min_pwrlevel_set(struct kgsl_device *device,
					int level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	mutex_lock(&device->mutex);
	if (level > pwr->min_render_pwrlevel)
		level = pwr->min_render_pwrlevel;

	/* You can't set a minimum power level lower than the maximum */
	if (level < pwr->max_pwrlevel)
		level = pwr->max_pwrlevel;

	pwr->min_pwrlevel = level;

	/* Update the current level using the new limit */
	kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel);

	mutex_unlock(&device->mutex);
}

static ssize_t min_pwrlevel_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	int ret;
	unsigned int level = 0;

	ret = kstrtou32(buf, 0, &level);
	if (ret)
		return ret;

	kgsl_pwrctrl_min_pwrlevel_set(device, level);

	return count;
}

static ssize_t min_pwrlevel_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	return scnprintf(buf, PAGE_SIZE, "%u\n", pwr->min_pwrlevel);
}

static ssize_t num_pwrlevels_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels);
}
/*
 * Given a GPU clock value, return the index of the lowest-frequency power
 * level whose frequency is within 5 MHz of the requested clock.
 */
static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
{
	int i;

	for (i = pwr->num_pwrlevels - 1; i >= 0; i--) {
		if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
			return i;
	}

	return -ERANGE;
}
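/*
 * Example (frequency table assumed): with levels {900, 700, 500, 300} MHz,
 * a request of 702000000 Hz matches level 1 (within the 5 MHz window),
 * while 600000000 Hz matches nothing and yields -ERANGE.
 */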
static ssize_t max_gpuclk_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	u32 freq;
	int ret, level;

	ret = kstrtou32(buf, 0, &freq);
	if (ret)
		return ret;

	level = _get_nearest_pwrlevel(&device->pwrctrl, freq);
	if (level < 0)
		return level;

	/*
	 * You would think this would set max_pwrlevel, but the legacy
	 * behavior is that it sets thermal_pwrlevel instead, so we don't
	 * want to mess with that.
	 */
	ret = kgsl_pwrctrl_set_thermal_limit(device, level);
	if (ret)
		return ret;

	return count;
}
static ssize_t max_gpuclk_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	return scnprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}

static ssize_t gpuclk_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	unsigned int val = 0;
	int ret, level;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&device->mutex);
	level = _get_nearest_pwrlevel(pwr, val);
	if (level >= 0)
		kgsl_pwrctrl_pwrlevel_change(device, (unsigned int) level);

	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t gpuclk_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%ld\n",
		kgsl_pwrctrl_active_freq(&device->pwrctrl));
}
static ssize_t idle_timer_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	unsigned int val = 0;
	struct kgsl_device *device = dev_get_drvdata(dev);
	int ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/*
	 * We don't quite accept a maximum of 0xFFFFFFFF due to internal jiffy
	 * math, so make sure the value falls within the largest offset we can
	 * deal with
	 */
	if (val > jiffies_to_usecs(MAX_JIFFY_OFFSET))
		return -EINVAL;

	mutex_lock(&device->mutex);
	device->pwrctrl.interval_timeout = val;
	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t idle_timer_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%u\n", device->pwrctrl.interval_timeout);
}

static ssize_t minbw_timer_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	/* minbw_timer is deprecated, so return -EOPNOTSUPP */
	return -EOPNOTSUPP;
}

static ssize_t minbw_timer_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	/* minbw_timer is deprecated, so report it as always disabled */
	return scnprintf(buf, PAGE_SIZE, "0\n");
}
static ssize_t gpubusy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_clk_stats *stats = &device->pwrctrl.clk_stats;

	ret = scnprintf(buf, PAGE_SIZE, "%7d %7d\n",
			stats->busy_old, stats->total_old);

	/* Reset the stats if the GPU is OFF */
	if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		stats->busy_old = 0;
		stats->total_old = 0;
	}

	return ret;
}
static ssize_t gpu_available_frequencies_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int index, num_chars = 0;

	for (index = 0; index < pwr->num_pwrlevels; index++) {
		num_chars += scnprintf(buf + num_chars,
			PAGE_SIZE - num_chars - 1,
			"%d ", pwr->pwrlevels[index].gpu_freq);

		/* One space for the trailing null and another for the newline */
		if (num_chars >= PAGE_SIZE - 2)
			break;
	}

	buf[num_chars++] = '\n';

	return num_chars;
}

static ssize_t gpu_clock_stats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int index, num_chars = 0;

	mutex_lock(&device->mutex);
	kgsl_pwrscale_update_stats(device);
	mutex_unlock(&device->mutex);

	for (index = 0; index < pwr->num_pwrlevels; index++)
		num_chars += scnprintf(buf + num_chars, PAGE_SIZE - num_chars,
			"%llu ", pwr->clock_times[index]);

	if (num_chars < PAGE_SIZE)
		buf[num_chars++] = '\n';

	return num_chars;
}

static ssize_t reset_count_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", device->reset_counter);
}
static void __force_on(struct kgsl_device *device, int flag, int on)
{
	if (on) {
		switch (flag) {
		case KGSL_PWRFLAGS_CLK_ON:
			/* make sure the pwrrail is ON before enabling clocks */
			kgsl_pwrctrl_pwrrail(device, true);
			kgsl_pwrctrl_clk(device, true,
				KGSL_STATE_ACTIVE);
			break;
		case KGSL_PWRFLAGS_AXI_ON:
			kgsl_pwrctrl_axi(device, true);
			break;
		case KGSL_PWRFLAGS_POWER_ON:
			kgsl_pwrctrl_pwrrail(device, true);
			break;
		}
		set_bit(flag, &device->pwrctrl.ctrl_flags);
	} else {
		clear_bit(flag, &device->pwrctrl.ctrl_flags);
	}
}

static ssize_t __force_on_show(struct device *dev,
					struct device_attribute *attr,
					char *buf, int flag)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
		test_bit(flag, &device->pwrctrl.ctrl_flags));
}

static ssize_t __force_on_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count,
					int flag)
{
	unsigned int val = 0;
	struct kgsl_device *device = dev_get_drvdata(dev);
	int ret;

	if (gmu_core_gpmu_isenabled(device))
		return -EOPNOTSUPP;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&device->mutex);
	__force_on(device, flag, val);
	mutex_unlock(&device->mutex);

	return count;
}
static ssize_t force_clk_on_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_CLK_ON);
}

static ssize_t force_clk_on_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_CLK_ON);
}

static ssize_t force_bus_on_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_AXI_ON);
}

static ssize_t force_bus_on_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_AXI_ON);
}

static ssize_t force_rail_on_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_POWER_ON);
}

static ssize_t force_rail_on_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_POWER_ON);
}

static ssize_t force_no_nap_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	/* force_no_nap is deprecated, so report it as always disabled */
	return scnprintf(buf, PAGE_SIZE, "0\n");
}

static ssize_t force_no_nap_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	/* force_no_nap is deprecated, so return -EOPNOTSUPP */
	return -EOPNOTSUPP;
}
static ssize_t bus_split_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.bus_control);
}

static ssize_t bus_split_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int val = 0;
	struct kgsl_device *device = dev_get_drvdata(dev);
	int ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&device->mutex);
	device->pwrctrl.bus_control = val ? true : false;
	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t default_pwrlevel_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.default_pwrlevel);
}

static ssize_t default_pwrlevel_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrscale *pwrscale = &device->pwrscale;
	int ret;
	unsigned int level = 0;

	ret = kstrtou32(buf, 0, &level);
	if (ret)
		return ret;

	if (level >= pwr->num_pwrlevels)
		return count;

	mutex_lock(&device->mutex);
	pwr->default_pwrlevel = level;
	pwrscale->gpu_profile.profile.initial_freq
			= pwr->pwrlevels[level].gpu_freq;

	mutex_unlock(&device->mutex);

	return count;
}
static ssize_t popp_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	/* POPP is deprecated, so report it as always disabled */
	return scnprintf(buf, PAGE_SIZE, "0\n");
}

static ssize_t _gpu_busy_show(struct kgsl_device *device,
		char *buf)
{
	int ret;
	struct kgsl_clk_stats *stats = &device->pwrctrl.clk_stats;
	unsigned int busy_percent = 0;

	if (stats->total_old != 0)
		busy_percent = (stats->busy_old * 100) / stats->total_old;

	ret = scnprintf(buf, PAGE_SIZE, "%d %%\n", busy_percent);

	/* Reset the stats if the GPU is OFF */
	if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		stats->busy_old = 0;
		stats->total_old = 0;
	}

	return ret;
}

static ssize_t gpu_busy_percentage_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return _gpu_busy_show(device, buf);
}
static ssize_t _min_clock_mhz_show(struct kgsl_device *device,
		char *buf)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	return scnprintf(buf, PAGE_SIZE, "%d\n",
		pwr->pwrlevels[pwr->min_pwrlevel].gpu_freq / 1000000);
}

static ssize_t min_clock_mhz_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return _min_clock_mhz_show(device, buf);
}

static ssize_t _min_clock_mhz_store(struct kgsl_device *device,
		const char *buf, size_t count)
{
	int level, ret;
	unsigned int freq;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	ret = kstrtou32(buf, 0, &freq);
	if (ret)
		return ret;

	freq *= 1000000;
	level = _get_nearest_pwrlevel(pwr, freq);

	if (level >= 0)
		kgsl_pwrctrl_min_pwrlevel_set(device, level);

	return count;
}

static ssize_t min_clock_mhz_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return _min_clock_mhz_store(device, buf, count);
}
static ssize_t _max_clock_mhz_show(struct kgsl_device *device, char *buf)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	return scnprintf(buf, PAGE_SIZE, "%d\n",
		pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq / 1000000);
}

static ssize_t max_clock_mhz_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return _max_clock_mhz_show(device, buf);
}

static ssize_t _max_clock_mhz_store(struct kgsl_device *device,
		const char *buf, size_t count)
{
	u32 freq;
	int ret, level;

	ret = kstrtou32(buf, 0, &freq);
	if (ret)
		return ret;

	level = _get_nearest_pwrlevel(&device->pwrctrl, freq * 1000000);
	if (level < 0)
		return level;

	/*
	 * You would think this would set max_pwrlevel, but the legacy
	 * behavior is that it sets thermal_pwrlevel instead, so we don't
	 * want to mess with that.
	 */
	ret = kgsl_pwrctrl_set_thermal_limit(device, level);
	if (ret)
		return ret;

	return count;
}

static ssize_t max_clock_mhz_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return _max_clock_mhz_store(device, buf, count);
}
static ssize_t _clock_mhz_show(struct kgsl_device *device, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%ld\n",
			kgsl_pwrctrl_active_freq(&device->pwrctrl) / 1000000);
}

static ssize_t clock_mhz_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return _clock_mhz_show(device, buf);
}

static ssize_t _freq_table_mhz_show(struct kgsl_device *device,
		char *buf)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int index, num_chars = 0;

	for (index = 0; index < pwr->num_pwrlevels; index++) {
		num_chars += scnprintf(buf + num_chars,
			PAGE_SIZE - num_chars - 1,
			"%d ", pwr->pwrlevels[index].gpu_freq / 1000000);

		/* One space for the trailing null and another for the newline */
		if (num_chars >= PAGE_SIZE - 2)
			break;
	}

	buf[num_chars++] = '\n';

	return num_chars;
}

static ssize_t freq_table_mhz_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return _freq_table_mhz_show(device, buf);
}
static ssize_t _gpu_tmu_show(struct kgsl_device *device,
		char *buf)
{
	struct device *dev;
	struct thermal_zone_device *thermal_dev;
	int temperature = 0, max_temp = 0;
	const char *name;
	struct property *prop;

	dev = &device->pdev->dev;

	of_property_for_each_string(dev->of_node, "qcom,tzone-names", prop, name) {
		thermal_dev = thermal_zone_get_zone_by_name(name);
		if (IS_ERR(thermal_dev))
			continue;

		if (thermal_zone_get_temp(thermal_dev, &temperature))
			continue;

		max_temp = max(temperature, max_temp);
	}

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			max_temp);
}

static ssize_t temp_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return _gpu_tmu_show(device, buf);
}
static ssize_t pwrscale_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	int ret;
	unsigned int enable = 0;

	ret = kstrtou32(buf, 0, &enable);
	if (ret)
		return ret;

	mutex_lock(&device->mutex);

	if (enable)
		kgsl_pwrscale_enable(device);
	else
		kgsl_pwrscale_disable(device, false);

	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t pwrscale_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrscale *psc = &device->pwrscale;

	return scnprintf(buf, PAGE_SIZE, "%u\n", psc->enabled);
}
static DEVICE_ATTR_RO(temp);
static DEVICE_ATTR_RW(gpuclk);
static DEVICE_ATTR_RW(max_gpuclk);
static DEVICE_ATTR_RW(idle_timer);
static DEVICE_ATTR_RW(minbw_timer);
static DEVICE_ATTR_RO(gpubusy);
static DEVICE_ATTR_RO(gpu_available_frequencies);
static DEVICE_ATTR_RO(gpu_clock_stats);
static DEVICE_ATTR_RW(max_pwrlevel);
static DEVICE_ATTR_RW(min_pwrlevel);
static DEVICE_ATTR_RW(thermal_pwrlevel);
static DEVICE_ATTR_RO(num_pwrlevels);
static DEVICE_ATTR_RO(reset_count);
static DEVICE_ATTR_RW(force_clk_on);
static DEVICE_ATTR_RW(force_bus_on);
static DEVICE_ATTR_RW(force_rail_on);
static DEVICE_ATTR_RW(bus_split);
static DEVICE_ATTR_RW(default_pwrlevel);
static DEVICE_ATTR_RO(popp);
static DEVICE_ATTR_RW(force_no_nap);
static DEVICE_ATTR_RO(gpu_busy_percentage);
static DEVICE_ATTR_RW(min_clock_mhz);
static DEVICE_ATTR_RW(max_clock_mhz);
static DEVICE_ATTR_RO(clock_mhz);
static DEVICE_ATTR_RO(freq_table_mhz);
static DEVICE_ATTR_RW(pwrscale);
static const struct attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk.attr,
	&dev_attr_max_gpuclk.attr,
	&dev_attr_idle_timer.attr,
	&dev_attr_minbw_timer.attr,
	&dev_attr_gpubusy.attr,
	&dev_attr_gpu_available_frequencies.attr,
	&dev_attr_gpu_clock_stats.attr,
	&dev_attr_max_pwrlevel.attr,
	&dev_attr_min_pwrlevel.attr,
	&dev_attr_thermal_pwrlevel.attr,
	&dev_attr_num_pwrlevels.attr,
	&dev_attr_reset_count.attr,
	&dev_attr_force_clk_on.attr,
	&dev_attr_force_bus_on.attr,
	&dev_attr_force_rail_on.attr,
	&dev_attr_force_no_nap.attr,
	&dev_attr_bus_split.attr,
	&dev_attr_default_pwrlevel.attr,
	&dev_attr_popp.attr,
	&dev_attr_gpu_busy_percentage.attr,
	&dev_attr_min_clock_mhz.attr,
	&dev_attr_max_clock_mhz.attr,
	&dev_attr_clock_mhz.attr,
	&dev_attr_freq_table_mhz.attr,
	&dev_attr_temp.attr,
	&dev_attr_pwrscale.attr,
	NULL,
};
static GPU_SYSFS_ATTR(gpu_busy, 0444, _gpu_busy_show, NULL);
static GPU_SYSFS_ATTR(gpu_min_clock, 0644, _min_clock_mhz_show,
	_min_clock_mhz_store);
static GPU_SYSFS_ATTR(gpu_max_clock, 0644, _max_clock_mhz_show,
	_max_clock_mhz_store);
static GPU_SYSFS_ATTR(gpu_clock, 0444, _clock_mhz_show, NULL);
static GPU_SYSFS_ATTR(gpu_freq_table, 0444, _freq_table_mhz_show, NULL);
static GPU_SYSFS_ATTR(gpu_tmu, 0444, _gpu_tmu_show, NULL);

static const struct attribute *gpu_sysfs_attr_list[] = {
	&gpu_sysfs_attr_gpu_busy.attr,
	&gpu_sysfs_attr_gpu_min_clock.attr,
	&gpu_sysfs_attr_gpu_max_clock.attr,
	&gpu_sysfs_attr_gpu_clock.attr,
	&gpu_sysfs_attr_gpu_freq_table.attr,
	&gpu_sysfs_attr_gpu_tmu.attr,
	NULL,
};
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	int ret;

	ret = sysfs_create_files(&device->dev->kobj, pwrctrl_attr_list);
	if (ret)
		return ret;

	if (!device->gpu_sysfs_kobj.state_in_sysfs)
		return 0;

	return sysfs_create_files(&device->gpu_sysfs_kobj, gpu_sysfs_attr_list);
}
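/*
 * Note: this registers two views of the same controls - the kgsl device
 * attributes (e.g. /sys/class/kgsl/kgsl-3d0/, path assumed) and, when the
 * gpu_sysfs_kobj has been set up, the generic GPU tree served by the
 * GPU_SYSFS_ATTR entries above.
 */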
/*
 * Track the amount of time the GPU is on vs the total system time.
 * Regularly update the percentage of busy time displayed by sysfs.
 */
void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy)
{
	struct kgsl_clk_stats *stats = &device->pwrctrl.clk_stats;

	stats->total += time;
	stats->busy += busy;

	if (stats->total < UPDATE_BUSY_VAL)
		return;

	/* Update the output regularly and reset the counters. */
	stats->total_old = stats->total;
	stats->busy_old = stats->busy;
	stats->total = 0;
	stats->busy = 0;

	trace_kgsl_gpubusy(device, stats->busy_old, stats->total_old);
}
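/*
 * Worked example (numbers assumed): with UPDATE_BUSY_VAL = 1000000, once
 * stats->total accumulates to 1200000 with stats->busy = 300000, the
 * snapshot becomes busy_old/total_old = 300000/1200000 and the gpubusy and
 * gpu_busy_percentage nodes report 25 %.
 */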
static void kgsl_pwrctrl_clk(struct kgsl_device *device, bool state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;

	if (gmu_core_gpmu_isenabled(device))
		return;
	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
		return;

	if (!state) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state,
					kgsl_pwrctrl_active_freq(pwr));
			/* Disable gpu-bimc-interface clocks */
			if (pwr->gpu_bimc_int_clk &&
					pwr->gpu_bimc_interface_enabled) {
				clk_disable_unprepare(pwr->gpu_bimc_int_clk);
				pwr->gpu_bimc_interface_enabled = false;
			}

			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				clk_disable(pwr->grp_clks[i]);

			/* High latency clock maintenance. */
			if (pwr->pwrlevels[0].gpu_freq > 0) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					clk_unprepare(pwr->grp_clks[i]);
				device->ftbl->gpu_clock_set(device,
						pwr->num_pwrlevels - 1);
				_isense_clk_set_rate(pwr,
					pwr->num_pwrlevels - 1);
			}

			/* Turn off the IOMMU clocks */
			kgsl_mmu_disable_clk(&device->mmu);
		} else if (requested_state == KGSL_STATE_SLUMBER) {
			/* High latency clock maintenance. */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				clk_unprepare(pwr->grp_clks[i]);
			if (pwr->pwrlevels[0].gpu_freq > 0) {
				device->ftbl->gpu_clock_set(device,
						pwr->num_pwrlevels - 1);
				_isense_clk_set_rate(pwr,
					pwr->num_pwrlevels - 1);
			}
		}
	} else {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state,
					kgsl_pwrctrl_active_freq(pwr));
			if (pwr->pwrlevels[0].gpu_freq > 0) {
				device->ftbl->gpu_clock_set(device,
						pwr->active_pwrlevel);
				_isense_clk_set_rate(pwr,
					pwr->active_pwrlevel);
			}

			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				_gpu_clk_prepare_enable(device,
						pwr->grp_clks[i], clocks[i]);

			/* Enable the gpu-bimc-interface clocks */
			if (pwr->gpu_bimc_int_clk) {
				if (pwr->active_pwrlevel == 0 &&
					!pwr->gpu_bimc_interface_enabled) {
					kgsl_pwrctrl_clk_set_rate(
						pwr->gpu_bimc_int_clk,
						pwr->gpu_bimc_int_clk_freq,
						"bimc_gpu_clk");
					_bimc_clk_prepare_enable(device,
						pwr->gpu_bimc_int_clk,
						"bimc_gpu_clk");
					pwr->gpu_bimc_interface_enabled = true;
				}
			}

			/* Turn on the IOMMU clocks */
			kgsl_mmu_enable_clk(&device->mmu);
		}
	}
}
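/*
 * Note on the clk API split used above: clk_enable()/clk_disable() are the
 * low-latency calls, while clk_prepare()/clk_unprepare() may sleep. The
 * slumber path drops both ("high latency clock maintenance") and also
 * parks the GPU clock at the slowest power level so the next wakeup starts
 * from a known low frequency.
 */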
int kgsl_pwrctrl_axi(struct kgsl_device *device, bool state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->ctrl_flags))
		return 0;

	if (!state) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			return kgsl_bus_update(device, KGSL_BUS_VOTE_OFF);
		}
	} else {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			return kgsl_bus_update(device, KGSL_BUS_VOTE_ON);
		}
	}

	return 0;
}
int kgsl_pwrctrl_enable_cx_gdsc(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct regulator *regulator = pwr->cx_gdsc;
	int ret;

	if (IS_ERR_OR_NULL(regulator))
		return 0;

	ret = wait_for_completion_timeout(&pwr->cx_gdsc_gate, msecs_to_jiffies(5000));
	if (!ret) {
		dev_err(device->dev, "GPU CX wait timeout. Dumping CX votes:\n");
		/* Dump the cx regulator consumer list */
		qcom_clk_dump(NULL, regulator, false);
	}

	ret = regulator_enable(regulator);
	if (ret)
		dev_err(device->dev, "Failed to enable CX regulator: %d\n", ret);

	kgsl_mmu_send_tlb_hint(&device->mmu, false);
	pwr->cx_gdsc_wait = false;

	return ret;
}
static int kgsl_pwrctrl_enable_gx_gdsc(struct kgsl_device *device)
{
	struct regulator *regulator = device->pwrctrl.gx_gdsc;
	int ret;

	if (IS_ERR_OR_NULL(regulator))
		return 0;

	ret = regulator_enable(regulator);
	if (ret)
		dev_err(device->dev, "Failed to enable GX regulator: %d\n", ret);

	return ret;
}
void kgsl_pwrctrl_disable_cx_gdsc(struct kgsl_device *device)
{
	struct regulator *regulator = device->pwrctrl.cx_gdsc;

	if (IS_ERR_OR_NULL(regulator))
		return;

	kgsl_mmu_send_tlb_hint(&device->mmu, true);
	reinit_completion(&device->pwrctrl.cx_gdsc_gate);
	device->pwrctrl.cx_gdsc_wait = true;
	regulator_disable(regulator);
}

static void kgsl_pwrctrl_disable_gx_gdsc(struct kgsl_device *device)
{
	struct regulator *regulator = device->pwrctrl.gx_gdsc;

	if (IS_ERR_OR_NULL(regulator))
		return;

	if (!kgsl_regulator_disable_wait(regulator, 200))
		dev_err(device->dev, "Regulator vdd is stuck on\n");
}
static int enable_regulators(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int ret;

	if (test_and_set_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->power_flags))
		return 0;

	ret = kgsl_pwrctrl_enable_cx_gdsc(device);
	if (!ret) {
		/* Set the parent to retention voltage to power up the vdd supply */
		ret = kgsl_regulator_set_voltage(device->dev,
			pwr->gx_gdsc_parent,
			pwr->gx_gdsc_parent_min_corner);
		if (!ret)
			ret = kgsl_pwrctrl_enable_gx_gdsc(device);
	}

	if (ret) {
		clear_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->power_flags);
		return ret;
	}

	trace_kgsl_rail(device, KGSL_PWRFLAGS_POWER_ON);

	return 0;
}
int kgsl_pwrctrl_probe_regulators(struct kgsl_device *device,
		struct platform_device *pdev)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	pwr->cx_gdsc = devm_regulator_get(&pdev->dev, "vddcx");
	if (IS_ERR(pwr->cx_gdsc)) {
		if (PTR_ERR(pwr->cx_gdsc) != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Couldn't get the vddcx gdsc\n");
		return PTR_ERR(pwr->cx_gdsc);
	}

	pwr->gx_gdsc = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gx_gdsc)) {
		if (PTR_ERR(pwr->gx_gdsc) != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Couldn't get the vdd gdsc\n");
		return PTR_ERR(pwr->gx_gdsc);
	}

	return 0;
}
static int kgsl_cx_gdsc_event(struct notifier_block *nb,
	unsigned long event, void *data)
{
	struct kgsl_pwrctrl *pwr = container_of(nb, struct kgsl_pwrctrl, cx_gdsc_nb);
	struct kgsl_device *device = container_of(pwr, struct kgsl_device, pwrctrl);
	u32 val;

	if (!(event & REGULATOR_EVENT_DISABLE) || !pwr->cx_gdsc_wait)
		return 0;

	if (pwr->cx_cfg_gdsc_offset) {
		if (kgsl_regmap_read_poll_timeout(&device->regmap, pwr->cx_cfg_gdsc_offset,
			val, (val & BIT(15)), 100, 100 * 1000))
			dev_err(device->dev, "GPU CX wait timeout.\n");
	}

	pwr->cx_gdsc_wait = false;
	complete_all(&pwr->cx_gdsc_gate);

	return 0;
}

int kgsl_register_gdsc_notifier(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (!IS_ERR_OR_NULL(pwr->cx_gdsc)) {
		pwr->cx_gdsc_nb.notifier_call = kgsl_cx_gdsc_event;
		return devm_regulator_register_notifier(pwr->cx_gdsc, &pwr->cx_gdsc_nb);
	}

	return 0;
}
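/*
 * CX GDSC handshake, summarized (sequence inferred from the code above):
 * kgsl_pwrctrl_disable_cx_gdsc() reinitializes cx_gdsc_gate and sets
 * cx_gdsc_wait before dropping the regulator; the REGULATOR_EVENT_DISABLE
 * notifier then polls the optional cx_cfg_gdsc_offset register for the
 * power-down ack bit (BIT(15)) and completes the gate, which is what
 * kgsl_pwrctrl_enable_cx_gdsc() waits on (up to 5 s) before re-enabling.
 */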
static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, bool state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int status = 0;

	if (gmu_core_gpmu_isenabled(device))
		return 0;

	/*
	 * Disabling the regulator means also disabling dependent clocks.
	 * Hence don't disable it if force clock ON is set.
	 */
	if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags) ||
		test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
		return 0;

	if (!state) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			kgsl_mmu_send_tlb_hint(&device->mmu, true);
			trace_kgsl_rail(device, state);
			/* Set the parent to retention voltage to disable CPR interrupts */
			kgsl_regulator_set_voltage(device->dev, pwr->gx_gdsc_parent,
				pwr->gx_gdsc_parent_min_corner);
			kgsl_pwrctrl_disable_gx_gdsc(device);
			/* Remove the vote for the vdd parent supply */
			kgsl_regulator_set_voltage(device->dev, pwr->gx_gdsc_parent, 0);
			kgsl_pwrctrl_disable_cx_gdsc(device);
		}
	} else {
		status = enable_regulators(device);
		kgsl_mmu_send_tlb_hint(&device->mmu, false);
	}

	return status;
}
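/*
 * Power-down ordering above (as implemented): drop GX first, then remove
 * the vdd parent-supply vote, and only then release CX, since GX is
 * expected to depend on CX remaining up while it ramps down.
 */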
void kgsl_pwrctrl_irq(struct kgsl_device *device, bool state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (!(device->freq_limiter_intr_num || pwr->interrupt_num))
		return;

	if (state) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (pwr->interrupt_num > 0)
				enable_irq(pwr->interrupt_num);
			if (device->freq_limiter_intr_num > 0)
				enable_irq(device->freq_limiter_intr_num);
		}
	} else {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (device->freq_limiter_intr_num > 0)
				disable_irq(device->freq_limiter_intr_num);
			if (in_interrupt() && (pwr->interrupt_num > 0))
				disable_irq_nosync(pwr->interrupt_num);
			else if (pwr->interrupt_num > 0)
				disable_irq(pwr->interrupt_num);
		}
	}
}
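/*
 * disable_irq() waits for any running handler on that line to finish, so
 * it must not be called from interrupt context on the GPU's own IRQ;
 * hence the disable_irq_nosync() variant when in_interrupt() is true.
 */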
static int _get_clocks(struct kgsl_device *device)
{
	struct device *dev = &device->pdev->dev;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	const char *name;
	struct property *prop;

	pwr->isense_clk_indx = 0;
	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		int i;

		for (i = 0; i < KGSL_MAX_CLKS; i++) {
			if (pwr->grp_clks[i] || strcmp(clocks[i], name))
				continue;

			/* apb_pclk should only be enabled if QCOM_KGSL_QDSS_STM is enabled */
			if (!strcmp(name, "apb_pclk") && !IS_ENABLED(CONFIG_QCOM_KGSL_QDSS_STM))
				continue;

			pwr->grp_clks[i] = devm_clk_get(dev, name);

			if (IS_ERR(pwr->grp_clks[i])) {
				int ret = PTR_ERR(pwr->grp_clks[i]);

				dev_err(dev, "Couldn't get clock: %s (%d)\n",
					name, ret);
				pwr->grp_clks[i] = NULL;
				return ret;
			}

			if (!strcmp(name, "isense_clk"))
				pwr->isense_clk_indx = i;
			break;
		}
	}

	if (pwr->isense_clk_indx && of_property_read_u32(dev->of_node,
		"qcom,isense-clk-on-level", &pwr->isense_clk_on_level)) {
		dev_err(dev, "Couldn't get isense clock on level\n");
		return -ENXIO;
	}

	return 0;
}
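/*
 * Example of the matching device-tree input (entries illustrative; only
 * names that also appear in the clocks[] table above are consumed):
 *
 *   clock-names = "core_clk", "iface_clk", "isense_clk";
 *   qcom,isense-clk-on-level = <1>;
 */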
static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level)
{
	int rate;

	if (!pwr->isense_clk_indx)
		return -EINVAL;

	/* For levels slower than the on-level, park isense at the XO rate */
	rate = clk_round_rate(pwr->grp_clks[pwr->isense_clk_indx],
		level > pwr->isense_clk_on_level ?
		KGSL_XO_CLK_FREQ : KGSL_ISENSE_CLK_FREQ);

	return kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx],
		rate, clocks[pwr->isense_clk_indx]);
}
/*
 * _gpu_clk_prepare_enable - Enable the specified GPU clock
 * Try once to enable it and log an error on failure
 */
static void _gpu_clk_prepare_enable(struct kgsl_device *device,
		struct clk *clk, const char *name)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		dev_err(device->dev, "GPU Clock %s enable error:%d\n", name, ret);
}
/*
 * _bimc_clk_prepare_enable - Enable the specified BIMC clock
 * Try once to enable it and log an error on failure
 */
static void _bimc_clk_prepare_enable(struct kgsl_device *device,
		struct clk *clk, const char *name)
{
	int ret = clk_prepare_enable(clk);

	/* Log the failure to facilitate debug */
	if (ret)
		dev_err(device->dev, "GPU clock %s enable error:%d\n",
			name, ret);
}
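/* Set a clock to the requested frequency, emitting a WARN on failure */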
static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
		const char *name)
{
	int ret = clk_set_rate(grp_clk, freq);

	WARN(ret, "%s set freq %d failed:%d\n", name, freq, ret);
	return ret;
}
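/*
 * kgsl_pwrctrl_init - One-time power control setup for the device
 * @device: Pointer to the kgsl_device struct
 *
 * Acquires clocks and regulators from the device tree, rounds the power
 * level table to rates the source clock supports, registers the thermal
 * PM QoS request and the gdsc notifier, and enables runtime PM.
 */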
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result, freq;
	struct platform_device *pdev = device->pdev;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	result = _get_clocks(device);
	if (result)
		return result;

	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* Get the gfx-bimc interface clock frequency, if defined */
	if (!of_property_read_u32(pdev->dev.of_node,
			"qcom,gpu-bimc-interface-clk-freq",
			&pwr->gpu_bimc_int_clk_freq))
		pwr->gpu_bimc_int_clk = devm_clk_get(&pdev->dev,
			"bimc_gpu_clk");

	if (pwr->num_pwrlevels == 0) {
		dev_err(device->dev, "No power levels are defined\n");
		return -EINVAL;
	}

	init_waitqueue_head(&device->active_cnt_wq);

	/* Initialize the thermal clock constraints */
	pwr->thermal_pwrlevel = 0;
	pwr->thermal_pwrlevel_floor = pwr->num_pwrlevels - 1;

	result = dev_pm_qos_add_request(&pdev->dev, &pwr->sysfs_thermal_req,
			DEV_PM_QOS_MAX_FREQUENCY,
			PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (result < 0)
		dev_err(device->dev, "PM QoS thermal request failed:%d\n", result);

	for (i = 0; i < pwr->num_pwrlevels; i++) {
		freq = pwr->pwrlevels[i].gpu_freq;

		if (freq > 0)
			freq = clk_round_rate(pwr->grp_clks[0], freq);

		if (freq >= pwr->pwrlevels[i].gpu_freq)
			pwr->pwrlevels[i].gpu_freq = freq;
	}

	clk_set_rate(pwr->grp_clks[0],
		pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	freq = clk_round_rate(pwr->grp_clks[6], KGSL_XO_CLK_FREQ);
	if (freq > 0)
		kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6],
			freq, clocks[6]);

	_isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1);

	if (of_property_read_bool(pdev->dev.of_node, "vddcx-supply"))
		pwr->cx_gdsc = devm_regulator_get(&pdev->dev, "vddcx");

	if (of_property_read_bool(pdev->dev.of_node, "vdd-supply"))
		pwr->gx_gdsc = devm_regulator_get(&pdev->dev, "vdd");

	if (of_property_read_bool(pdev->dev.of_node, "vdd-parent-supply")) {
		pwr->gx_gdsc_parent = devm_regulator_get(&pdev->dev,
			"vdd-parent");
		if (IS_ERR(pwr->gx_gdsc_parent)) {
			dev_err(device->dev,
				"Failed to get vdd-parent regulator:%ld\n",
				PTR_ERR(pwr->gx_gdsc_parent));
			return -ENODEV;
		}

		if (of_property_read_u32(pdev->dev.of_node,
				"vdd-parent-min-corner",
				&pwr->gx_gdsc_parent_min_corner)) {
			dev_err(device->dev,
				"vdd-parent-min-corner not found\n");
			return -ENODEV;
		}
	}

	init_completion(&pwr->cx_gdsc_gate);
	complete_all(&pwr->cx_gdsc_gate);

	result = kgsl_register_gdsc_notifier(device);
	if (result) {
		dev_err(&pdev->dev, "Failed to register gdsc notifier: %d\n", result);
		return result;
	}

	pwr->power_flags = 0;

	pm_runtime_enable(&pdev->dev);

	return 0;
}
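/*
 * kgsl_pwrctrl_close - Tear down what kgsl_pwrctrl_init set up
 * @device: Pointer to the kgsl_device struct
 */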
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	pwr->power_flags = 0;

	if (dev_pm_qos_request_active(&pwr->sysfs_thermal_req))
		dev_pm_qos_remove_request(&pwr->sysfs_thermal_req);

	pm_runtime_disable(&device->pdev->dev);
}
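/*
 * kgsl_idle_check - Worker that moves an idle GPU to a lower power state
 * @work: Pointer to the work_struct embedded in the kgsl_device
 *
 * Executes the transition to the requested state (typically SLUMBER)
 * once the active count is zero and no submission is in flight. If the
 * GPU is still busy (-EBUSY), the request is restored and the work is
 * rescheduled.
 */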
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
		idle_check_ws);
	int ret = 0;
	unsigned int requested_state;

	mutex_lock(&device->mutex);

	/*
	 * After scheduling idle work for transitioning to SLUMBER, it's
	 * possible that the requested state changes to NONE if any new
	 * workload arrives before kgsl_idle_check is executed or gets the
	 * device mutex. In such a case, there is no need to change the
	 * state to NONE.
	 */
	if (device->requested_state == KGSL_STATE_NONE) {
		mutex_unlock(&device->mutex);
		return;
	}

	requested_state = device->requested_state;

	if (device->state == KGSL_STATE_ACTIVE) {
		if (!atomic_read(&device->active_cnt)) {
			spin_lock(&device->submit_lock);
			if (device->submit_now) {
				spin_unlock(&device->submit_lock);
				goto done;
			}
			/* Don't allow GPU inline submission in SLUMBER */
			if (requested_state == KGSL_STATE_SLUMBER)
				device->skip_inline_submit = true;
			spin_unlock(&device->submit_lock);

			ret = kgsl_pwrctrl_change_state(device,
				device->requested_state);
			if (ret == -EBUSY) {
				if (requested_state == KGSL_STATE_SLUMBER) {
					spin_lock(&device->submit_lock);
					device->skip_inline_submit = false;
					spin_unlock(&device->submit_lock);
				}
				/*
				 * If the GPU is currently busy, restore
				 * the requested state and reschedule
				 * idle work.
				 */
				kgsl_pwrctrl_request_state(device,
					requested_state);
				kgsl_schedule_work(&device->idle_check_ws);
			}
		}
done:
		if (!ret)
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);

		if (device->state == KGSL_STATE_ACTIVE)
			kgsl_start_idle_timer(device);
	}

	kgsl_pwrscale_update(device);
	mutex_unlock(&device->mutex);
}
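/*
 * kgsl_timer - Idle timer expiry handler
 * @t: Pointer to the expired timer
 *
 * Requests SLUMBER and defers the actual transition to the idle-check
 * worker so it runs in process context.
 */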
void kgsl_timer(struct timer_list *t)
{
	struct kgsl_device *device = from_timer(device, t, idle_timer);

	if (device->requested_state != KGSL_STATE_SUSPEND) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
		/* Have work run in a non-interrupt context. */
		kgsl_schedule_work(&device->idle_check_ws);
	}
}
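/* Returns true only when both the GPU clocks and the AXI path are on */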
static bool kgsl_pwrctrl_isenabled(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	return ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) != 0) &&
		(test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags) != 0));
}
/**
 * kgsl_pre_hwaccess - Enforce preconditions for touching registers
 * @device: The device
 *
 * This function ensures that the correct lock is held and that the GPU
 * clock is on immediately before a register is read or written. Note
 * that this function does not check active_cnt because the registers
 * must be accessed during device start and stop, when the active_cnt
 * may legitimately be 0.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	/* In order to touch a register you must hold the device mutex */
	WARN_ON(!mutex_is_locked(&device->mutex));

	/*
	 * A register access without device power will cause a fatal timeout.
	 * This is not valid for targets with a GMU.
	 */
	if (!gmu_core_gpmu_isenabled(device))
		WARN_ON(!kgsl_pwrctrl_isenabled(device));
}
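/*
 * kgsl_pwrctrl_enable - Power up the rail, clocks and AXI path
 * @device: Pointer to the kgsl_device struct
 *
 * Brings the GPU up at the default power level and finishes with the
 * target-specific regulator_enable() callback.
 */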
static int kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int level, status;

	level = pwr->default_pwrlevel;
	kgsl_pwrctrl_pwrlevel_change(device, level);

	/* Order pwrrail/clk sequence based upon platform */
	status = kgsl_pwrctrl_pwrrail(device, true);
	if (status)
		return status;
	kgsl_pwrctrl_clk(device, true, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, true);
	return device->ftbl->regulator_enable(device);
}
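/*
 * kgsl_pwrctrl_clear_l3_vote - Drop any outstanding L3 bus vote
 * @device: Pointer to the kgsl_device struct
 */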
void kgsl_pwrctrl_clear_l3_vote(struct kgsl_device *device)
{
	int status;
	struct dcvs_freq freq = {0};

	if (!device->num_l3_pwrlevels)
		return;

	freq.hw_type = DCVS_L3;
	status = qcom_dcvs_update_votes(KGSL_L3_DEVICE, &freq, 1,
			DCVS_SLOW_PATH);
	if (!status)
		device->cur_l3_pwrlevel = 0;
	else
		dev_err(device->dev, "Could not clear l3_vote: %d\n",
			status);
}
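/* Reverse of kgsl_pwrctrl_enable(): regulators, AXI, clocks, then the rail */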
static void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	kgsl_pwrctrl_clear_l3_vote(device);

	/* Order pwrrail/clk sequence based upon platform */
	device->ftbl->regulator_disable(device);
	kgsl_pwrctrl_axi(device, false);
	kgsl_pwrctrl_clk(device, false, KGSL_STATE_SLUMBER);
	kgsl_pwrctrl_pwrrail(device, false);
}
/**
 * _init() - Get the GPU ready to start, but don't turn anything on
 * @device: Pointer to the kgsl_device struct
 */
static int _init(struct kgsl_device *device)
{
	int status = 0;

	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		kgsl_pwrctrl_irq(device, false);
		del_timer_sync(&device->idle_timer);
		device->ftbl->stop(device);
		fallthrough;
	case KGSL_STATE_AWARE:
		kgsl_pwrctrl_disable(device);
		fallthrough;
	case KGSL_STATE_SLUMBER:
		fallthrough;
	case KGSL_STATE_NONE:
		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
	}

	return status;
}
/**
 * _wake() - Power up the GPU from a slumber state
 * @device: Pointer to the kgsl_device struct
 *
 * Resume the GPU from a lower power state to ACTIVE.
 */
static int _wake(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int status = 0;

	switch (device->state) {
	case KGSL_STATE_SUSPEND:
		complete_all(&device->hwaccess_gate);
		/* Call the GPU specific resume function */
		device->ftbl->resume(device);
		fallthrough;
	case KGSL_STATE_SLUMBER:
		status = device->ftbl->start(device,
			device->pwrctrl.superfast);
		device->pwrctrl.superfast = false;

		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			dev_err(device->dev, "start failed %d\n", status);
			break;
		}
		kgsl_pwrctrl_axi(device, true);
		kgsl_pwrscale_wake(device);
		kgsl_pwrctrl_irq(device, true);
		trace_gpu_frequency(
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq/1000, 0);
		kgsl_bus_update(device, KGSL_BUS_VOTE_ON);

		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, true, KGSL_STATE_ACTIVE);
		device->ftbl->deassert_gbif_halt(device);
		pwr->last_stat_updated = ktime_get();

		/*
		 * No need to turn on/off irq here as it no longer affects
		 * power collapse
		 */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);

		/* All settings for power level transitions are complete */
		pwr->previous_pwrlevel = pwr->active_pwrlevel;
		kgsl_start_idle_timer(device);
		break;
	case KGSL_STATE_AWARE:
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, true);
		kgsl_start_idle_timer(device);
		break;
	default:
		dev_warn(device->dev, "unhandled state %s\n",
			kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}

	return status;
}
/*
 * _aware() - Put device into AWARE
 * @device: Device pointer
 *
 * The GPU should be available for register reads/writes and able
 * to communicate with the rest of the system. However, disable all
 * paths that allow a switch to an interrupt context (interrupts &
 * timers).
 * Return 0 on success else error code
 */
static int
_aware(struct kgsl_device *device)
{
	int status = 0;

	switch (device->state) {
	case KGSL_STATE_INIT:
		status = kgsl_pwrctrl_enable(device);
		break;
	case KGSL_STATE_ACTIVE:
		kgsl_pwrctrl_irq(device, false);
		del_timer_sync(&device->idle_timer);
		break;
	case KGSL_STATE_SLUMBER:
		status = kgsl_pwrctrl_enable(device);
		break;
	default:
		status = -EINVAL;
	}

	if (!status)
		kgsl_pwrctrl_set_state(device, KGSL_STATE_AWARE);
	return status;
}
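/*
 * _slumber() - Put device into SLUMBER
 * @device: Device pointer
 *
 * Stop the hardware if needed and power everything down.
 * Return 0 on success, or -EBUSY if the hardware cannot collapse yet.
 */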
static int
_slumber(struct kgsl_device *device)
{
	int status = 0;

	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->is_hw_collapsible(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		del_timer_sync(&device->idle_timer);
		kgsl_pwrctrl_irq(device, false);
		/* Make sure power is on to stop the device */
		status = kgsl_pwrctrl_enable(device);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		kgsl_pwrctrl_disable(device);
		kgsl_pwrscale_sleep(device);
		trace_gpu_frequency(0, 0);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		break;
	case KGSL_STATE_SUSPEND:
		complete_all(&device->hwaccess_gate);
		device->ftbl->resume(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		break;
	case KGSL_STATE_AWARE:
		kgsl_pwrctrl_disable(device);
		trace_gpu_frequency(0, 0);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return status;
}
/*
 * _suspend() - Put device into suspend
 * @device: Device pointer
 *
 * Return 0 on success else error code
 */
static int _suspend(struct kgsl_device *device)
{
	int ret = 0;

	if ((device->state == KGSL_STATE_NONE) ||
			(device->state == KGSL_STATE_INIT) ||
			(device->state == KGSL_STATE_SUSPEND))
		return ret;

	/*
	 * Drain to prevent more commands from being submitted, and wait
	 * for the GPU to go idle.
	 */
	ret = device->ftbl->drain_and_idle(device);
	if (ret)
		goto err;

	ret = _slumber(device);
	if (ret)
		goto err;

	kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
	return ret;

err:
	device->ftbl->resume(device);
	dev_err(device->dev, "device failed to SUSPEND %d\n", ret);
	return ret;
}
/*
 * kgsl_pwrctrl_change_state() - Change the GPU state to the requested one
 * @device: Pointer to a KGSL device
 * @state: Desired KGSL state
 *
 * Caller must hold the device mutex. If the requested state change
 * is valid, execute it. Otherwise return an error code explaining
 * why the change has not taken place. Also print an error if an
 * unexpected state change failure occurs. For example, a change to
 * SLUMBER may be rejected because the GPU is busy; this is not an error.
 * A change to SUSPEND should go through no matter what, so if it
 * fails an additional error message will be printed to dmesg.
 */
int kgsl_pwrctrl_change_state(struct kgsl_device *device, int state)
{
	int status = 0;

	if (device->state == state)
		return status;
	kgsl_pwrctrl_request_state(device, state);

	/* Work through the legal state transitions */
	switch (state) {
	case KGSL_STATE_INIT:
		status = _init(device);
		break;
	case KGSL_STATE_AWARE:
		status = _aware(device);
		break;
	case KGSL_STATE_ACTIVE:
		status = _wake(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	case KGSL_STATE_SUSPEND:
		status = _suspend(device);
		break;
	default:
		dev_err(device->dev, "bad state request 0x%x\n", state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}

	return status;
}
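/*
 * kgsl_pwrctrl_set_state - Commit a completed state transition
 * @device: Pointer to a KGSL device
 * @state: The state the device has just entered
 *
 * Records the new state, clears the pending request, drops wake_on_touch
 * on entry to SLUMBER, and gates inline submission whenever the device
 * is not ACTIVE.
 */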
void kgsl_pwrctrl_set_state(struct kgsl_device *device,
		unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;

	if (state == KGSL_STATE_SLUMBER)
		device->pwrctrl.wake_on_touch = false;

	spin_lock(&device->submit_lock);
	if (state == KGSL_STATE_ACTIVE)
		device->skip_inline_submit = false;
	else
		device->skip_inline_submit = true;
	spin_unlock(&device->submit_lock);
}
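/* Record (and trace) the state the device should transition to next */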
void kgsl_pwrctrl_request_state(struct kgsl_device *device,
		unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
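/* Map a KGSL_STATE_* value to a human readable name for logging */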
const char *kgsl_pwrstate_to_str(unsigned int state)
{
	switch (state) {
	case KGSL_STATE_NONE:
		return "NONE";
	case KGSL_STATE_INIT:
		return "INIT";
	case KGSL_STATE_AWARE:
		return "AWARE";
	case KGSL_STATE_ACTIVE:
		return "ACTIVE";
	case KGSL_STATE_SUSPEND:
		return "SUSPEND";
	case KGSL_STATE_SLUMBER:
		return "SLUMBER";
	default:
		break;
	}
	return "UNKNOWN";
}
static int _check_active_count(struct kgsl_device *device, int count)
{
	/* Return 0 if the active count is greater than the desired value */
	return atomic_read(&device->active_cnt) > count ? 0 : 1;
}
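/**
 * kgsl_active_count_wait() - Wait for the active count to drop to a target
 * @device: Pointer to the kgsl_device struct
 * @count: Active count value to wait for
 * @wait_jiffies: Maximum time to wait, in jiffies
 *
 * Must be called with the device mutex held; the mutex is released
 * while waiting. Returns 0 on success or -ETIMEDOUT if the count did
 * not drop within the allotted time.
 */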
int kgsl_active_count_wait(struct kgsl_device *device, int count,
		unsigned long wait_jiffies)
{
	int result = 0;

	if (WARN_ON(!mutex_is_locked(&device->mutex)))
		return -EINVAL;

	while (atomic_read(&device->active_cnt) > count) {
		long ret;

		mutex_unlock(&device->mutex);
		ret = wait_event_timeout(device->active_cnt_wq,
			_check_active_count(device, count), wait_jiffies);
		mutex_lock(&device->mutex);
		result = ret == 0 ? -ETIMEDOUT : 0;
		if (!result)
			wait_jiffies = ret;
		else
			break;
	}

	return result;
}
/**
 * kgsl_pwrctrl_set_default_gpu_pwrlevel() - Set GPU to default power level
 * @device: Pointer to the kgsl_device struct
 */
int kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	unsigned int new_level = pwr->default_pwrlevel;
	unsigned int old_level = pwr->active_pwrlevel;

	/*
	 * Update the level according to any thermal,
	 * max/min, or power constraints.
	 */
	new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level);

	pwr->active_pwrlevel = new_level;
	pwr->previous_pwrlevel = old_level;

	/* Request adjusted DCVS level */
	return device->ftbl->gpu_clock_set(device, pwr->active_pwrlevel);
}
u32 kgsl_pwrctrl_get_acv_perfmode_lvl(struct kgsl_device *device, u32 ddr_freq)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	if (!ddr_freq)
		return (pwr->ddr_table_count - 1);

	for (i = 0; i < pwr->ddr_table_count; i++) {
		if (pwr->ddr_table[i] >= ddr_freq)
			return i;
	}

	/* If DDR frequency is not found, vote perfmode for highest DDR level */
	return (pwr->ddr_table_count - 1);
}
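/* Exported helper: number of GPU power levels, or -ENODEV without a device */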
int kgsl_gpu_num_freqs(void)
{
	struct kgsl_device *device = kgsl_get_device(0);

	if (!device)
		return -ENODEV;

	return device->pwrctrl.num_pwrlevels;
}
EXPORT_SYMBOL(kgsl_gpu_num_freqs);
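/*
 * kgsl_gpu_stat - Fill per-frequency busy/idle statistics
 * @stats: Caller-provided array with one entry per power level
 * @numfreq: Size of @stats; must be at least num_pwrlevels
 *
 * Updates the power scaling statistics under the device mutex before
 * copying them out.
 */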
int kgsl_gpu_stat(struct kgsl_gpu_freq_stat *stats, u32 numfreq)
{
	struct kgsl_device *device = kgsl_get_device(0);
	struct kgsl_pwrctrl *pwr;
	int i;

	if (!device)
		return -ENODEV;

	pwr = &device->pwrctrl;

	if (!stats || (numfreq < pwr->num_pwrlevels))
		return -EINVAL;

	mutex_lock(&device->mutex);
	kgsl_pwrscale_update_stats(device);

	for (i = 0; i < pwr->num_pwrlevels; i++) {
		stats[i].freq = pwr->pwrlevels[i].gpu_freq;
		stats[i].active_time = pwr->clock_times[i];
		stats[i].idle_time = pwr->time_in_pwrlevel[i] - pwr->clock_times[i];
	}
	mutex_unlock(&device->mutex);

	return 0;
}
EXPORT_SYMBOL(kgsl_gpu_stat);