sdhci-msm-scaling.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Qualcomm Technologies, Inc. SDHCI Platform driver.
  4. *
  5. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  6. */
  7. #include <linux/of_device.h>
  8. #include "sdhci-msm.h"
  9. #include "sdhci-msm-scaling.h"
  10. #define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
  11. static int mmc_dt_get_array(struct device *dev, const char *prop_name,
  12. u32 **out, int *len, u32 size)
  13. {
  14. int ret = 0;
  15. struct device_node *np = dev->of_node;
  16. size_t sz;
  17. u32 *arr = NULL;
  18. if (!of_get_property(np, prop_name, len)) {
  19. ret = -EINVAL;
  20. goto out;
  21. }
  22. sz = *len = *len / sizeof(*arr);
  23. if (sz <= 0 || (size > 0 && (sz > size))) {
  24. dev_err(dev, "%s invalid size\n", prop_name);
  25. ret = -EINVAL;
  26. goto out;
  27. }
  28. arr = devm_kcalloc(dev, sz, sizeof(*arr), GFP_KERNEL);
  29. if (!arr) {
  30. ret = -ENOMEM;
  31. goto out;
  32. }
  33. ret = of_property_read_u32_array(np, prop_name, arr, sz);
  34. if (ret < 0) {
  35. dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
  36. goto out;
  37. }
  38. *out = arr;
  39. out:
  40. if (ret)
  41. *len = 0;
  42. return ret;
  43. }
  44. void sdhci_msm_scale_parse_dt(struct device *dev, struct sdhci_msm_host *msm_host)
  45. {
  46. struct device_node *np = dev->of_node;
  47. const char *lower_bus_speed = NULL;
  48. if (mmc_dt_get_array(dev, "qcom,devfreq,freq-table",
  49. &msm_host->clk_scaling.pltfm_freq_table,
  50. &msm_host->clk_scaling.pltfm_freq_table_sz, 0))
  51. pr_debug("%s: no clock scaling frequencies were supplied\n",
  52. dev_name(dev));
  53. else if (!msm_host->clk_scaling.pltfm_freq_table ||
  54. msm_host->clk_scaling.pltfm_freq_table_sz)
  55. dev_info(dev, "bad dts clock scaling frequencies\n");
  56. /*
  57. * Few hosts can support DDR52 mode at the same lower
  58. * system voltage corner as high-speed mode. In such
  59. * cases, it is always better to put it in DDR
  60. * mode which will improve the performance
  61. * without any power impact.
  62. */
  63. if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
  64. &lower_bus_speed)) {
  65. if (!strcmp(lower_bus_speed, "DDR52"))
  66. msm_host->clk_scaling.lower_bus_speed_mode |=
  67. MMC_SCALING_LOWER_DDR52_MODE;
  68. }
  69. }
  70. EXPORT_SYMBOL_GPL(sdhci_msm_scale_parse_dt);
  71. void sdhci_msm_dec_active_req(struct mmc_host *mhost)
  72. {
  73. struct sdhci_host *shost = mmc_priv(mhost);
  74. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  75. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  76. atomic_dec(&host->active_reqs);
  77. }
  78. void sdhci_msm_inc_active_req(struct mmc_host *mhost)
  79. {
  80. struct sdhci_host *shost = mmc_priv(mhost);
  81. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  82. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  83. atomic_inc(&host->active_reqs);
  84. }
  85. void sdhci_msm_is_dcmd(int data, int *err)
  86. {
  87. if (data)
  88. *err = 1;
  89. else
  90. *err = 0;
  91. }
  92. void sdhci_msm_mmc_cqe_clk_scaling_stop_busy(struct mmc_host *mhost, struct mmc_request *mrq)
  93. {
  94. struct sdhci_host *shost = mmc_priv(mhost);
  95. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  96. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  97. struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
  98. brq.mrq);
  99. struct request *req = mmc_queue_req_to_req(mqrq);
  100. struct request_queue *q = req->q;
  101. struct mmc_queue *mq = q->queuedata;
  102. int is_dcmd;
  103. sdhci_msm_dec_active_req(mhost);
  104. is_dcmd = (mmc_issue_type(mq, req) == MMC_ISSUE_DCMD);
  105. _sdhci_msm_mmc_cqe_clk_scaling_stop_busy(host, true, is_dcmd);
  106. }
  107. EXPORT_SYMBOL_GPL(sdhci_msm_mmc_cqe_clk_scaling_stop_busy);
  108. void sdhci_msm_mmc_cqe_clk_scaling_start_busy(struct mmc_host *mhost, struct mmc_request *mrq)
  109. {
  110. struct sdhci_host *shost = mmc_priv(mhost);
  111. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  112. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  113. struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
  114. brq.mrq);
  115. struct request *req = mmc_queue_req_to_req(mqrq);
  116. struct request_queue *q = req->q;
  117. struct mmc_queue *mq = q->queuedata;
  118. sdhci_msm_mmc_init_clk_scaling(mhost);
  119. if (host->defer_clk_scaling_resume == 1) {
  120. sdhci_msm_mmc_resume_clk_scaling(mhost);
  121. host->defer_clk_scaling_resume = 0;
  122. }
  123. sdhci_msm_inc_active_req(mhost);
  124. sdhci_msm_mmc_deferred_scaling(host);
  125. _sdhci_msm_mmc_cqe_clk_scaling_start_busy(mq, host, true);
  126. }
  127. EXPORT_SYMBOL_GPL(sdhci_msm_mmc_cqe_clk_scaling_start_busy);
  128. void sdhci_msm_cqe_scaling_resume(struct mmc_host *mhost)
  129. {
  130. struct sdhci_host *shost = mmc_priv(mhost);
  131. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  132. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  133. if (host->scaling_suspended == 1) {
  134. sdhci_msm_mmc_resume_clk_scaling(mhost);
  135. host->scaling_suspended = 0;
  136. }
  137. }
  138. EXPORT_SYMBOL_GPL(sdhci_msm_cqe_scaling_resume);
  139. void sdhci_msm_set_active_reqs(struct mmc_host *mhost)
  140. {
  141. struct sdhci_host *shost = mmc_priv(mhost);
  142. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  143. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  144. atomic_set(&host->active_reqs, 0);
  145. }
  146. void sdhci_msm_set_factors(struct mmc_host *mhost)
  147. {
  148. struct sdhci_host *shost = mmc_priv(mhost);
  149. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  150. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  151. host->clk_scaling.upthreshold = MMC_DEVFRQ_DEFAULT_UP_THRESHOLD;
  152. host->clk_scaling.downthreshold = MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD;
  153. host->clk_scaling.polling_delay_ms = MMC_DEVFRQ_DEFAULT_POLLING_MSEC;
  154. host->clk_scaling.skip_clk_scale_freq_update = false;
  155. }
  156. void sdhci_msm_mmc_init_setup_scaling(struct mmc_card *card, struct mmc_host *mhost)
  157. {
  158. struct sdhci_host *shost = mmc_priv(mhost);
  159. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  160. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  161. host->clk_scaling_lowest = mhost->f_min;
  162. if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400) ||
  163. (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200))
  164. host->clk_scaling_highest = card->ext_csd.hs200_max_dtr;
  165. else if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) ||
  166. (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
  167. host->clk_scaling_highest = card->ext_csd.hs_max_dtr;
  168. else
  169. host->clk_scaling_highest = card->csd.max_dtr;
  170. }
  171. void sdhci_msm_mmc_exit_clk_scaling(struct mmc_host *mhost)
  172. {
  173. struct sdhci_host *shost = mmc_priv(mhost);
  174. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  175. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  176. if (host->scale_caps & MMC_CAP2_CLK_SCALE)
  177. _sdhci_msm_mmc_exit_clk_scaling(host);
  178. }
  179. void sdhci_msm_mmc_suspend_clk_scaling(struct mmc_host *mhost)
  180. {
  181. struct sdhci_host *shost = mmc_priv(mhost);
  182. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  183. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  184. _sdhci_msm_mmc_suspend_clk_scaling(host);
  185. }
  186. EXPORT_SYMBOL_GPL(sdhci_msm_mmc_suspend_clk_scaling);
  187. void sdhci_msm_mmc_resume_clk_scaling(struct mmc_host *mhost)
  188. {
  189. struct sdhci_host *shost = mmc_priv(mhost);
  190. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  191. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  192. _sdhci_msm_mmc_resume_clk_scaling(host);
  193. }
  194. EXPORT_SYMBOL_GPL(sdhci_msm_mmc_resume_clk_scaling);
  195. void sdhci_msm_mmc_init_clk_scaling(struct mmc_host *mhost)
  196. {
  197. struct sdhci_host *shost = mmc_priv(mhost);
  198. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  199. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  200. if (host->clk_scale_init_done)
  201. return;
  202. sdhci_msm_set_factors(mhost);
  203. sdhci_msm_set_active_reqs(mhost);
  204. sdhci_msm_mmc_init_setup_scaling(mhost->card, mhost);
  205. _sdhci_msm_mmc_init_clk_scaling(host);
  206. host->clk_scale_init_done = 1;
  207. }
  208. EXPORT_SYMBOL_GPL(sdhci_msm_mmc_init_clk_scaling);
/*
 * sdhci_msm_mmc_select_hs_ddr52() - move the card into DDR52 at @freq.
 * @host: msm host whose card is reconfigured
 * @freq: bus clock to program after the mode switch
 *
 * Returns 0 on success or the error from bus-width/DDR selection.
 */
static int sdhci_msm_mmc_select_hs_ddr52(struct sdhci_msm_host *host, unsigned long freq)
{
	struct mmc_host *mhost = host->mmc;
	int err;

	/*
	 * NOTE(review): the return value of mmc_select_hs() is discarded
	 * here, unlike in sdhci_msm_mmc_scale_low() — confirm this is a
	 * deliberate best-effort step before the DDR switch.
	 */
	mmc_select_hs(mhost->card);
	err = mmc_select_bus_width(mhost->card);
	if (err < 0) {
		pr_err("%s: %s: select_bus_width failed(%d)\n",
			mmc_hostname(mhost), __func__, err);
		return err;
	}
	err = mmc_select_hs_ddr(mhost->card);
	/* Clock is programmed even if the DDR switch failed; err reports it. */
	mmc_set_clock(mhost, freq);
	return err;
}
/*
 * Scale down from HS400 to HS in order to allow frequency change.
 * This is needed for cards that don't support changing frequency in HS400.
 */
static int sdhci_msm_mmc_scale_low(struct sdhci_msm_host *host, unsigned long freq)
{
	struct mmc_host *mhost = host->mmc;
	int err = 0;

	/* Drop to legacy timing/clock first so the mode switch is legal. */
	mmc_set_timing(mhost, MMC_TIMING_LEGACY);
	mmc_set_clock(mhost, MMC_HIGH_26_MAX_DTR);

	/* Prefer DDR52 as the low-speed mode when the platform allows it. */
	if (host->clk_scaling.lower_bus_speed_mode &
	    MMC_SCALING_LOWER_DDR52_MODE) {
		err = sdhci_msm_mmc_select_hs_ddr52(host, freq);
		if (err)
			pr_err("%s: %s: failed to switch to DDR52: err: %d\n",
				mmc_hostname(mhost), __func__, err);
		else
			return err;
		/* DDR52 failed: fall through and try plain high-speed. */
	}

	err = mmc_select_hs(mhost->card);
	if (err) {
		pr_err("%s: %s: scaling low: failed (%d)\n",
			mmc_hostname(mhost), __func__, err);
		return err;
	}

	err = mmc_select_bus_width(mhost->card);
	if (err < 0) {
		pr_err("%s: %s: select_bus_width failed(%d)\n",
			mmc_hostname(mhost), __func__, err);
		return err;
	}

	mmc_set_clock(mhost, freq);
	return 0;
}
  258. /*
  259. * Scale UP from HS to HS200/H400
  260. */
  261. static int sdhci_msm_mmc_scale_high(struct sdhci_msm_host *host)
  262. {
  263. struct mmc_host *mhost = host->mmc;
  264. int err = 0;
  265. if (mmc_card_ddr52(mhost->card)) {
  266. mmc_set_timing(mhost, MMC_TIMING_LEGACY);
  267. mmc_set_clock(mhost, MMC_HIGH_26_MAX_DTR);
  268. }
  269. mmc_set_initial_state(mhost);
  270. err = mmc_select_timing(mhost->card);
  271. if (err) {
  272. pr_err("%s: %s: select hs400 failed (%d)\n",
  273. mmc_hostname(mhost), __func__, err);
  274. return err;
  275. }
  276. if (mmc_card_hs200(mhost->card)) {
  277. err = mmc_hs200_tuning(mhost->card);
  278. if (err) {
  279. pr_err("%s: %s: hs200 tuning failed (%d)\n",
  280. mmc_hostname(mhost), __func__, err);
  281. return err;
  282. }
  283. err = mmc_select_hs400(mhost->card);
  284. if (err) {
  285. pr_err("%s: %s: select hs400 failed (%d)\n",
  286. mmc_hostname(mhost), __func__, err);
  287. return err;
  288. }
  289. }
  290. return 0;
  291. }
  292. static int sdhci_msm_mmc_set_clock_bus_speed(struct sdhci_msm_host *host, unsigned long freq)
  293. {
  294. int err = 0;
  295. if (freq == MMC_HS200_MAX_DTR)
  296. err = sdhci_msm_mmc_scale_high(host);
  297. else
  298. err = sdhci_msm_mmc_scale_low(host, freq);
  299. return err;
  300. }
  301. static inline unsigned long sdhci_msm_mmc_ddr_freq_accommodation(unsigned long freq)
  302. {
  303. if (freq == MMC_HIGH_DDR_MAX_DTR)
  304. return freq;
  305. return freq/2;
  306. }
/**
 * sdhci_msm_mmc_change_bus_speed() - Change MMC card bus frequency at runtime
 * @host: pointer to the msm host structure
 * @freq: pointer to desired frequency to be set
 *
 * Change the MMC card bus frequency at runtime after the card is
 * initialized. Callers are expected to make sure of the card's
 * state (DATA/RCV/TRANSFER) before changing the frequency at runtime.
 *
 * If the frequency to change is greater than max. supported by card,
 * *freq is changed to max. supported by card. If it is less than min.
 * supported by host, *freq is changed to min. supported by host.
 * Host is assumed to be claimed while calling this function.
 */
static int sdhci_msm_mmc_change_bus_speed(struct sdhci_msm_host *host, unsigned long *freq)
{
	struct mmc_host *mhost = host->mmc;
	int err = 0;
	struct mmc_card *card;
	unsigned long actual_freq;

	card = mhost->card;

	if (!card || !freq) {
		err = -EINVAL;
		goto out;
	}
	actual_freq = *freq;

	WARN_ON(!mhost->claimed);

	/*
	 * For scaling up/down HS400 we'll need special handling,
	 * for other timings we can simply do clock frequency change
	 */
	if (mmc_card_hs400(card) ||
	    (!mmc_card_hs200(mhost->card) && *freq == MMC_HS200_MAX_DTR)) {
		err = sdhci_msm_mmc_set_clock_bus_speed(host, *freq);
		if (err) {
			pr_err("%s: %s: failed (%d)to set bus and clock speed (freq=%lu)\n",
				mmc_hostname(mhost), __func__, err, *freq);
			goto out;
		}
	} else if (mmc_card_hs200(mhost->card)) {
		mmc_set_clock(mhost, *freq);

		/* HS200 needs re-tuning after every clock change. */
		err = mmc_hs200_tuning(mhost->card);
		if (err) {
			pr_warn("%s: %s: tuning execution failed %d\n",
				mmc_hostname(card->host),
				__func__, err);
			/* Tuning failed: fall back to the last working rate. */
			mmc_set_clock(mhost, host->clk_scaling.curr_freq);
		}
	} else {
		/* DDR52 runs the bus at half the programmed rate. */
		if (mmc_card_ddr52(mhost->card))
			actual_freq = sdhci_msm_mmc_ddr_freq_accommodation(*freq);
		mmc_set_clock(mhost, actual_freq);
	}

out:
	return err;
}
  363. bool sdhci_msm_mmc_is_data_request(u32 opcode)
  364. {
  365. switch (opcode) {
  366. case MMC_READ_SINGLE_BLOCK:
  367. case MMC_READ_MULTIPLE_BLOCK:
  368. case MMC_WRITE_BLOCK:
  369. case MMC_WRITE_MULTIPLE_BLOCK:
  370. return true;
  371. default:
  372. return false;
  373. }
  374. }
  375. void _sdhci_msm_mmc_clk_scaling_start_busy(struct sdhci_msm_host *host, bool lock_needed)
  376. {
  377. struct sdhci_msm_mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
  378. unsigned long flags;
  379. if (!clk_scaling->enable)
  380. return;
  381. if (lock_needed)
  382. spin_lock_irqsave(&clk_scaling->lock, flags);
  383. clk_scaling->start_busy = ktime_get();
  384. clk_scaling->is_busy_started = true;
  385. if (lock_needed)
  386. spin_unlock_irqrestore(&clk_scaling->lock, flags);
  387. }
/*
 * _sdhci_msm_mmc_clk_scaling_stop_busy() - close the current busy window.
 * @host:        msm host whose clk_scaling statistics are updated
 * @lock_needed: take clk_scaling->lock around the update when true
 *               (callers that already hold the lock pass false)
 *
 * Adds the time elapsed since start_busy to total_busy_time_us and
 * clears is_busy_started.  Warns when no busy window was open.
 */
void _sdhci_msm_mmc_clk_scaling_stop_busy(struct sdhci_msm_host *host, bool lock_needed)
{
	struct sdhci_msm_mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
	unsigned long flags;

	if (!clk_scaling->enable)
		return;

	if (lock_needed)
		spin_lock_irqsave(&clk_scaling->lock, flags);

	/* A stop without a matching start indicates a driver bug. */
	if (!clk_scaling->is_busy_started) {
		WARN_ON(1);
		goto out;
	}

	clk_scaling->total_busy_time_us +=
		ktime_to_us(ktime_sub(ktime_get(),
			clk_scaling->start_busy));
	pr_debug("%s: accumulated busy time is %lu usec\n",
		mmc_hostname(host->mmc), clk_scaling->total_busy_time_us);
	clk_scaling->is_busy_started = false;

out:
	if (lock_needed)
		spin_unlock_irqrestore(&clk_scaling->lock, flags);
}
/**
 * _sdhci_msm_mmc_cqe_clk_scaling_start_busy() - start busy timer for data requests
 * @mq:          mmc queue, consulted for a pending DCMD
 * @host:        pointer to the msm host structure
 * @lock_needed: flag indication if locking is needed
 *
 * This function starts the busy timer in case it was not already started.
 */
void _sdhci_msm_mmc_cqe_clk_scaling_start_busy(struct mmc_queue *mq,
		struct sdhci_msm_host *host, bool lock_needed)
{
	unsigned long flags;

	if (!host->clk_scaling.enable)
		return;

	if (lock_needed)
		spin_lock_irqsave(&host->clk_scaling.lock, flags);

	/*
	 * Only open a new window when none is running and no DCMD is
	 * already holding the queue busy.
	 */
	if (!host->clk_scaling.is_busy_started &&
	    !(mq->cqe_busy & MMC_CQE_DCMD_BUSY)) {
		host->clk_scaling.start_busy = ktime_get();
		host->clk_scaling.is_busy_started = true;
	}

	if (lock_needed)
		spin_unlock_irqrestore(&host->clk_scaling.lock, flags);
}
EXPORT_SYMBOL_GPL(_sdhci_msm_mmc_cqe_clk_scaling_start_busy);
/**
 * _sdhci_msm_mmc_cqe_clk_scaling_stop_busy() - stop busy timer for last data requests
 * @host:        pointer to the msm host structure
 * @lock_needed: flag indication if locking is needed
 * @is_cqe_dcmd: whether the completed request was a DCMD (currently unused here)
 *
 * This function stops the busy timer in case it is the last data request.
 * In case the current request is not the last one, the busy time till
 * now will be accumulated and the counter will be restarted.
 *
 * NOTE(review): unlike the non-CQE stop helper, this accumulates from
 * start_busy without first checking is_busy_started — if no window was
 * open the delta may be meaningless. Confirm callers always pair this
 * with a start.
 */
void _sdhci_msm_mmc_cqe_clk_scaling_stop_busy(struct sdhci_msm_host *host,
		bool lock_needed, int is_cqe_dcmd)
{
	unsigned int cqe_active_reqs = 0;
	unsigned long flags;

	if (!host->clk_scaling.enable)
		return;

	/* Snapshot of in-flight requests, read outside the scaling lock. */
	cqe_active_reqs = atomic_read(&host->active_reqs);

	if (lock_needed)
		spin_lock_irqsave(&host->clk_scaling.lock, flags);

	host->clk_scaling.total_busy_time_us +=
		ktime_to_us(ktime_sub(ktime_get(),
			host->clk_scaling.start_busy));

	/* Restart the window while other requests are still in flight. */
	if (cqe_active_reqs) {
		host->clk_scaling.is_busy_started = true;
		host->clk_scaling.start_busy = ktime_get();
	} else {
		host->clk_scaling.is_busy_started = false;
	}

	if (lock_needed)
		spin_unlock_irqrestore(&host->clk_scaling.lock, flags);
}
EXPORT_SYMBOL_GPL(_sdhci_msm_mmc_cqe_clk_scaling_stop_busy);
  465. /**
  466. * mmc_can_scale_clk() - Check clock scaling capability
  467. * @host: pointer to mmc host structure
  468. */
  469. bool sdhci_msm_mmc_can_scale_clk(struct sdhci_msm_host *msm_host)
  470. {
  471. struct mmc_host *host = msm_host->mmc;
  472. if (!host) {
  473. pr_err("bad host parameter\n");
  474. WARN_ON(1);
  475. return false;
  476. }
  477. return msm_host->scale_caps & MMC_CAP2_CLK_SCALE;
  478. }
  479. EXPORT_SYMBOL_GPL(sdhci_msm_mmc_can_scale_clk);
/*
 * sdhci_msm_mmc_devfreq_get_dev_status() - devfreq get_dev_status callback.
 * @dev:    mmc class device devfreq is attached to
 * @status: filled with busy/total time and current frequency
 *
 * Closes (and, if work is still pending, restarts) the running busy
 * window so the load sample includes in-progress work, then resets the
 * accumulators for the next measurement interval.
 */
static int sdhci_msm_mmc_devfreq_get_dev_status(struct device *dev,
	struct devfreq_dev_status *status)
{
	struct mmc_host *mhost = container_of(dev, struct mmc_host, class_dev);
	struct sdhci_host *shost = mmc_priv(mhost);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
	struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_msm_mmc_devfeq_clk_scaling *clk_scaling;
	unsigned long flags;

	if (!host) {
		pr_err("bad host parameter\n");
		WARN_ON(1);
		return -EINVAL;
	}

	clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		return 0;

	spin_lock_irqsave(&host->clk_scaling.lock, flags);

	/* accumulate the busy time of ongoing work */
	memset(status, 0, sizeof(*status));
	if (clk_scaling->is_busy_started) {
		if (mhost->cqe_on) {
			/* the "busy-timer" will be restarted in case there
			 * are pending data requests
			 */
			_sdhci_msm_mmc_cqe_clk_scaling_stop_busy(host, false, false);
		} else {
			/* Close the window and immediately reopen it. */
			_sdhci_msm_mmc_clk_scaling_stop_busy(host, false);
			_sdhci_msm_mmc_clk_scaling_start_busy(host, false);
		}
	}

	status->busy_time = clk_scaling->total_busy_time_us;
	status->total_time = ktime_to_us(ktime_sub(ktime_get(),
		clk_scaling->measure_interval_start));
	clk_scaling->total_busy_time_us = 0;
	status->current_frequency = clk_scaling->curr_freq;
	clk_scaling->measure_interval_start = ktime_get();

	/*
	 * NOTE(review): the load computation below divides by total_time;
	 * a zero-length interval would fault when pr_debug is enabled —
	 * confirm the polling interval guarantees total_time > 0.
	 */
	pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n",
		mmc_hostname(mhost),
		(status->busy_time*100)/status->total_time,
		status->total_time, status->busy_time,
		status->current_frequency);

	spin_unlock_irqrestore(&host->clk_scaling.lock, flags);

	return 0;
}
  525. static bool sdhci_msm_mmc_is_valid_state_for_clk_scaling(struct sdhci_msm_host *host)
  526. {
  527. struct mmc_host *mhost = host->mmc;
  528. struct mmc_card *card = mhost->card;
  529. u32 status;
  530. /*
  531. * If the current partition type is RPMB, clock switching may not
  532. * work properly as sending tuning command (CMD21) is illegal in
  533. * this mode.
  534. * For RPMB transaction cmdq would be disabled.
  535. */
  536. if (!card || (mmc_card_mmc(card) && !card->ext_csd.cmdq_en))
  537. return false;
  538. if (mmc_send_status(card, &status)) {
  539. pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
  540. return false;
  541. }
  542. return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
  543. }
  544. static int sdhci_msm_notify_load(struct sdhci_msm_host *msm_host, enum sdhci_msm_mmc_load state)
  545. {
  546. int ret = 0;
  547. u32 clk_rate = 0;
  548. if (!IS_ERR(msm_host->bulk_clks[2].clk)) {
  549. clk_rate = (state == MMC_LOAD_LOW) ?
  550. msm_host->ice_clk_min :
  551. msm_host->ice_clk_max;
  552. if (msm_host->ice_clk_rate == clk_rate)
  553. return 0;
  554. pr_debug("%s: changing ICE clk rate to %u\n",
  555. mmc_hostname(msm_host->mmc), clk_rate);
  556. ret = clk_set_rate(msm_host->bulk_clks[2].clk, clk_rate);
  557. if (ret) {
  558. pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
  559. mmc_hostname(msm_host->mmc), ret, clk_rate);
  560. return ret;
  561. }
  562. msm_host->ice_clk_rate = clk_rate;
  563. }
  564. return 0;
  565. }
  566. int sdhci_msm_mmc_clk_update_freq(struct sdhci_msm_host *host,
  567. unsigned long freq, enum sdhci_msm_mmc_load state)
  568. {
  569. struct mmc_host *mhost = host->mmc;
  570. int err = 0;
  571. if (!host) {
  572. pr_err("bad host parameter\n");
  573. WARN_ON(1);
  574. return -EINVAL;
  575. }
  576. /* make sure the card supports the frequency we want */
  577. if (unlikely(freq > host->clk_scaling_highest)) {
  578. freq = host->clk_scaling_highest;
  579. pr_warn("%s: %s: High freq was overridden to %lu\n",
  580. mmc_hostname(mhost), __func__,
  581. host->clk_scaling_highest);
  582. }
  583. if (unlikely(freq < host->clk_scaling_lowest)) {
  584. freq = host->clk_scaling_lowest;
  585. pr_warn("%s: %s: Low freq was overridden to %lu\n",
  586. mmc_hostname(mhost), __func__,
  587. host->clk_scaling_lowest);
  588. }
  589. if (freq == host->clk_scaling.curr_freq)
  590. goto out;
  591. if (mhost->cqe_on) {
  592. err = mhost->cqe_ops->cqe_wait_for_idle(mhost);
  593. if (err) {
  594. pr_err("%s: %s: CQE went in recovery path\n",
  595. mmc_hostname(mhost), __func__);
  596. goto out;
  597. }
  598. mhost->cqe_ops->cqe_off(mhost);
  599. }
  600. err = sdhci_msm_notify_load(host, state);
  601. if (err) {
  602. pr_err("%s: %s: fail on notify_load\n",
  603. mmc_hostname(mhost), __func__);
  604. goto out;
  605. }
  606. if (!sdhci_msm_mmc_is_valid_state_for_clk_scaling(host)) {
  607. pr_debug("%s: invalid state for clock scaling - skipping\n",
  608. mmc_hostname(mhost));
  609. goto out;
  610. }
  611. if (mmc_card_mmc(mhost->card))
  612. err = sdhci_msm_mmc_change_bus_speed(host, &freq);
  613. if (!err)
  614. host->clk_scaling.curr_freq = freq;
  615. else
  616. pr_err("%s: %s: failed (%d) at freq=%lu\n",
  617. mmc_hostname(mhost), __func__, err, freq);
  618. /*
  619. * CQE would be enabled as part of CQE issueing path
  620. * So no need to unhalt it explicitly
  621. */
  622. out:
  623. return err;
  624. }
  625. EXPORT_SYMBOL_GPL(sdhci_msm_mmc_clk_update_freq);
/*
 * sdhci_msm_mmc_devfreq_set_target() - devfreq target callback.
 * @dev:           mmc class device devfreq is attached to
 * @freq:          requested frequency
 * @devfreq_flags: devfreq flags (unused)
 *
 * Records the requested frequency and, when the host can be claimed
 * without conflicting with an in-progress abort, applies it right away;
 * otherwise the change is left pending for the data path to apply via
 * sdhci_msm_mmc_deferred_scaling().
 */
static int sdhci_msm_mmc_devfreq_set_target(struct device *dev,
	unsigned long *freq, u32 devfreq_flags)
{
	struct mmc_host *mhost = container_of(dev, struct mmc_host, class_dev);
	struct sdhci_host *shost = mmc_priv(mhost);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
	struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_msm_mmc_devfeq_clk_scaling *clk_scaling;
	int err = 0;
	int abort;
	unsigned long pflags = current->flags;
	unsigned long flags;

	/* Ensure scaling would happen even in memory pressure conditions */
	current->flags |= PF_MEMALLOC;

	if (!(host && freq)) {
		pr_err("%s: unexpected host/freq parameter\n", __func__);
		err = -EINVAL;
		goto out;
	}

	clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		goto out;

	pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(mhost),
		*freq, current->comm);

	spin_lock_irqsave(&clk_scaling->lock, flags);
	/* Already targeting this frequency - nothing to do. */
	if (clk_scaling->target_freq == *freq) {
		spin_unlock_irqrestore(&clk_scaling->lock, flags);
		goto out;
	}

	clk_scaling->need_freq_change = true;
	clk_scaling->target_freq = *freq;
	clk_scaling->state = *freq < clk_scaling->curr_freq ?
		MMC_LOAD_LOW : MMC_LOAD_HIGH;
	spin_unlock_irqrestore(&clk_scaling->lock, flags);

	/* Only claim the host while it is active; otherwise leave deferred. */
	if (!clk_scaling->is_suspended && mhost->ios.clock)
		abort = __mmc_claim_host(mhost, NULL,
				&clk_scaling->devfreq_abort);
	else
		goto out;

	if (abort)
		goto out;

	/*
	 * In case we were able to claim host there is no need to
	 * defer the frequency change. It will be done now
	 */
	clk_scaling->need_freq_change = false;

	err = sdhci_msm_mmc_clk_update_freq(host, *freq, clk_scaling->state);
	if (err && err != -EAGAIN)
		pr_err("%s: clock scale to %lu failed with error %d\n",
			mmc_hostname(mhost), *freq, err);
	else
		pr_debug("%s: clock change to %lu finished successfully (%s)\n",
			mmc_hostname(mhost), *freq, current->comm);

	mmc_release_host(mhost);
out:
	current_restore_flags(pflags, PF_MEMALLOC);
	return err;
}
/**
 * sdhci_msm_mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
 * @host: pointer to the msm host structure
 *
 * This function does clock scaling in case "need_freq_change" flag was set
 * by the clock scaling logic.
 */
void sdhci_msm_mmc_deferred_scaling(struct sdhci_msm_host *host)
{
	unsigned long target_freq;
	int err;
	struct sdhci_msm_mmc_devfeq_clk_scaling clk_scaling;
	unsigned long flags;
	struct mmc_host *mhost = host->mmc;

	if (!host->clk_scaling.enable)
		return;

	spin_lock_irqsave(&host->clk_scaling.lock, flags);

	if (!host->clk_scaling.need_freq_change) {
		spin_unlock_irqrestore(&host->clk_scaling.lock, flags);
		return;
	}

	/* Keep devfreq from racing with the change we are about to apply. */
	atomic_inc(&host->clk_scaling.devfreq_abort);
	target_freq = host->clk_scaling.target_freq;

	/*
	 * Store the clock scaling state while the lock is acquired so that
	 * if devfreq context modifies clk_scaling, it will get reflected only
	 * in the next deferred scaling check.
	 * NOTE(review): this copies the whole struct by value (spinlock
	 * included); only .state is consumed afterwards — confirm intent.
	 */
	clk_scaling = host->clk_scaling;
	host->clk_scaling.need_freq_change = false;
	spin_unlock_irqrestore(&host->clk_scaling.lock, flags);

	pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
		mmc_hostname(mhost),
		target_freq, current->comm);

	err = sdhci_msm_mmc_clk_update_freq(host, target_freq,
		clk_scaling.state);
	if (err && err != -EAGAIN)
		pr_err("%s: failed on deferred scale clocks (%d)\n",
			mmc_hostname(mhost), err);
	else
		pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
			mmc_hostname(mhost),
			target_freq, current->comm);

	atomic_dec(&host->clk_scaling.devfreq_abort);
}
EXPORT_SYMBOL_GPL(sdhci_msm_mmc_deferred_scaling);
  730. static int sdhci_msm_mmc_devfreq_create_freq_table(struct sdhci_msm_host *host)
  731. {
  732. int i;
  733. struct sdhci_msm_mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
  734. struct mmc_host *mhost = host->mmc;
  735. pr_debug("%s: supported: lowest=%lu, highest=%lu\n",
  736. mmc_hostname(mhost),
  737. host->clk_scaling_lowest,
  738. host->clk_scaling_highest);
  739. /*
  740. * Create the frequency table and initialize it with default values.
  741. * Initialize it with platform specific frequencies if the frequency
  742. * table supplied by platform driver is present, otherwise initialize
  743. * it with min and max frequencies supported by the card.
  744. */
  745. if (!clk_scaling->freq_table) {
  746. if (clk_scaling->pltfm_freq_table_sz)
  747. clk_scaling->freq_table_sz =
  748. clk_scaling->pltfm_freq_table_sz;
  749. else
  750. clk_scaling->freq_table_sz = 2;
  751. clk_scaling->freq_table = kcalloc(
  752. clk_scaling->freq_table_sz,
  753. sizeof(*(clk_scaling->freq_table)), GFP_KERNEL);
  754. if (!clk_scaling->freq_table)
  755. return -ENOMEM;
  756. if (clk_scaling->pltfm_freq_table) {
  757. memcpy(clk_scaling->freq_table,
  758. clk_scaling->pltfm_freq_table,
  759. (clk_scaling->pltfm_freq_table_sz *
  760. sizeof(*(clk_scaling->pltfm_freq_table))));
  761. } else {
  762. pr_debug("%s: no frequency table defined - setting default\n",
  763. mmc_hostname(mhost));
  764. clk_scaling->freq_table[0] =
  765. host->clk_scaling_lowest;
  766. clk_scaling->freq_table[1] =
  767. host->clk_scaling_highest;
  768. goto out;
  769. }
  770. }
  771. if (host->clk_scaling_lowest >
  772. clk_scaling->freq_table[0])
  773. pr_debug("%s: frequency table undershot possible freq\n",
  774. mmc_hostname(mhost));
  775. for (i = 0; i < clk_scaling->freq_table_sz; i++) {
  776. if (clk_scaling->freq_table[i] <=
  777. host->clk_scaling_highest)
  778. continue;
  779. clk_scaling->freq_table[i] =
  780. host->clk_scaling_highest;
  781. clk_scaling->freq_table_sz = i + 1;
  782. pr_debug("%s: frequency table overshot possible freq (%d)\n",
  783. mmc_hostname(mhost), clk_scaling->freq_table[i]);
  784. break;
  785. }
  786. if (mmc_card_sd(mhost->card) && (clk_scaling->freq_table_sz < 2)) {
  787. clk_scaling->freq_table[clk_scaling->freq_table_sz] =
  788. host->clk_scaling_highest;
  789. clk_scaling->freq_table_sz++;
  790. }
  791. out:
  792. /**
  793. * devfreq requires unsigned long type freq_table while the
  794. * freq_table in clk_scaling is un32. Here allocates an individual
  795. * memory space for it and release it when exit clock scaling.
  796. */
  797. clk_scaling->devfreq_profile.freq_table = kcalloc(
  798. clk_scaling->freq_table_sz,
  799. sizeof(*(clk_scaling->devfreq_profile.freq_table)),
  800. GFP_KERNEL);
  801. if (!clk_scaling->devfreq_profile.freq_table) {
  802. kfree(clk_scaling->freq_table);
  803. return -ENOMEM;
  804. }
  805. clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;
  806. for (i = 0; i < clk_scaling->freq_table_sz; i++) {
  807. clk_scaling->devfreq_profile.freq_table[i] =
  808. clk_scaling->freq_table[i];
  809. pr_debug("%s: freq[%d] = %u\n",
  810. mmc_hostname(mhost), i, clk_scaling->freq_table[i]);
  811. }
  812. return 0;
  813. }
  814. static ssize_t enable_show(struct device *dev,
  815. struct device_attribute *attr, char *buf)
  816. {
  817. struct mmc_host *mhost = cls_dev_to_mmc_host(dev);
  818. struct sdhci_host *shost = mmc_priv(mhost);
  819. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  820. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  821. if (!host)
  822. return -EINVAL;
  823. return scnprintf(buf, PAGE_SIZE, "%d\n", sdhci_msm_mmc_can_scale_clk(host));
  824. }
  825. static ssize_t enable_store(struct device *dev,
  826. struct device_attribute *attr, const char *buf, size_t count)
  827. {
  828. struct mmc_host *mhost = cls_dev_to_mmc_host(dev);
  829. struct sdhci_host *shost = mmc_priv(mhost);
  830. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  831. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  832. unsigned long value;
  833. if (!host || !mhost->card || kstrtoul(buf, 0, &value))
  834. return -EINVAL;
  835. mmc_get_card(mhost->card, NULL);
  836. if (!value) {
  837. /* Suspend the clock scaling and mask host capability */
  838. if (host->clk_scaling.enable)
  839. sdhci_msm_mmc_suspend_clk_scaling(mhost);
  840. host->clk_scaling.enable = false;
  841. mhost->caps2 &= ~MMC_CAP2_CLK_SCALE;
  842. host->scale_caps = mhost->caps2;
  843. host->clk_scaling.state = MMC_LOAD_HIGH;
  844. /* Set to max. frequency when disabling */
  845. sdhci_msm_mmc_clk_update_freq(host, host->clk_scaling_highest,
  846. host->clk_scaling.state);
  847. } else if (value) {
  848. /* Unmask host capability and resume scaling */
  849. mhost->caps2 |= MMC_CAP2_CLK_SCALE;
  850. host->scale_caps = mhost->caps2;
  851. if (!host->clk_scaling.enable) {
  852. host->clk_scaling.enable = true;
  853. sdhci_msm_mmc_resume_clk_scaling(mhost);
  854. }
  855. }
  856. mmc_put_card(mhost->card, NULL);
  857. return count;
  858. }
  859. static ssize_t up_threshold_show(struct device *dev,
  860. struct device_attribute *attr, char *buf)
  861. {
  862. struct mmc_host *mhost = cls_dev_to_mmc_host(dev);
  863. struct sdhci_host *shost = mmc_priv(mhost);
  864. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  865. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  866. if (!host)
  867. return -EINVAL;
  868. return scnprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.upthreshold);
  869. }
  870. #define MAX_PERCENTAGE 100
  871. static ssize_t up_threshold_store(struct device *dev,
  872. struct device_attribute *attr, const char *buf, size_t count)
  873. {
  874. struct mmc_host *mhost = cls_dev_to_mmc_host(dev);
  875. struct sdhci_host *shost = mmc_priv(mhost);
  876. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  877. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  878. unsigned long value;
  879. if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
  880. return -EINVAL;
  881. host->clk_scaling.upthreshold = value;
  882. pr_debug("%s: clkscale_up_thresh set to %lu\n",
  883. mmc_hostname(mhost), value);
  884. return count;
  885. }
  886. static ssize_t down_threshold_show(struct device *dev,
  887. struct device_attribute *attr, char *buf)
  888. {
  889. struct mmc_host *mhost = cls_dev_to_mmc_host(dev);
  890. struct sdhci_host *shost = mmc_priv(mhost);
  891. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  892. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  893. if (!host)
  894. return -EINVAL;
  895. return scnprintf(buf, PAGE_SIZE, "%d\n",
  896. host->clk_scaling.downthreshold);
  897. }
  898. static ssize_t down_threshold_store(struct device *dev,
  899. struct device_attribute *attr, const char *buf, size_t count)
  900. {
  901. struct mmc_host *mhost = cls_dev_to_mmc_host(dev);
  902. struct sdhci_host *shost = mmc_priv(mhost);
  903. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  904. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  905. unsigned long value;
  906. if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
  907. return -EINVAL;
  908. host->clk_scaling.downthreshold = value;
  909. pr_debug("%s: clkscale_down_thresh set to %lu\n",
  910. mmc_hostname(mhost), value);
  911. return count;
  912. }
  913. static ssize_t polling_interval_show(struct device *dev,
  914. struct device_attribute *attr, char *buf)
  915. {
  916. struct mmc_host *mhost = cls_dev_to_mmc_host(dev);
  917. struct sdhci_host *shost = mmc_priv(mhost);
  918. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  919. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  920. if (!host)
  921. return -EINVAL;
  922. return scnprintf(buf, PAGE_SIZE, "%lu milliseconds\n",
  923. host->clk_scaling.polling_delay_ms);
  924. }
  925. static ssize_t polling_interval_store(struct device *dev,
  926. struct device_attribute *attr, const char *buf, size_t count)
  927. {
  928. struct mmc_host *mhost = cls_dev_to_mmc_host(dev);
  929. struct sdhci_host *shost = mmc_priv(mhost);
  930. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(shost);
  931. struct sdhci_msm_host *host = sdhci_pltfm_priv(pltfm_host);
  932. unsigned long value;
  933. if (!host || kstrtoul(buf, 0, &value))
  934. return -EINVAL;
  935. host->clk_scaling.polling_delay_ms = value;
  936. pr_debug("%s: clkscale_polling_delay_ms set to %lu\n",
  937. mmc_hostname(mhost), value);
  938. return count;
  939. }
/* sysfs knobs for the clock scaling policy (see the *_show/*_store handlers above) */
DEVICE_ATTR_RW(enable);
DEVICE_ATTR_RW(polling_interval);
DEVICE_ATTR_RW(up_threshold);
DEVICE_ATTR_RW(down_threshold);

static struct attribute *clk_scaling_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_up_threshold.attr,
	&dev_attr_down_threshold.attr,
	&dev_attr_polling_interval.attr,
	NULL,
};

/* Exposed under <class-dev>/clk_scaling/ by _sdhci_msm_mmc_init_clk_scaling() */
static struct attribute_group clk_scaling_attr_grp = {
	.name = "clk_scaling",
	.attrs = clk_scaling_attrs,
};
/**
 * _sdhci_msm_mmc_init_clk_scaling() - Initialize clock scaling
 * @host: pointer to the MSM SDHCI host structure
 *
 * Initialize clock scaling for supported hosts. It is assumed that the caller
 * ensure clock is running at maximum possible frequency before calling this
 * function. Shall use struct devfreq_simple_ondemand_data to configure
 * governor.
 *
 * Returns 0 on success, a negative errno on failure.
 */
int _sdhci_msm_mmc_init_clk_scaling(struct sdhci_msm_host *host)
{
	struct mmc_host *mhost = host->mmc;
	int err;
	struct devfreq *devfreq;

	if (!mhost || !mhost->card) {
		pr_err("%s: unexpected host/card parameters\n",
			__func__);
		return -EINVAL;
	}

	if (!sdhci_msm_mmc_can_scale_clk(host)) {
		pr_debug("%s: clock scaling is not supported\n",
			mmc_hostname(mhost));
		return 0;
	}

	pr_debug("registering %s dev (%pK) to devfreq\n",
		mmc_hostname(mhost),
		mmc_classdev(mhost));

	/* Refuse double initialization */
	if (host->clk_scaling.devfreq) {
		pr_err("%s: dev is already registered for dev %pK\n",
			mmc_hostname(mhost),
			mmc_dev(mhost));
		return -EPERM;
	}

	/* Reset scaling bookkeeping before registering with devfreq */
	spin_lock_init(&host->clk_scaling.lock);
	atomic_set(&host->clk_scaling.devfreq_abort, 0);
	host->clk_scaling.curr_freq = mhost->ios.clock;
	host->clk_scaling.need_freq_change = false;
	host->clk_scaling.is_busy_started = false;

	/* devfreq profile: status/target callbacks and polling configuration */
	host->clk_scaling.devfreq_profile.polling_ms =
		host->clk_scaling.polling_delay_ms;
	host->clk_scaling.devfreq_profile.get_dev_status =
		sdhci_msm_mmc_devfreq_get_dev_status;
	host->clk_scaling.devfreq_profile.target = sdhci_msm_mmc_devfreq_set_target;
	host->clk_scaling.devfreq_profile.initial_freq = mhost->ios.clock;
	host->clk_scaling.devfreq_profile.timer = DEVFREQ_TIMER_DELAYED;

	/* simple_ondemand governor tunables derived from the sysfs thresholds */
	host->clk_scaling.ondemand_gov_data.upthreshold =
		host->clk_scaling.upthreshold;
	host->clk_scaling.ondemand_gov_data.downdifferential =
		host->clk_scaling.upthreshold - host->clk_scaling.downthreshold;

	err = sdhci_msm_mmc_devfreq_create_freq_table(host);
	if (err) {
		pr_err("%s: fail to create devfreq frequency table\n",
			mmc_hostname(mhost));
		return err;
	}

	/*
	 * Register the two operating points with the OPP library.
	 * NOTE(review): freq_table[1] is accessed unconditionally — this
	 * assumes the table always has at least two entries; confirm against
	 * sdhci_msm_mmc_devfreq_create_freq_table().
	 */
	dev_pm_opp_add(mmc_classdev(mhost),
		host->clk_scaling.devfreq_profile.freq_table[0], 0);
	dev_pm_opp_add(mmc_classdev(mhost),
		host->clk_scaling.devfreq_profile.freq_table[1], 0);

	pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
		mmc_hostname(mhost),
		host->clk_scaling.ondemand_gov_data.upthreshold,
		host->clk_scaling.ondemand_gov_data.downdifferential,
		host->clk_scaling.devfreq_profile.polling_ms);

	devfreq = devfreq_add_device(
		mmc_classdev(mhost),
		&host->clk_scaling.devfreq_profile,
		"simple_ondemand",
		&host->clk_scaling.ondemand_gov_data);
	if (IS_ERR(devfreq)) {
		pr_err("%s: unable to register with devfreq\n",
			mmc_hostname(mhost));
		/* Roll back the OPP registrations on failure */
		dev_pm_opp_remove(mmc_classdev(mhost),
			host->clk_scaling.devfreq_profile.freq_table[0]);
		dev_pm_opp_remove(mmc_classdev(mhost),
			host->clk_scaling.devfreq_profile.freq_table[1]);
		return PTR_ERR(devfreq);
	}

	host->clk_scaling.devfreq = devfreq;
	pr_debug("%s: clk scaling is enabled for device %s (%pK) with devfreq %pK (clock = %uHz)\n",
		mmc_hostname(mhost),
		dev_name(mmc_classdev(mhost)),
		mmc_classdev(mhost),
		host->clk_scaling.devfreq,
		mhost->ios.clock);

	host->clk_scaling.enable = true;

	/*
	 * NOTE(review): if sysfs group creation fails, err is returned while
	 * the devfreq device stays registered and scaling stays enabled —
	 * confirm whether callers treat this as fatal.
	 */
	err = sysfs_create_group(&mhost->class_dev.kobj, &clk_scaling_attr_grp);
	if (err)
		pr_err("%s: failed to create clk scale sysfs group with err %d\n",
			__func__, err);

	return err;
}
EXPORT_SYMBOL_GPL(_sdhci_msm_mmc_init_clk_scaling);
  1048. /**
  1049. * mmc_suspend_clk_scaling() - suspend clock scaling
  1050. * @host: pointer to mmc host structure
  1051. *
  1052. * This API will suspend devfreq feature for the specific host.
  1053. * The statistics collected by mmc will be cleared.
  1054. * This function is intended to be called by the pm callbacks
  1055. * (e.g. runtime_suspend, suspend) of the mmc device
  1056. */
  1057. int _sdhci_msm_mmc_suspend_clk_scaling(struct sdhci_msm_host *host)
  1058. {
  1059. struct mmc_host *mhost = host->mmc;
  1060. int err;
  1061. if (!host) {
  1062. WARN(1, "bad host parameter\n");
  1063. return -EINVAL;
  1064. }
  1065. if (!sdhci_msm_mmc_can_scale_clk(host) || !host->clk_scaling.enable ||
  1066. host->clk_scaling.is_suspended)
  1067. return 0;
  1068. if (!host->clk_scaling.devfreq) {
  1069. pr_err("%s: %s: no devfreq is assosiated with this device\n",
  1070. mmc_hostname(mhost), __func__);
  1071. return -EPERM;
  1072. }
  1073. atomic_inc(&host->clk_scaling.devfreq_abort);
  1074. wake_up(&mhost->wq);
  1075. err = devfreq_suspend_device(host->clk_scaling.devfreq);
  1076. if (err) {
  1077. pr_err("%s: %s: failed to suspend devfreq\n",
  1078. mmc_hostname(mhost), __func__);
  1079. return err;
  1080. }
  1081. host->clk_scaling.is_suspended = true;
  1082. host->clk_scaling.total_busy_time_us = 0;
  1083. pr_debug("%s: devfreq was removed\n", mmc_hostname(mhost));
  1084. return 0;
  1085. }
  1086. EXPORT_SYMBOL_GPL(_sdhci_msm_mmc_suspend_clk_scaling);
  1087. /**
  1088. * mmc_resume_clk_scaling() - resume clock scaling
  1089. * @host: pointer to mmc host structure
  1090. *
  1091. * This API will resume devfreq feature for the specific host.
  1092. * This API is intended to be called by the pm callbacks
  1093. * (e.g. runtime_suspend, suspend) of the mmc device
  1094. */
  1095. int _sdhci_msm_mmc_resume_clk_scaling(struct sdhci_msm_host *host)
  1096. {
  1097. struct mmc_host *mhost = host->mmc;
  1098. int err = 0;
  1099. u32 max_clk_idx = 0;
  1100. u32 devfreq_max_clk = 0;
  1101. u32 devfreq_min_clk = 0;
  1102. if (!host) {
  1103. WARN(1, "bad host parameter\n");
  1104. return -EINVAL;
  1105. }
  1106. if (!sdhci_msm_mmc_can_scale_clk(host))
  1107. return 0;
  1108. /*
  1109. * If clock scaling is already exited when resume is called, like
  1110. * during mmc shutdown, it is not an error and should not fail the
  1111. * API calling this.
  1112. */
  1113. if (!host->clk_scaling.devfreq) {
  1114. pr_warn("%s: %s: no devfreq is assosiated with this device\n",
  1115. mmc_hostname(mhost), __func__);
  1116. return 0;
  1117. }
  1118. atomic_set(&host->clk_scaling.devfreq_abort, 0);
  1119. max_clk_idx = host->clk_scaling.freq_table_sz - 1;
  1120. devfreq_max_clk = host->clk_scaling.freq_table[max_clk_idx];
  1121. devfreq_min_clk = host->clk_scaling.freq_table[0];
  1122. host->clk_scaling.curr_freq = devfreq_max_clk;
  1123. if (mhost->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
  1124. host->clk_scaling.curr_freq = devfreq_min_clk;
  1125. host->clk_scaling.target_freq = host->clk_scaling.curr_freq;
  1126. err = devfreq_resume_device(host->clk_scaling.devfreq);
  1127. if (err) {
  1128. pr_err("%s: %s: failed to resume devfreq (%d)\n",
  1129. mmc_hostname(mhost), __func__, err);
  1130. } else {
  1131. host->clk_scaling.is_suspended = false;
  1132. pr_debug("%s: devfreq resumed\n", mmc_hostname(mhost));
  1133. }
  1134. return err;
  1135. }
  1136. EXPORT_SYMBOL_GPL(_sdhci_msm_mmc_resume_clk_scaling);
  1137. /**
  1138. * mmc_exit_devfreq_clk_scaling() - Disable clock scaling
  1139. * @host: pointer to mmc host structure
  1140. *
  1141. * Disable clock scaling permanently.
  1142. */
  1143. int _sdhci_msm_mmc_exit_clk_scaling(struct sdhci_msm_host *host)
  1144. {
  1145. struct mmc_host *mhost = host->mmc;
  1146. int err;
  1147. if (!host) {
  1148. pr_err("%s: bad host parameter\n", __func__);
  1149. WARN_ON(1);
  1150. return -EINVAL;
  1151. }
  1152. if (!sdhci_msm_mmc_can_scale_clk(host))
  1153. return 0;
  1154. if (!host->clk_scaling.devfreq) {
  1155. pr_err("%s: %s: no devfreq is assosiated with this device\n",
  1156. mmc_hostname(mhost), __func__);
  1157. return -EPERM;
  1158. }
  1159. err = _sdhci_msm_mmc_suspend_clk_scaling(host);
  1160. if (err) {
  1161. pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
  1162. mmc_hostname(mhost), __func__, err);
  1163. return err;
  1164. }
  1165. err = devfreq_remove_device(host->clk_scaling.devfreq);
  1166. if (err) {
  1167. pr_err("%s: remove devfreq failed (%d)\n",
  1168. mmc_hostname(mhost), err);
  1169. return err;
  1170. }
  1171. dev_pm_opp_remove(mmc_classdev(mhost),
  1172. host->clk_scaling.devfreq_profile.freq_table[0]);
  1173. dev_pm_opp_remove(mmc_classdev(mhost),
  1174. host->clk_scaling.devfreq_profile.freq_table[1]);
  1175. kfree(host->clk_scaling.devfreq_profile.freq_table);
  1176. host->clk_scaling.devfreq = NULL;
  1177. atomic_set(&host->clk_scaling.devfreq_abort, 1);
  1178. kfree(host->clk_scaling.freq_table);
  1179. host->clk_scaling.freq_table = NULL;
  1180. pr_debug("%s: devfreq was removed\n", mmc_hostname(mhost));
  1181. return 0;
  1182. }
  1183. EXPORT_SYMBOL_GPL(_sdhci_msm_mmc_exit_clk_scaling);
  1184. MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SDHCI Controller Support");
  1185. MODULE_LICENSE("GPL");