/*
 * drivers/cpufreq/cpufreq_limit.c
 *
 * Remade according to cpufreq change
 * (refer to commit df0eea4488081e0698b0b58ccd1e8c8823e22841
 * 18c49926c4bf4915e5194d1de3299c0537229f9f)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
#include <linux/cpufreq_limit.h>
#include <linux/err.h>
#include <linux/suspend.h>
#include <linux/cpu.h>
#include <linux/kobject.h>
#include <linux/timer.h>
#include <linux/platform_device.h>
#ifdef CONFIG_OF
#include <linux/of.h>
#endif
#include <trace/hooks/cpufreq.h>
#define MAX_BUF_SIZE 1024
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
/* adaptive boost from walt */
extern int cpufreq_walt_set_adaptive_freq(unsigned int cpu, unsigned int adaptive_low_freq,
		unsigned int adaptive_high_freq);
extern int cpufreq_walt_get_adaptive_freq(unsigned int cpu, unsigned int *adaptive_low_freq,
		unsigned int *adaptive_high_freq);
extern int cpufreq_walt_reset_adaptive_freq(unsigned int cpu);
static unsigned int __read_mostly lpcharge;
module_param(lpcharge, uint, 0444);
/* voltage based freq table */
#if IS_ENABLED(CONFIG_QTI_CPU_VOLTAGE_COOLING_DEVICE)
extern struct freq_voltage_base cflm_vbf;
#else
static struct freq_voltage_base cflm_vbf;
#endif
static DEFINE_MUTEX(cflm_mutex);
#define LIMIT_RELEASE -1
/* boosted state */
#define BOOSTED 1
#define NOT_BOOSTED 0
#define NUM_CPUS 8
static unsigned int cflm_req_init[NUM_CPUS];
static struct freq_qos_request max_req[NUM_CPUS][CFLM_MAX_ITEM];
static struct freq_qos_request min_req[NUM_CPUS][CFLM_MAX_ITEM];
static struct kobject *cflm_kobj;
struct freq_map {
	unsigned int in;
	unsigned int out;
};
/* adaptive boost threshold - high - low freq table */
struct aboost_th_table {
	int threshold;
	int high;
	int low;
};
/* input info: freq, time(TBD) */
struct input_info {
	int boosted;
	int min;
	int max;
	u64 time_in_min_limit;
	u64 time_in_max_limit;
	u64 time_in_over_limit;
	ktime_t last_min_limit_time;
	ktime_t last_max_limit_time;
	ktime_t last_over_limit_time;
};
static struct input_info freq_input[CFLM_MAX_ITEM];
struct cflm_parameter {
	/* to make virtual freq table */
	struct cpufreq_frequency_table *cpuftbl_L;
	struct cpufreq_frequency_table *cpuftbl_b;
	unsigned int unified_cpuftbl[50];
	unsigned int freq_count;
	bool table_initialized;
	/* cpu info: silver/gold/prime */
	unsigned int s_first;
	unsigned int s_fmin;
	unsigned int s_fmax;
	unsigned int g_first;
	unsigned int g_fmin;
	unsigned int g_fmax;
	unsigned int p_first;
	unsigned int p_fmin;
	unsigned int p_fmax;
	/* 4 policy arch: sm8650, titanium, same clock table as gold */
	bool titanium;
	unsigned int t_first;
	unsigned int t_fmin;
	unsigned int t_fmax;
	/* exceptional case */
	unsigned int g_fmin_up; /* fixed gold clock for performance */
	/* in virtual table little(silver)/big(gold & prime) */
	unsigned int big_min_freq;
	unsigned int big_max_freq;
	unsigned int ltl_min_freq;
	unsigned int ltl_max_freq;
	/* pre-defined value */
	struct freq_map *silver_boost_map;
	unsigned int boost_map_size;
	struct freq_map *silver_limit_map;
	unsigned int limit_map_size;
	unsigned int silver_divider;
	/* current freq in virtual table */
	unsigned int min_limit_val;
	unsigned int max_limit_val;
	/* sched boost type */
	int sched_boost_type;
	bool sched_boost_cond;
	bool sched_boost_enabled;
	/* over limit */
	unsigned int over_limit;
	/* voltage based clock */
	bool vol_based_clk;
	int vbf_offset; /* gold clock offset for perf */
	/* adaptive boost */
	bool ab_enabled;
	struct aboost_th_table *ab_table;
};
/* TODO: move to dtsi? */
static struct cflm_parameter param = {
	.freq_count = 0,
	.table_initialized = false,
	.s_first = 0,
	.g_first = 2,
	.p_first = 7,
	.titanium = 0,
	.t_first = 5,
	.g_fmin_up = 0, /* fixed gold clock for performance */
	.ltl_min_freq = 0, /* will be auto updated */
	.ltl_max_freq = 0, /* will be auto updated */
	.big_min_freq = 0, /* will be auto updated */
	.big_max_freq = 0, /* will be auto updated */
	.boost_map_size = 0,
	.limit_map_size = 0,
	.silver_divider = 2,
	.min_limit_val = -1,
	.max_limit_val = -1,
	.sched_boost_type = CONSERVATIVE_BOOST,
	.sched_boost_cond = false,
	.sched_boost_enabled = false,
	.over_limit = 0,
	.vol_based_clk = false,
};
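/*
 * cflm_make_table - build the unified virtual frequency table.
 * Big-cluster entries are copied as-is (clamped to big_min/big_max) and
 * LITTLE-cluster entries are divided by silver_divider (clamped to
 * ltl_min/ltl_max), so both clusters share one virtual scale in
 * unified_cpuftbl[]. Each source table is walked from its last entry,
 * which gives a high-to-low ordering when the per-cluster tables are
 * ascending (the usual cpufreq layout).
 */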
static bool cflm_make_table(void)
{
	int i, count = 0;
	int freq_count = 0;
	unsigned int freq;
	bool ret = false;
	/* big cluster table */
	if (!param.cpuftbl_b)
		goto little;
	for (i = 0; param.cpuftbl_b[i].frequency != CPUFREQ_TABLE_END; i++)
		count = i;
	for (i = count; i >= 0; i--) {
		freq = param.cpuftbl_b[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq < param.big_min_freq ||
				freq > param.big_max_freq)
			continue;
		param.unified_cpuftbl[freq_count++] = freq;
	}
little:
	/* LITTLE cluster table */
	if (!param.cpuftbl_L)
		goto done;
	for (i = 0; param.cpuftbl_L[i].frequency != CPUFREQ_TABLE_END; i++)
		count = i;
	for (i = count; i >= 0; i--) {
		freq = param.cpuftbl_L[i].frequency / param.silver_divider;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq < param.ltl_min_freq ||
				freq > param.ltl_max_freq)
			continue;
		param.unified_cpuftbl[freq_count++] = freq;
	}
done:
	if (freq_count) {
		pr_debug("%s: unified table is made\n", __func__);
		param.freq_count = freq_count;
		ret = true;
	} else {
		pr_err("%s: cannot make unified table\n", __func__);
	}
	return ret;
}
/**
 * cflm_set_table - cpufreq table from dt via qcom-cpufreq
 */
static void cflm_set_table(int cpu, struct cpufreq_frequency_table *ftbl)
{
	int i, count = 0;
	unsigned int max_freq_b = 0, min_freq_b = UINT_MAX;
	unsigned int max_freq_l = 0, min_freq_l = UINT_MAX;
	if (param.table_initialized)
		return;
	if (cpu == param.s_first)
		param.cpuftbl_L = ftbl;
	else if (cpu == param.p_first)
		param.cpuftbl_b = ftbl;
	if (!param.cpuftbl_L)
		return;
	if (!param.cpuftbl_b)
		return;
	pr_info("%s: freq table is ready, update config\n", __func__);
	/* update little config */
	for (i = 0; param.cpuftbl_L[i].frequency != CPUFREQ_TABLE_END; i++)
		count = i;
	for (i = count; i >= 0; i--) {
		if (param.cpuftbl_L[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;
		if (param.cpuftbl_L[i].frequency < min_freq_l)
			min_freq_l = param.cpuftbl_L[i].frequency;
		if (param.cpuftbl_L[i].frequency > max_freq_l)
			max_freq_l = param.cpuftbl_L[i].frequency;
	}
	if (!param.ltl_min_freq)
		param.ltl_min_freq = min_freq_l / param.silver_divider;
	if (!param.ltl_max_freq)
		param.ltl_max_freq = max_freq_l / param.silver_divider;
	/* update big config */
	for (i = 0; param.cpuftbl_b[i].frequency != CPUFREQ_TABLE_END; i++)
		count = i;
	for (i = count; i >= 0; i--) {
		if (param.cpuftbl_b[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((param.cpuftbl_b[i].frequency < min_freq_b) &&
				(param.cpuftbl_b[i].frequency > param.ltl_max_freq))
			min_freq_b = param.cpuftbl_b[i].frequency;
		if (param.cpuftbl_b[i].frequency > max_freq_b)
			max_freq_b = param.cpuftbl_b[i].frequency;
	}
	if (!param.big_min_freq)
		param.big_min_freq = min_freq_b;
	if (!param.big_max_freq)
		param.big_max_freq = max_freq_b;
	pr_info("%s: updated: little(%u-%u), big(%u-%u)\n", __func__,
			param.ltl_min_freq, param.ltl_max_freq,
			param.big_min_freq, param.big_max_freq);
	param.table_initialized = cflm_make_table();
}
/**
 * cflm_get_table - fill the cpufreq table to support HMP
 * @buf a buf that has been requested to fill the cpufreq table
 */
static ssize_t cflm_get_table(char *buf)
{
	ssize_t len = 0;
	int i = 0;
	if (!param.freq_count)
		return len;
	for (i = 0; i < param.freq_count; i++)
		len += snprintf(buf + len, MAX_BUF_SIZE, "%u ",
				param.unified_cpuftbl[i]);
	len--;
	len += snprintf(buf + len, MAX_BUF_SIZE, "\n");
	pr_info("%s: %s\n", __func__, buf);
	return len;
}
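/*
 * cflm_update_boost - toggle sched boost based on pending min locks.
 * If any requester holds a min lock above the little cluster's virtual
 * max, sched_set_boost() is called with sched_boost_type; once no such
 * lock remains, the boost is cancelled by passing the negated type.
 */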
static void cflm_update_boost(void)
{
	int i;
	bool boost_condition = false;
	/* sched boost */
	param.sched_boost_cond = false;
	for (i = 0; i < CFLM_MAX_ITEM; i++) {
		if (freq_input[i].min > (int)param.ltl_max_freq) {
			param.sched_boost_cond = true;
			boost_condition = true;
			break;
		}
	}
	if (boost_condition) {
		if (!param.sched_boost_enabled) {
			pr_debug("%s: sched boost on, type(%d)\n", __func__, param.sched_boost_type);
			sched_set_boost(param.sched_boost_type);
			param.sched_boost_enabled = true;
		} else {
			pr_debug("%s: sched boost already on, do nothing\n", __func__);
		}
	} else {
		if (param.sched_boost_enabled) {
			pr_debug("%s: sched boost off(%d)\n", __func__, (param.sched_boost_type * -1));
			sched_set_boost(param.sched_boost_type * -1);
			param.sched_boost_enabled = false;
		} else {
			pr_debug("%s: sched boost already off, do nothing\n", __func__);
		}
	}
}
static s32 cflm_freq_qos_read_value(struct freq_constraints *qos,
		enum freq_qos_req_type type)
{
	s32 ret;
	switch (type) {
	case FREQ_QOS_MIN:
		ret = IS_ERR_OR_NULL(qos) ?
			FREQ_QOS_MIN_DEFAULT_VALUE :
			READ_ONCE(qos->min_freq.target_value);
		break;
	case FREQ_QOS_MAX:
		ret = IS_ERR_OR_NULL(qos) ?
			FREQ_QOS_MAX_DEFAULT_VALUE :
			READ_ONCE(qos->max_freq.target_value);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}
	return ret;
}
static void cflm_current_qos(void)
{
	struct cpufreq_policy *policy;
	int s_min = 0, s_max = 0;
	int g_min = 0, g_max = 0;
	int p_min = 0, p_max = 0;
	int t_min = 0, t_max = 0;
	unsigned int a_low = 0, a_high = 0;
	policy = cpufreq_cpu_get(param.s_first);
	if (policy) {
		s_min = cflm_freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
		s_max = cflm_freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
		cpufreq_cpu_put(policy);
	}
	if (param.ab_enabled) {
		cpufreq_walt_get_adaptive_freq(param.s_first, &a_low, &a_high);
		pr_cont("%s: s[%d(%d, %d)-%d]", __func__, s_min, a_low, a_high, s_max);
	} else {
		pr_cont("%s: s[%d-%d]", __func__, s_min, s_max);
	}
	policy = cpufreq_cpu_get(param.g_first);
	if (policy) {
		g_min = cflm_freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
		g_max = cflm_freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
		cpufreq_cpu_put(policy);
	}
	if (param.ab_enabled) {
		cpufreq_walt_get_adaptive_freq(param.g_first, &a_low, &a_high);
		pr_cont(", g[%d(%d, %d)-%d]", g_min, a_low, a_high, g_max);
	} else {
		pr_cont(", g[%d-%d]", g_min, g_max);
	}
	/* adaptive boost is not used for titanium and prime for now */
	if (param.titanium) {
		policy = cpufreq_cpu_get(param.t_first);
		if (policy) {
			t_min = cflm_freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
			t_max = cflm_freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
			cpufreq_cpu_put(policy);
		}
		pr_cont(", t[%d-%d]", t_min, t_max);
	}
	policy = cpufreq_cpu_get(param.p_first);
	if (policy) {
		p_min = cflm_freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
		p_max = cflm_freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
		cpufreq_cpu_put(policy);
	}
	pr_cont(", p[%d-%d]", p_min, p_max);
	pr_cont("\n");
}
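/*
 * over_limit support: while a high-priority min lock (userspace or touch)
 * above the little cluster's virtual max is active, the userspace max lock
 * is temporarily raised to at least over_limit. The two helpers below
 * decide when that raised cap applies and when the original userspace max
 * lock has to be restored.
 */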
static bool cflm_max_lock_need_restore(void)
{
	if ((int)param.over_limit <= 0)
		return false;
	if (freq_input[CFLM_USERSPACE].min > 0) {
		if (freq_input[CFLM_USERSPACE].min > (int)param.ltl_max_freq) {
			pr_debug("%s: userspace minlock (%d) > ltl max (%d)\n",
					__func__, freq_input[CFLM_USERSPACE].min, param.ltl_max_freq);
			return false;
		}
	}
	if (freq_input[CFLM_TOUCH].min > 0) {
		if (freq_input[CFLM_TOUCH].min > (int)param.ltl_max_freq) {
			pr_debug("%s: touch minlock (%d) > ltl max (%d)\n",
					__func__, freq_input[CFLM_TOUCH].min, param.ltl_max_freq);
			return false;
		}
	}
	return true;
}
static bool cflm_high_pri_min_lock_required(void)
{
	if ((int)param.over_limit <= 0)
		return false;
	if (freq_input[CFLM_USERSPACE].min > 0) {
		if (freq_input[CFLM_USERSPACE].min > (int)param.ltl_max_freq) {
			pr_debug("%s: userspace minlock (%d) > ltl max (%d)\n",
					__func__, freq_input[CFLM_USERSPACE].min, param.ltl_max_freq);
			return true;
		}
	}
	if (freq_input[CFLM_TOUCH].min > 0) {
		if (freq_input[CFLM_TOUCH].min > (int)param.ltl_max_freq) {
			pr_debug("%s: touch minlock (%d) > ltl max (%d)\n",
					__func__, freq_input[CFLM_TOUCH].min, param.ltl_max_freq);
			return true;
		}
	}
	return false;
}
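/*
 * cflm_get_vol_matched_freq - map a prime frequency to a gold frequency
 * through the shared voltage table: starting at index vbf_offset, find the
 * first prime entry at or below in_freq and return the gold entry located
 * vbf_offset indices earlier. Used when vol_based_clk is enabled to derive
 * the titanium cap from the prime cap.
 */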
static unsigned int cflm_get_vol_matched_freq(unsigned int in_freq)
{
	int i;
	unsigned int out_freq = in_freq;
	if (param.vbf_offset > cflm_vbf.count || param.vbf_offset < 0) {
		pr_err("%s: bad condition(off(%d), cnt(%d))",
				__func__, param.vbf_offset, cflm_vbf.count);
		return out_freq;
	}
	/* start from offset */
	for (i = param.vbf_offset; i < cflm_vbf.count; i++) {
		if (cflm_vbf.table[PRIME_CPU][i] <= in_freq) {
			out_freq = cflm_vbf.table[GOLD_CPU][i - param.vbf_offset];
			break;
		}
	}
	pr_debug("%s: in(%d), out(%d)\n", __func__, in_freq, out_freq);
	return out_freq;
}
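/*
 * Virtual-to-silver translation: cflm_get_silver_boost() converts a virtual
 * min-lock value into a silver target via the boost map (the first entry
 * whose 'in' is not above the request wins), cflm_get_silver_limit() does
 * the same for max locks via the limit map, capped at the silver fmax.
 * Without a map hit, the virtual value is multiplied by silver_divider.
 */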
static int cflm_get_silver_boost(int freq)
{
	int i;
	for (i = 0; i < param.boost_map_size; i++)
		if (freq >= param.silver_boost_map[i].in)
			return param.silver_boost_map[i].out;
	return freq * param.silver_divider;
}
static int cflm_get_silver_limit(int freq)
{
	int i;
	/* prime limit condition */
	for (i = 0; i < param.limit_map_size; i++)
		if (freq >= param.silver_limit_map[i].in)
			return MIN(param.silver_limit_map[i].out, param.s_fmax);
	/* silver limit condition */
	return freq * param.silver_divider;
}
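/*
 * cflm_adaptive_boost - program the WALT adaptive frequency window for every
 * CPU in first_cpu's policy. The ab_table entry for the cluster's first CPU
 * selects the adaptive low frequency (the 'high' entry once min reaches the
 * threshold, the 'low' entry otherwise) while min itself becomes the
 * adaptive high frequency; min == 0 clears the window. Only available when
 * the walt governor is in use.
 */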
static int cflm_adaptive_boost(int first_cpu, int min)
{
	struct cpufreq_policy *policy;
	int cpu = 0;
	int ret = 0;
	int aboost_low;
	pr_debug("%s: cpu%d: %d\n", __func__, first_cpu, min);
	if (!param.ab_enabled)
		return -EINVAL;
	if (!param.ab_table)
		return -EINVAL;
	if (!param.ab_table[first_cpu].threshold)
		return -EINVAL;
	policy = cpufreq_cpu_get(first_cpu);
	if (!policy) {
		pr_err("%s: no policy for cpu%d\n", __func__, first_cpu);
		return -EFAULT;
	}
	if (strcmp((policy->governor->name), "walt")) {
		pr_err("%s: not supported gov(%s)\n", __func__, policy->governor->name);
		cpufreq_cpu_put(policy); /* drop the reference taken above before bailing out */
		return -EFAULT;
	}
	if (min >= param.ab_table[first_cpu].threshold)
		aboost_low = param.ab_table[first_cpu].high;
	else
		aboost_low = param.ab_table[first_cpu].low;
	if ((min > 0) && (min < aboost_low)) {
		pr_err("%s: cpu%d boost min(%d) is lower than adaptive low(%d)\n",
				__func__, first_cpu, min, aboost_low);
		aboost_low = min;
	}
	for_each_cpu(cpu, policy->related_cpus) {
		if (min > 0) {
			pr_debug("%s: set aboost: cpu%d: %d, %d\n", __func__, cpu, aboost_low, min);
			ret = cpufreq_walt_set_adaptive_freq(cpu, aboost_low, min);
		} else {
			pr_debug("%s: clear aboost: cpu%d\n", __func__, cpu);
			ret = cpufreq_walt_reset_adaptive_freq(cpu);
		}
	}
	cpufreq_cpu_put(policy);
	return ret;
}
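/*
 * cflm_freq_decision - central policy: translate one requester's virtual
 * min/max into per-cluster freq_qos requests. A positive min boosts silver
 * via the boost map (plus adaptive boost where supported) and raises
 * gold/prime when it exceeds the little virtual range; a positive max caps
 * each cluster, dropping gold/titanium/prime to their floors when the cap
 * falls below the big virtual range. LIMIT_RELEASE (-1) drops the
 * requester's locks. Limit-time statistics and the over_limit override for
 * the userspace max lock are updated along the way, and sched boost state
 * is re-evaluated at the end.
 */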
static void cflm_freq_decision(int type, int new_min, int new_max)
{
	int cpu = 0;
	int s_min = param.s_fmin;
	int s_max = param.s_fmax;
	int g_min = param.g_fmin;
	int g_max = param.g_fmax;
	int p_min = param.p_fmin;
	int p_max = param.p_fmax;
	int t_max = param.t_fmax;
	bool need_update_user_max = false;
	int new_user_max = FREQ_QOS_MAX_DEFAULT_VALUE;
	pr_info("%s: input: type(%d), min(%d), max(%d)\n",
			__func__, type, new_min, new_max);
	/* update input freq */
	if (new_min != 0) {
		freq_input[type].min = new_min;
		if ((new_min == LIMIT_RELEASE || new_min == param.ltl_min_freq) &&
				freq_input[type].last_min_limit_time != 0) {
			freq_input[type].time_in_min_limit += ktime_to_ms(ktime_get()-
					freq_input[type].last_min_limit_time);
			freq_input[type].last_min_limit_time = 0;
			freq_input[type].boosted = NOT_BOOSTED;
			pr_debug("%s: type(%d), released(%d)\n", __func__, type, freq_input[type].boosted);
		}
		if (new_min != LIMIT_RELEASE && new_min != param.ltl_min_freq &&
				freq_input[type].last_min_limit_time == 0) {
			freq_input[type].last_min_limit_time = ktime_get();
			freq_input[type].boosted = BOOSTED;
			pr_debug("%s: type(%d), boosted(%d)\n", __func__, type, freq_input[type].boosted);
		}
	}
	if (new_max != 0) {
		freq_input[type].max = new_max;
		if ((new_max == LIMIT_RELEASE || new_max == param.big_max_freq) &&
				freq_input[type].last_max_limit_time != 0) {
			freq_input[type].time_in_max_limit += ktime_to_ms(ktime_get() -
					freq_input[type].last_max_limit_time);
			freq_input[type].last_max_limit_time = 0;
		}
		if (new_max != LIMIT_RELEASE && new_max != param.big_max_freq &&
				freq_input[type].last_max_limit_time == 0) {
			freq_input[type].last_max_limit_time = ktime_get();
		}
	}
	if (new_min > 0) {
		if (new_min < param.ltl_min_freq) {
			pr_err("%s: too low freq(%d), set to %d\n",
					__func__, new_min, param.ltl_min_freq);
			new_min = param.ltl_min_freq;
		}
		pr_debug("%s: new_min=%d, ltl_max=%d, over_limit=%d\n", __func__,
				new_min, param.ltl_max_freq, param.over_limit);
		if ((type == CFLM_USERSPACE || type == CFLM_TOUCH) &&
				cflm_high_pri_min_lock_required()) {
			if (freq_input[CFLM_USERSPACE].max > 0) {
				need_update_user_max = true;
				new_user_max = MAX((int)param.over_limit, freq_input[CFLM_USERSPACE].max);
				pr_debug("%s: override new_max %d => %d, userspace_min=%d, touch_min=%d, ltl_max=%d\n",
						__func__, freq_input[CFLM_USERSPACE].max, new_user_max, freq_input[CFLM_USERSPACE].min,
						freq_input[CFLM_TOUCH].min, param.ltl_max_freq);
			}
		}
		/* boost @gold/prime */
		s_min = cflm_get_silver_boost(new_min);
		if (new_min > param.ltl_max_freq) {
			g_min = MIN(new_min, param.g_fmax);
			p_min = MIN(new_min, param.p_fmax);
		} else {
			g_min = param.g_fmin;
			p_min = param.p_fmin;
		}
		if (cflm_adaptive_boost(param.s_first, s_min) < 0)
			freq_qos_update_request(&min_req[param.s_first][type], s_min); /* prevent adaptive boost fail */
		if (cflm_adaptive_boost(param.g_first, g_min) < 0)
			freq_qos_update_request(&min_req[param.g_first][type], g_min);
		freq_qos_update_request(&min_req[param.p_first][type], p_min);
		/* TEMP??: no boost for titanium
		 *if (param.titanium)
		 *	freq_qos_update_request(&min_req[param.t_first][type], g_min);
		 */
	} else if (new_min == LIMIT_RELEASE) {
		for_each_possible_cpu(cpu) {
			freq_qos_update_request(&min_req[cpu][type],
					FREQ_QOS_MIN_DEFAULT_VALUE);
		}
		if (param.ab_enabled) {
			int i;
			int aggr_state = 0;
			for (i = 0; i < CFLM_MAX_ITEM; i++)
				aggr_state += freq_input[i].boosted;
			if (aggr_state == 0) {
				cflm_adaptive_boost(param.s_first, 0);
				cflm_adaptive_boost(param.g_first, 0);
				pr_debug("%s: aboost: clear\n", __func__);
			}
		}
		if ((type == CFLM_USERSPACE || type == CFLM_TOUCH) &&
				cflm_max_lock_need_restore()) { // if there is no high priority min lock and over limit is set
			if (freq_input[CFLM_USERSPACE].max > 0) {
				need_update_user_max = true;
				new_user_max = freq_input[CFLM_USERSPACE].max;
				pr_debug("%s: restore new_max => %d\n",
						__func__, new_user_max);
			}
		}
	}
	if (new_max > 0) {
		if (new_max > param.big_max_freq) {
			pr_err("%s: too high freq(%d), set to %d\n",
					__func__, new_max, param.big_max_freq);
			new_max = param.big_max_freq;
		}
		if ((type == CFLM_USERSPACE) && // if userspace maxlock is being set
				cflm_high_pri_min_lock_required()) {
			need_update_user_max = true;
			new_user_max = MAX((int)param.over_limit, freq_input[CFLM_USERSPACE].max);
			pr_debug("%s: force up new_max %d => %d, userspace_min=%d, touch_min=%d, ltl_max=%d\n",
					__func__, new_max, new_user_max, freq_input[CFLM_USERSPACE].min,
					freq_input[CFLM_TOUCH].min, param.ltl_max_freq);
		}
		s_max = cflm_get_silver_limit(new_max);
		if (new_max < param.big_min_freq) {
			/* if silver clock is limited as fmax,
			 * set promised clock for gold cluster
			 */
			if ((new_max == param.s_fmax / param.silver_divider) && (param.g_fmin_up > 0)) {
				g_max = param.g_fmin_up;
				t_max = param.g_fmin_up;
			} else {
				g_max = param.g_fmin;
				t_max = param.t_fmin;
			}
			p_max = param.p_fmin;
		} else {
			p_max = MIN(new_max, param.p_fmax);
			g_max = MIN(new_max, param.g_fmax);
			if (param.vol_based_clk == true && cflm_vbf.count > 0)
				t_max = MIN(cflm_get_vol_matched_freq(p_max), param.t_fmax);
			else
				t_max = MIN(new_max, param.t_fmax);
		}
		freq_qos_update_request(&max_req[param.s_first][type], s_max);
		freq_qos_update_request(&max_req[param.g_first][type], g_max);
		freq_qos_update_request(&max_req[param.p_first][type], p_max);
		if (param.titanium)
			freq_qos_update_request(&max_req[param.t_first][type], t_max);
	} else if (new_max == LIMIT_RELEASE) {
		for_each_possible_cpu(cpu)
			freq_qos_update_request(&max_req[cpu][type],
					FREQ_QOS_MAX_DEFAULT_VALUE);
	}
	if ((freq_input[type].min <= (int)param.ltl_max_freq || new_user_max != (int)param.over_limit) &&
			freq_input[type].last_over_limit_time != 0) {
		freq_input[type].time_in_over_limit += ktime_to_ms(ktime_get() -
				freq_input[type].last_over_limit_time);
		freq_input[type].last_over_limit_time = 0;
	}
	if (freq_input[type].min > (int)param.ltl_max_freq && new_user_max == (int)param.over_limit &&
			freq_input[type].last_over_limit_time == 0) {
		freq_input[type].last_over_limit_time = ktime_get();
	}
	if (need_update_user_max) {
		pr_debug("%s: update_user_max is true\n", __func__);
		if (new_user_max > param.big_max_freq) {
			pr_debug("%s: too high freq(%d), set to %d\n",
					__func__, new_user_max, param.big_max_freq);
			new_user_max = param.big_max_freq;
		}
		s_max = cflm_get_silver_limit(new_user_max);
		if (new_user_max < param.big_min_freq) {
			/* if silver clock is limited as fmax,
			 * set promised clock for gold cluster
			 */
			if ((new_user_max == param.s_fmax / param.silver_divider) && (param.g_fmin_up > 0)) {
				g_max = param.g_fmin_up;
				t_max = param.g_fmin_up; /* use same freq with gold */
			} else {
				g_max = param.g_fmin;
				t_max = param.t_fmin;
			}
			p_max = param.p_fmin;
		} else {
			p_max = MIN(new_user_max, param.p_fmax);
			g_max = MIN(new_user_max, param.g_fmax);
			if (param.vol_based_clk == true && cflm_vbf.count > 0)
				t_max = MIN(cflm_get_vol_matched_freq(p_max), param.t_fmax);
			else
				t_max = MIN(new_user_max, param.t_fmax);
		}
		pr_info("%s: freq_update_request : new userspace max %d %d %d %d\n", __func__, s_max, g_max, t_max, p_max);
		freq_qos_update_request(&max_req[param.s_first][CFLM_USERSPACE], s_max);
		freq_qos_update_request(&max_req[param.g_first][CFLM_USERSPACE], g_max);
		freq_qos_update_request(&max_req[param.p_first][CFLM_USERSPACE], p_max);
		if (param.titanium)
			freq_qos_update_request(&max_req[param.t_first][CFLM_USERSPACE], g_max);
	}
	cflm_update_boost();
	cflm_current_qos();
}
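/*
 * sysfs interface: the handlers below back the cpufreq_table,
 * cpufreq_min_limit, cpufreq_max_limit, over_limit, limit_stat, vtable and
 * sched_boost_type nodes that cflm_probe() creates under the cpu subsystem
 * ("cpufreq_limit" kobject). Writes to the limit nodes funnel into
 * cflm_freq_decision() as CFLM_USERSPACE requests.
 */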
static ssize_t cpufreq_table_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	len = cflm_get_table(buf);
	return len;
}
static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return snprintf(buf, MAX_BUF_SIZE, "%d\n", param.max_limit_val);
}
static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t n)
{
	int freq;
	int ret = -EINVAL;
	ret = kstrtoint(buf, 10, &freq);
	if (ret < 0) {
		pr_err("%s: cflm: Invalid cpufreq format\n", __func__);
		goto out;
	}
	mutex_lock(&cflm_mutex);
	param.max_limit_val = freq;
	cflm_freq_decision(CFLM_USERSPACE, 0, freq);
	mutex_unlock(&cflm_mutex);
	ret = n;
out:
	return ret;
}
static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return snprintf(buf, MAX_BUF_SIZE, "%d\n", param.min_limit_val);
}
static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t n)
{
	int freq;
	int ret = -EINVAL;
	ret = kstrtoint(buf, 10, &freq);
	if (ret < 0) {
		pr_err("%s: cflm: Invalid cpufreq format\n", __func__);
		goto out;
	}
	mutex_lock(&cflm_mutex);
	cflm_freq_decision(CFLM_USERSPACE, freq, 0);
	param.min_limit_val = freq;
	mutex_unlock(&cflm_mutex);
	ret = n;
out:
	return ret;
}
static ssize_t over_limit_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return snprintf(buf, MAX_BUF_SIZE, "%d\n", param.over_limit);
}
static ssize_t over_limit_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t n)
{
	int freq;
	int ret = -EINVAL;
	ret = kstrtoint(buf, 10, &freq);
	if (ret < 0) {
		pr_err("%s: cflm: Invalid cpufreq format\n", __func__);
		goto out;
	}
	mutex_lock(&cflm_mutex);
	if (param.over_limit != freq) {
		param.over_limit = freq;
		if ((int)param.max_limit_val > 0)
			cflm_freq_decision(CFLM_USERSPACE, 0, param.max_limit_val);
	}
	mutex_unlock(&cflm_mutex);
	ret = n;
out:
	return ret;
}
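/*
 * limit_stat_show - report, per requester, the accumulated milliseconds
 * spent under a min limit, under a max limit, and in the over_limit state.
 * Open intervals are folded into the counters at read time so the totals
 * stay current.
 */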
static ssize_t limit_stat_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	ssize_t len = 0;
	int i, j = 0;
	mutex_lock(&cflm_mutex);
	for (i = 0; i < CFLM_MAX_ITEM; i++) {
		if (freq_input[i].last_min_limit_time != 0) {
			freq_input[i].time_in_min_limit += ktime_to_ms(ktime_get() -
					freq_input[i].last_min_limit_time);
			freq_input[i].last_min_limit_time = ktime_get();
		}
		if (freq_input[i].last_max_limit_time != 0) {
			freq_input[i].time_in_max_limit += ktime_to_ms(ktime_get() -
					freq_input[i].last_max_limit_time);
			freq_input[i].last_max_limit_time = ktime_get();
		}
		if (freq_input[i].last_over_limit_time != 0) {
			freq_input[i].time_in_over_limit += ktime_to_ms(ktime_get() -
					freq_input[i].last_over_limit_time);
			freq_input[i].last_over_limit_time = ktime_get();
		}
	}
	for (j = 0; j < CFLM_MAX_ITEM; j++) {
		len += snprintf(buf + len, MAX_BUF_SIZE - len, "%llu %llu %llu\n",
				freq_input[j].time_in_min_limit, freq_input[j].time_in_max_limit,
				freq_input[j].time_in_over_limit);
	}
	mutex_unlock(&cflm_mutex);
	return len;
}
static unsigned int cflm_get_table_freq(struct cpufreq_policy *policy,
		unsigned int target_freq, unsigned int relation)
{
	unsigned int idx;
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	if (!policy->freq_table)
		return target_freq;
	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
	return policy->freq_table[idx].frequency;
}
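/*
 * vtable_show - dump the virtual table translation: for each unified
 * frequency, print the per-cluster clocks that would be applied as a max
 * limit and as a min boost (prime/titanium/gold/silver). Only meaningful
 * when the voltage table is populated and no max limit is currently set.
 */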
static ssize_t vtable_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	int i = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(param.g_first);
	unsigned int virt_clk = 0;
	if (!cflm_vbf.count)
		return len;
	if (param.vbf_offset > cflm_vbf.count) {
		pr_err("%s: bad condition(off(%d), cnt(%d))",
				__func__, param.vbf_offset, cflm_vbf.count);
		return len;
	}
	if (param.max_limit_val != LIMIT_RELEASE)
		len += snprintf(buf + len, MAX_BUF_SIZE, "!!!) please, read table again when no limit state\n");
	len += snprintf(buf + len, MAX_BUF_SIZE, "========================max===============================min================\n");
	len += snprintf(buf + len, MAX_BUF_SIZE, " virt | prime titan gold silver | prime titan gold silver\n");
	for (i = 0; i < param.freq_count; i++) {
		virt_clk = param.unified_cpuftbl[i];
		if (virt_clk > param.ltl_max_freq) {
			len += snprintf(buf + len, MAX_BUF_SIZE, " %7u | %7u %7u %7u %7u | %7u %7u %7u %7u\n",
					virt_clk,
					/* max = limit */
					virt_clk,
					cflm_get_vol_matched_freq(virt_clk),
					cflm_get_table_freq(policy, virt_clk, CPUFREQ_RELATION_H),
					cflm_get_silver_limit(virt_clk),
					/* min = boost */
					virt_clk,
					0,
					cflm_get_table_freq(policy, virt_clk, CPUFREQ_RELATION_L),
					cflm_get_silver_boost(virt_clk));
		} else {
			len += snprintf(buf + len, MAX_BUF_SIZE, " %7u | %7u %7u %7u %7u | %7u %7u %7u %7u\n",
					virt_clk,
					/* max = limit */
					param.p_fmin,
					param.t_fmin,
					param.g_fmin,
					cflm_get_silver_limit(virt_clk),
					/* min = boost */
					0,
					0,
					0,
					cflm_get_silver_boost(virt_clk));
		}
	}
	len += snprintf(buf + len, MAX_BUF_SIZE, "=============================================================================\n");
	cpufreq_cpu_put(policy);
	pr_info("%s: %s\n", __func__, buf);
	return len;
}
static ssize_t sched_boost_type_show(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return snprintf(buf, MAX_BUF_SIZE, "%d\n", param.sched_boost_type);
}
static ssize_t sched_boost_type_store(struct kobject *kobj,
		struct kobj_attribute *attr,
		const char *buf, size_t n)
{
	int boost_type;
	int ret = -EINVAL;
	ret = kstrtoint(buf, 10, &boost_type);
	if (ret < 0) {
		pr_err("%s: cflm: Invalid cpufreq format\n", __func__);
		goto out;
	}
	mutex_lock(&cflm_mutex);
	if ((param.sched_boost_enabled) && (param.sched_boost_type != boost_type)) {
		pr_info("%s: sched boost is enabled(%d), reset(%d)\n", __func__, param.sched_boost_type, boost_type);
		sched_set_boost(param.sched_boost_type * -1);
		sched_set_boost(boost_type);
	}
	param.sched_boost_type = boost_type;
	pr_info("%s: sched boost type is changed to %d\n", __func__, param.sched_boost_type);
	mutex_unlock(&cflm_mutex);
	ret = n;
out:
	return ret;
}
/* sysfs attributes, exposed under the cpufreq_limit kobject (see cflm_probe()) */
static struct kobj_attribute cpufreq_table = {
	.attr = {
		.name = "cpufreq_table",
		.mode = 0444
	},
	.show = cpufreq_table_show,
	.store = NULL,
};
static struct kobj_attribute cpufreq_min_limit = {
	.attr = {
		.name = "cpufreq_min_limit",
		.mode = 0644
	},
	.show = cpufreq_min_limit_show,
	.store = cpufreq_min_limit_store,
};
static struct kobj_attribute cpufreq_max_limit = {
	.attr = {
		.name = "cpufreq_max_limit",
		.mode = 0644
	},
	.show = cpufreq_max_limit_show,
	.store = cpufreq_max_limit_store,
};
static struct kobj_attribute over_limit = {
	.attr = {
		.name = "over_limit",
		.mode = 0644
	},
	.show = over_limit_show,
	.store = over_limit_store,
};
static struct kobj_attribute limit_stat = {
	.attr = {
		.name = "limit_stat",
		.mode = 0644
	},
	.show = limit_stat_show,
};
static struct kobj_attribute vtable = {
	.attr = {
		.name = "vtable",
		.mode = 0444
	},
	.show = vtable_show,
	.store = NULL,
};
static struct kobj_attribute sched_boost_type = {
	.attr = {
		.name = "sched_boost_type",
		.mode = 0644
	},
	.show = sched_boost_type_show,
	.store = sched_boost_type_store,
};
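/*
 * set_freq_limit - exported entry point for in-kernel callers to request a
 * min lock by requester id (e.g. CFLM_TOUCH); ignored while lpcharge is set.
 */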
int set_freq_limit(unsigned int id, unsigned int freq)
{
	if (lpcharge) {
		pr_err("%s: not allowed in LPM\n", __func__);
		return 0;
	}
	mutex_lock(&cflm_mutex);
	pr_info("%s: cflm: id(%d) freq(%d)\n", __func__, (int)id, freq);
	cflm_freq_decision(id, freq, 0);
	mutex_unlock(&cflm_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(set_freq_limit);
#define cflm_attr_rw(_name) \
static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, show_##_name, store_##_name)
#define show_one(file_name) \
static ssize_t show_##file_name \
(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
	return scnprintf(buf, PAGE_SIZE, "%u\n", param.file_name); \
}
#define store_one(file_name) \
static ssize_t store_##file_name \
(struct kobject *kobj, struct kobj_attribute *attr, \
	const char *buf, size_t count) \
{ \
	int ret; \
 \
	ret = sscanf(buf, "%u", &param.file_name); \
	if (ret != 1) \
		return -EINVAL; \
 \
	return count; \
}
/* voltage based */
show_one(vol_based_clk);
store_one(vol_based_clk);
cflm_attr_rw(vol_based_clk);
show_one(vbf_offset);
store_one(vbf_offset);
cflm_attr_rw(vbf_offset);
/* adaptive boost */
show_one(ab_enabled);
store_one(ab_enabled);
cflm_attr_rw(ab_enabled);
static ssize_t show_cflm_info(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	int i = 0;
	mutex_lock(&cflm_mutex);
	len += snprintf(buf, MAX_BUF_SIZE, "[basic info]\n");
	len += snprintf(buf + len, MAX_BUF_SIZE - len,
			"real: silver(%d ~ %d), gold(%d ~ %d), prime(%d ~ %d)\n",
			param.s_fmin, param.s_fmax,
			param.g_fmin, param.g_fmax,
			param.p_fmin, param.p_fmax);
	len += snprintf(buf + len, MAX_BUF_SIZE - len,
			"virt: little(%d ~ %d), big(%d ~ %d)\n",
			param.ltl_min_freq, param.ltl_max_freq,
			param.big_min_freq, param.big_max_freq);
	len += snprintf(buf + len, MAX_BUF_SIZE - len,
			"param: div(%d), sched boost(%d)\n",
			param.silver_divider, param.sched_boost_type);
	len += snprintf(buf + len, MAX_BUF_SIZE - len,
			"param: vbf(%d), offset(%d), aboost(%d)\n",
			param.vol_based_clk, param.vbf_offset, param.ab_enabled);
	len += snprintf(buf + len, MAX_BUF_SIZE - len, "[requested info]\n");
	for (i = 0; i < CFLM_MAX_ITEM; i++) {
		len += snprintf(buf + len, MAX_BUF_SIZE - len,
				"requested: [%d] min(%d), max(%d)\n",
				i, freq_input[i].min, freq_input[i].max);
	}
	len += snprintf(buf + len, MAX_BUF_SIZE - len, "[aboost table]\n");
	if ((param.ab_enabled) && (param.ab_table)) {
		for (i = 0; i < NUM_CPUS; i++) {
			len += snprintf(buf + len, MAX_BUF_SIZE - len, "cpu%d: %d %d %d\n",
					i, param.ab_table[i].threshold,
					param.ab_table[i].high, param.ab_table[i].low);
		}
	}
	mutex_unlock(&cflm_mutex);
	return len;
}
static struct kobj_attribute cflm_info =
	__ATTR(info, 0444, show_cflm_info, NULL);
static struct attribute *cflm_attributes[] = {
	&cpufreq_table.attr,
	&cpufreq_min_limit.attr,
	&cpufreq_max_limit.attr,
	&over_limit.attr,
	&limit_stat.attr,
	&cflm_info.attr,
	&vtable.attr,
	&sched_boost_type.attr,
	&vol_based_clk_attr.attr,
	&vbf_offset_attr.attr,
	&ab_enabled_attr.attr,
	NULL,
};
static struct attribute_group cflm_attr_group = {
	.attrs = cflm_attributes,
};
#ifdef CONFIG_OF
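/*
 * cflm_parse_dt - pull optional tuning from the device tree node:
 * voltage-based clock flag and offset, silver boost/limit maps, the
 * adaptive-boost table, per-cluster fmin overrides, g_fmin_up, the virtual
 * little max / big min boundaries and the titanium (4-cluster) flag.
 * Anything missing keeps its built-in default from 'param'.
 */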
static void cflm_parse_dt(struct platform_device *pdev)
{
	int size = 0;
	if (!pdev->dev.of_node) {
		pr_info("%s: no device tree\n", __func__);
		return;
	}
	/* voltage based */
	param.vol_based_clk = of_property_read_bool(pdev->dev.of_node, "limit,vol_based_clk");
	of_property_read_u32(pdev->dev.of_node, "limit,vbf_offset", &param.vbf_offset);
	pr_info("%s: param: voltage based clock: %s(offset %d)\n",
			__func__, param.vol_based_clk ? "true" : "false", param.vbf_offset);
	/* boost table */
	of_get_property(pdev->dev.of_node, "limit,silver_boost_table", &size);
	if (size) {
		param.silver_boost_map = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
		of_property_read_u32_array(pdev->dev.of_node, "limit,silver_boost_table",
				(u32 *)param.silver_boost_map, size / sizeof(u32));
		param.boost_map_size = size / sizeof(*param.silver_boost_map);
	}
	pr_info("%s: param: boost map size(%d)\n", __func__, param.boost_map_size);
	/* limit table */
	size = 0;
	of_get_property(pdev->dev.of_node, "limit,silver_limit_table", &size);
	if (size) {
		param.silver_limit_map = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
		of_property_read_u32_array(pdev->dev.of_node, "limit,silver_limit_table",
				(u32 *)param.silver_limit_map, size / sizeof(u32));
		param.limit_map_size = size / sizeof(*param.silver_limit_map);
	}
	pr_info("%s: param: limit map size(%d)\n", __func__, param.limit_map_size);
	/* adaptive boost */
	size = 0;
	of_get_property(pdev->dev.of_node, "limit,ab_table", &size);
	if (size) {
		param.ab_table = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
		of_property_read_u32_array(pdev->dev.of_node, "limit,ab_table",
				(u32 *)param.ab_table, size / sizeof(u32));
		param.ab_enabled = 1;
		pr_info("%s: param: aboost enabled(%d)\n", __func__, size);
	} else {
		param.ab_enabled = 0;
		pr_info("%s: param: no aboost table(%d)\n", __func__, size);
	}
	/* lowest freq */
	of_property_read_u32(pdev->dev.of_node, "limit,gold_fmin", &param.g_fmin);
	of_property_read_u32(pdev->dev.of_node, "limit,titanium_fmin", &param.t_fmin);
	of_property_read_u32(pdev->dev.of_node, "limit,prime_fmin", &param.p_fmin);
	/* etc */
	of_property_read_u32(pdev->dev.of_node, "limit,gold_fmin_up", &param.g_fmin_up);
	of_property_read_u32(pdev->dev.of_node, "limit,little_max_freq", &param.ltl_max_freq);
	of_property_read_u32(pdev->dev.of_node, "limit,big_min_freq", &param.big_min_freq);
	/* sm8650 4 cluster(silver, gold, titanium, prime) */
	param.titanium = of_property_read_bool(pdev->dev.of_node, "limit,support_titanium");
	of_node_put(pdev->dev.of_node);
	pr_info("%s: param: g_fmin_up(%d), ltl_max_freq(%d), big_min_freq(%d), titanium(%d)\n", __func__,
			param.g_fmin_up, param.ltl_max_freq, param.big_min_freq, param.titanium);
}
#endif
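/*
 * cflm_add_qos - register one FREQ_QOS_MIN and one FREQ_QOS_MAX request per
 * CPU and per requester, record which requests were added in cflm_req_init
 * (two bits per requester) so cflm_remove_qos() can undo exactly those, and
 * capture each cluster's fmin/fmax while the policies are available. Each
 * policy's freq_table is handed to cflm_set_table(). Returns -EPROBE_DEFER
 * if a policy is not ready yet.
 */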
int cflm_add_qos(void)
{
	struct cpufreq_policy *policy;
	unsigned int i = 0;
	unsigned int j = 0;
	int ret = 0;
	for_each_possible_cpu(i) {
		policy = cpufreq_cpu_get(i);
		if (!policy) {
			pr_err("no policy for cpu%d\n", i);
			ret = -EPROBE_DEFER;
			break;
		}
		for (j = 0; j < CFLM_MAX_ITEM; j++) {
			ret = freq_qos_add_request(&policy->constraints,
					&min_req[i][j],
					FREQ_QOS_MIN, policy->cpuinfo.min_freq);
			if (ret < 0) {
				pr_err("%s: failed to add min req(%d)\n", __func__, ret);
				break;
			}
			cflm_req_init[i] |= BIT(j*2);
			ret = freq_qos_add_request(&policy->constraints,
					&max_req[i][j],
					FREQ_QOS_MAX, policy->cpuinfo.max_freq);
			if (ret < 0) {
				pr_err("%s: failed to add max req(%d)\n", __func__, ret);
				break;
			}
			cflm_req_init[i] |= BIT(j*2+1);
		}
		if (ret < 0) {
			cpufreq_cpu_put(policy);
			break;
		}
		if (i == param.s_first) {
			if (!param.s_fmin)
				param.s_fmin = policy->cpuinfo.min_freq;
			param.s_fmax = policy->cpuinfo.max_freq;
		}
		if (i == param.g_first) {
			if (!param.g_fmin)
				param.g_fmin = policy->cpuinfo.min_freq;
			param.g_fmax = policy->cpuinfo.max_freq;
		}
		if (i == param.p_first) {
			if (!param.p_fmin)
				param.p_fmin = policy->cpuinfo.min_freq;
			param.p_fmax = policy->cpuinfo.max_freq;
		}
		if (i == param.t_first) {
			if (!param.t_fmin)
				param.t_fmin = policy->cpuinfo.min_freq;
			param.t_fmax = policy->cpuinfo.max_freq;
		}
		cflm_set_table(policy->cpu, policy->freq_table);
		cpufreq_cpu_put(policy);
	}
	return ret;
}
void cflm_remove_qos(void)
{
	unsigned int i = 0;
	unsigned int j = 0;
	int ret = 0;
	pr_info("%s\n", __func__);
	for_each_possible_cpu(i) {
		for (j = 0; j < CFLM_MAX_ITEM; j++) {
			if (cflm_req_init[i] & BIT(j*2)) {
				//pr_info("%s: try to remove min[%d][%d] req\n", __func__, i, j);
				ret = freq_qos_remove_request(&min_req[i][j]);
				if (ret < 0)
					pr_err("%s: failed to remove min_req (%d)\n", __func__, ret);
			}
			if (cflm_req_init[i] & BIT(j*2+1)) {
				//pr_info("%s: try to remove max[%d][%d] req\n", __func__, i, j);
				ret = freq_qos_remove_request(&max_req[i][j]);
				if (ret < 0)
					pr_err("%s: failed to remove max_req (%d)\n", __func__, ret);
			}
		}
		cflm_req_init[i] = 0U;
	}
}
int cflm_probe(struct platform_device *pdev)
{
	int ret;
	pr_info("%s\n", __func__);
	if (lpcharge) {
		pr_info("%s: dummy for LPM\n", __func__);
		return 0;
	}
#ifdef CONFIG_OF
	cflm_parse_dt(pdev);
#endif
	ret = cflm_add_qos();
	if (ret < 0)
		goto policy_not_ready;
	cflm_kobj = kobject_create_and_add("cpufreq_limit",
			&cpu_subsys.dev_root->kobj);
	if (!cflm_kobj) {
		pr_err("Unable to create cflm_kobj\n");
		ret = -ENOMEM;
		goto object_create_failed;
	}
	ret = sysfs_create_group(cflm_kobj, &cflm_attr_group);
	if (ret) {
		pr_err("Unable to create cflm group\n");
		goto group_create_failed;
	}
	pr_info("%s done\n", __func__);
	return ret;
group_create_failed:
	kobject_put(cflm_kobj);
object_create_failed:
	cflm_kobj = NULL;
policy_not_ready:
	cflm_remove_qos();
	return ret;
}
static int cflm_remove(struct platform_device *pdev)
{
	pr_info("%s\n", __func__);
	if (!lpcharge && cflm_kobj) {
		cflm_remove_qos();
		sysfs_remove_group(cflm_kobj, &cflm_attr_group);
		kobject_put(cflm_kobj);
		cflm_kobj = NULL;
	}
	return 0;
}
static const struct of_device_id cflm_match_table[] = {
	{ .compatible = "cpufreq_limit" },
	{}
};
static struct platform_driver cflm_driver = {
	.driver = {
		.name = "cpufreq_limit",
		.of_match_table = cflm_match_table,
	},
	.probe = cflm_probe,
	.remove = cflm_remove,
};
static int __init cflm_init(void)
{
	return platform_driver_register(&cflm_driver);
}
static void __exit cflm_exit(void)
{
	platform_driver_unregister(&cflm_driver);
}
MODULE_AUTHOR("Sangyoung Son <[email protected]>");
MODULE_DESCRIPTION("'cpufreq_limit' - A driver to limit cpu frequency");
MODULE_LICENSE("GPL");
late_initcall(cflm_init);
module_exit(cflm_exit);