// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <trace/events/power.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/kthread.h>
#include <linux/sched/walt.h>
#include <soc/qcom/msm_performance.h>
#include <soc/qcom/pmu_lib.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>
#include <linux/ktime.h>
#include <linux/perf_event.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/scmi_protocol.h>
#include <linux/platform_device.h>
#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
#include <linux/qcom_scmi_vendor.h>
#else
#include <linux/scmi_plh.h>
#endif

#define POLL_INT 25
#define NODE_NAME_MAX_CHARS 16
#define QUEUE_POOL_SIZE 512 /* 2^9; always keep this a power of two */
#define INST_EV 0x08 /* event 0: instructions retired */
#define CYC_EV 0x11 /* event 1: CPU cycles */
#define INIT "Init"
#define CPU_CYCLE_THRESHOLD 650000
#define CPUCP_MIN_LOG_LEVEL 0
#define CPUCP_MAX_LOG_LEVEL 0xF
static DEFINE_PER_CPU(bool, cpu_is_hp);
static DEFINE_MUTEX(perfevent_lock);
static DEFINE_MUTEX(freq_pmqos_lock);

enum event_idx {
	INST_EVENT,
	CYC_EVENT,
	NO_OF_EVENT
};

enum cpu_clusters {
	MIN = 0,
	MID = 1,
	MAX = 2,
	CLUSTER_MAX
};

static struct kset *msm_perf_kset;
static struct kobject *param_kobj;

static ssize_t get_cpu_min_freq(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_cpu_min_freq(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);
static ssize_t get_cpu_max_freq(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_cpu_max_freq(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);
static ssize_t get_cpu_total_instruction(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t get_game_start_pid(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_game_start_pid(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);
static ssize_t get_splh_notif(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_splh_notif(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);
static ssize_t get_splh_sample_ms(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_splh_sample_ms(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);
static ssize_t get_splh_log_level(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_splh_log_level(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);
static ssize_t get_lplh_notif(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_lplh_notif(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);
static ssize_t get_lplh_sample_ms(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_lplh_sample_ms(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);
static ssize_t get_lplh_log_level(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_lplh_log_level(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);

static struct kobj_attribute cpu_min_freq_attr =
	__ATTR(cpu_min_freq, 0644, get_cpu_min_freq, set_cpu_min_freq);
static struct kobj_attribute cpu_max_freq_attr =
	__ATTR(cpu_max_freq, 0644, get_cpu_max_freq, set_cpu_max_freq);
static struct kobj_attribute inst_attr =
	__ATTR(inst, 0444, get_cpu_total_instruction, NULL);

#if IS_ENABLED(CONFIG_SCHED_WALT)
static ssize_t get_core_ctl_register(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf);
static ssize_t set_core_ctl_register(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count);
static struct kobj_attribute core_ctl_register_attr =
	__ATTR(core_ctl_register, 0644, get_core_ctl_register,
		set_core_ctl_register);
#endif

static struct kobj_attribute evnt_gplaf_pid_attr =
	__ATTR(evnt_gplaf_pid, 0644, get_game_start_pid, set_game_start_pid);
static struct kobj_attribute splh_notif_attr =
	__ATTR(splh_notif, 0644, get_splh_notif, set_splh_notif);
static struct kobj_attribute splh_sample_ms_attr =
	__ATTR(splh_sample_ms, 0644, get_splh_sample_ms, set_splh_sample_ms);
static struct kobj_attribute splh_log_level_attr =
	__ATTR(splh_log_level, 0644, get_splh_log_level, set_splh_log_level);
static struct kobj_attribute lplh_notif_attr =
	__ATTR(lplh_notif, 0644, get_lplh_notif, set_lplh_notif);
static struct kobj_attribute lplh_sample_ms_attr =
	__ATTR(lplh_sample_ms, 0644, get_lplh_sample_ms, set_lplh_sample_ms);
static struct kobj_attribute lplh_log_level_attr =
	__ATTR(lplh_log_level, 0644, get_lplh_log_level, set_lplh_log_level);

static struct attribute *plh_param_attrs[] = {
	&splh_notif_attr.attr,
	&splh_sample_ms_attr.attr,
	&splh_log_level_attr.attr,
	&lplh_notif_attr.attr,
	&lplh_sample_ms_attr.attr,
	&lplh_log_level_attr.attr,
	NULL,
};
static struct attribute_group plh_param_attr_group = {
	.attrs = plh_param_attrs,
};

static int add_plh_params(void)
{
	int ret = 0;

	if (param_kobj)
		ret = sysfs_update_group(param_kobj, &plh_param_attr_group);
	if (!param_kobj || ret) {
		pr_err("msm_perf: plh: Failed to update param_kobj\n");
		return -ENOMEM;
	}
	return 0;
}
static struct attribute *param_attrs[] = {
	&cpu_min_freq_attr.attr,
	&cpu_max_freq_attr.attr,
	&inst_attr.attr,
#if IS_ENABLED(CONFIG_SCHED_WALT)
	&core_ctl_register_attr.attr,
#endif
	&evnt_gplaf_pid_attr.attr,
	NULL,
};
static struct attribute_group param_attr_group = {
	.attrs = param_attrs,
};

static int add_module_params(void)
{
	int ret;
	struct kobject *module_kobj;

	module_kobj = &msm_perf_kset->kobj;
	param_kobj = kobject_create_and_add("parameters", module_kobj);
	if (!param_kobj) {
		pr_err("msm_perf: Failed to add param_kobj\n");
		return -ENOMEM;
	}
	ret = sysfs_create_group(param_kobj, &param_attr_group);
	if (ret) {
		pr_err("msm_perf: Failed to create sysfs\n");
		return ret;
	}
	return 0;
}
/* To handle cpufreq min/max request */
struct cpu_status {
	unsigned int min;
	unsigned int max;
};
static DEFINE_PER_CPU(struct cpu_status, msm_perf_cpu_stats);
static DEFINE_PER_CPU(struct freq_qos_request, qos_req_min);
static DEFINE_PER_CPU(struct freq_qos_request, qos_req_max);
static cpumask_var_t limit_mask_min;
static cpumask_var_t limit_mask_max;

static DECLARE_COMPLETION(gfx_evt_arrival);
struct gpu_data {
	pid_t pid;
	int ctx_id;
	unsigned int timestamp;
	ktime_t arrive_ts;
	int evt_typ;
};
static struct gpu_data gpu_circ_buff[QUEUE_POOL_SIZE];
struct queue_indicies {
	int head;
	int tail;
};
static struct queue_indicies curr_pos;
static DEFINE_SPINLOCK(gfx_circ_buff_lock);

struct event_data {
	u32 event_id;
	u64 prev_count;
	u64 cur_delta;
	u64 cached_total_count;
};
static struct event_data **pmu_events;
static unsigned long min_cpu_capacity = ULONG_MAX;

struct events {
	spinlock_t cpu_hotplug_lock;
	bool cpu_hotplug;
	bool init_success;
};
static struct events events_group;
static struct task_struct *events_notify_thread;

static unsigned int aggr_big_nr;
static unsigned int aggr_top_load;
static unsigned int top_load[CLUSTER_MAX];
static unsigned int curr_cap[CLUSTER_MAX];
static atomic_t game_status_pid;
static bool ready_for_freq_updates;
static int freq_qos_request_init(void)
{
	unsigned int cpu;
	int ret;
	struct cpufreq_policy *policy;
	struct freq_qos_request *req;

	for_each_present_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			pr_err("%s: Failed to get cpufreq policy for cpu%d\n",
				__func__, cpu);
			ret = -EAGAIN;
			goto cleanup;
		}
		per_cpu(msm_perf_cpu_stats, cpu).min = 0;
		req = &per_cpu(qos_req_min, cpu);
		ret = freq_qos_add_request(&policy->constraints, req,
			FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE);
		if (ret < 0) {
			pr_err("%s: Failed to add min freq constraint (%d)\n",
				__func__, ret);
			cpufreq_cpu_put(policy);
			goto cleanup;
		}
		per_cpu(msm_perf_cpu_stats, cpu).max = FREQ_QOS_MAX_DEFAULT_VALUE;
		req = &per_cpu(qos_req_max, cpu);
		ret = freq_qos_add_request(&policy->constraints, req,
			FREQ_QOS_MAX, FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0) {
			pr_err("%s: Failed to add max freq constraint (%d)\n",
				__func__, ret);
			cpufreq_cpu_put(policy);
			goto cleanup;
		}
		cpufreq_cpu_put(policy);
	}
	return 0;

cleanup:
	for_each_present_cpu(cpu) {
		req = &per_cpu(qos_req_min, cpu);
		if (req && freq_qos_request_active(req))
			freq_qos_remove_request(req);
		req = &per_cpu(qos_req_max, cpu);
		if (req && freq_qos_request_active(req))
			freq_qos_remove_request(req);
		per_cpu(msm_perf_cpu_stats, cpu).min = 0;
		per_cpu(msm_perf_cpu_stats, cpu).max = FREQ_QOS_MAX_DEFAULT_VALUE;
	}
	return ret;
}

/*******************************sysfs start************************************/
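
/*
 * A sketch of the expected write format for the two nodes below (the
 * frequency values here are hypothetical): both cpu_min_freq and
 * cpu_max_freq take space-separated "cpu:kHz" pairs, e.g.
 *
 *	echo "0:1200000 4:2000000" > \
 *		/sys/kernel/msm_performance/parameters/cpu_min_freq
 *
 * requests a 1.2 GHz floor on CPU0 and a 2.0 GHz floor on CPU4 (the path
 * follows from the "msm_performance" kset and "parameters" kobject set up
 * in this file). On clusters sharing a cpufreq policy, updating one member
 * CPU is enough for the whole cluster.
 */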
static ssize_t set_cpu_min_freq(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int i, ntokens = 0;
	unsigned int val, cpu;
	const char *cp = buf;
	struct cpu_status *i_cpu_stats;
	struct freq_qos_request *req;
	int ret = 0;

	mutex_lock(&freq_pmqos_lock);
	if (!ready_for_freq_updates) {
		ret = freq_qos_request_init();
		if (ret) {
			pr_err("%s: Failed to init qos requests, ret=%d\n",
				__func__, ret);
			mutex_unlock(&freq_pmqos_lock);
			return ret;
		}
		ready_for_freq_updates = true;
	}
	mutex_unlock(&freq_pmqos_lock);

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;
	/* valid "CPU:value" pairs yield an odd separator count */
	if (!(ntokens % 2))
		return -EINVAL;

	cp = buf;
	cpumask_clear(limit_mask_min);
	for (i = 0; i < ntokens; i += 2) {
		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
			return -EINVAL;
		if (cpu >= nr_cpu_ids)
			break;
		if (cpu_possible(cpu)) {
			i_cpu_stats = &per_cpu(msm_perf_cpu_stats, cpu);
			i_cpu_stats->min = val;
			cpumask_set_cpu(cpu, limit_mask_min);
		}
		cp = strnchr(cp, strlen(cp), ' ');
		if (!cp)
			break;
		cp++;
	}

	/*
	 * Since on synchronous systems the policy is shared amongst multiple
	 * CPUs, only one CPU needs to be updated for the limit to be
	 * reflected for the entire cluster. We can avoid updating the policy
	 * of other CPUs in the cluster once it is done for at least one CPU
	 * in the cluster.
	 */
	cpus_read_lock();
	for_each_cpu(i, limit_mask_min) {
		i_cpu_stats = &per_cpu(msm_perf_cpu_stats, i);
		req = &per_cpu(qos_req_min, i);
		if (freq_qos_update_request(req, i_cpu_stats->min) < 0)
			continue;
	}
	cpus_read_unlock();

	return count;
}
static ssize_t get_cpu_min_freq(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int cnt = 0, cpu;

	for_each_present_cpu(cpu) {
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt,
				"%d:%u ", cpu,
				per_cpu(msm_perf_cpu_stats, cpu).min);
	}
	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
	return cnt;
}

static ssize_t set_cpu_max_freq(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int i, ntokens = 0;
	unsigned int val, cpu;
	const char *cp = buf;
	struct cpu_status *i_cpu_stats;
	struct freq_qos_request *req;
	int ret = 0;

	mutex_lock(&freq_pmqos_lock);
	if (!ready_for_freq_updates) {
		ret = freq_qos_request_init();
		if (ret) {
			pr_err("%s: Failed to init qos requests, ret=%d\n",
				__func__, ret);
			mutex_unlock(&freq_pmqos_lock);
			return ret;
		}
		ready_for_freq_updates = true;
	}
	mutex_unlock(&freq_pmqos_lock);

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;
	/* valid "CPU:value" pairs yield an odd separator count */
	if (!(ntokens % 2))
		return -EINVAL;

	cp = buf;
	cpumask_clear(limit_mask_max);
	for (i = 0; i < ntokens; i += 2) {
		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
			return -EINVAL;
		if (cpu >= nr_cpu_ids)
			break;
		if (cpu_possible(cpu)) {
			i_cpu_stats = &per_cpu(msm_perf_cpu_stats, cpu);
			i_cpu_stats->max = min_t(uint, val,
				(unsigned int)FREQ_QOS_MAX_DEFAULT_VALUE);
			cpumask_set_cpu(cpu, limit_mask_max);
		}
		cp = strnchr(cp, strlen(cp), ' ');
		if (!cp)
			break;
		cp++;
	}

	cpus_read_lock();
	for_each_cpu(i, limit_mask_max) {
		i_cpu_stats = &per_cpu(msm_perf_cpu_stats, i);
		req = &per_cpu(qos_req_max, i);
		if (freq_qos_update_request(req, i_cpu_stats->max) < 0)
			continue;
	}
	cpus_read_unlock();

	return count;
}

static ssize_t get_cpu_max_freq(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int cnt = 0, cpu;

	for_each_present_cpu(cpu) {
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt,
				"%d:%u ", cpu,
				per_cpu(msm_perf_cpu_stats, cpu).max);
	}
	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
	return cnt;
}
static struct kobject *events_kobj;

static ssize_t show_cpu_hotplug(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	/*
	 * The value is immaterial; this node exists so userspace can
	 * poll() it and be woken via sysfs_notify() on hotplug.
	 */
	return scnprintf(buf, PAGE_SIZE, "\n");
}
static struct kobj_attribute cpu_hotplug_attr =
	__ATTR(cpu_hotplug, 0444, show_cpu_hotplug, NULL);

static struct attribute *events_attrs[] = {
	&cpu_hotplug_attr.attr,
	NULL,
};
static struct attribute_group events_attr_group = {
	.attrs = events_attrs,
};
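
/*
 * Reader sketch for the gfx_evt node below: each read blocks in
 * wait_for_completion_interruptible() until a queue/retire event for the
 * registered game PID arrives, then drains the circular buffer and returns
 * one "pid ctx_id timestamp evt_typ arrive_us :" record per event, e.g.
 * (hypothetical values)
 *
 *	1234 7 100200 1 987654321 :
 */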
static ssize_t show_perf_gfx_evts(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	struct queue_indicies updated_pos;
	unsigned long flags;
	ssize_t retval = 0;
	int idx = 0, size, act_idx, ret = -1;

	ret = wait_for_completion_interruptible(&gfx_evt_arrival);
	if (ret)
		return 0;

	spin_lock_irqsave(&gfx_circ_buff_lock, flags);
	updated_pos.head = curr_pos.head;
	updated_pos.tail = curr_pos.tail;
	size = CIRC_CNT(updated_pos.head, updated_pos.tail, QUEUE_POOL_SIZE);
	curr_pos.tail = (curr_pos.tail + size) % QUEUE_POOL_SIZE;
	spin_unlock_irqrestore(&gfx_circ_buff_lock, flags);

	for (idx = 0; idx < size; idx++) {
		act_idx = (updated_pos.tail + idx) % QUEUE_POOL_SIZE;
		retval += scnprintf(buf + retval, PAGE_SIZE - retval,
				"%d %d %u %d %lld :",
				gpu_circ_buff[act_idx].pid,
				gpu_circ_buff[act_idx].ctx_id,
				gpu_circ_buff[act_idx].timestamp,
				gpu_circ_buff[act_idx].evt_typ,
				ktime_to_us(gpu_circ_buff[act_idx].arrive_ts));
		if (retval >= PAGE_SIZE) {
			pr_err("msm_perf: data limit exceeded\n");
			break;
		}
	}
	return retval;
}
static struct kobj_attribute gfx_event_info_attr =
	__ATTR(gfx_evt, 0444, show_perf_gfx_evts, NULL);

static ssize_t show_big_nr(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", aggr_big_nr);
}
static struct kobj_attribute big_nr_attr =
	__ATTR(aggr_big_nr, 0444, show_big_nr, NULL);

static ssize_t show_top_load(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", aggr_top_load);
}
static struct kobj_attribute top_load_attr =
	__ATTR(aggr_top_load, 0444, show_top_load, NULL);

static ssize_t show_top_load_cluster(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u %u %u\n",
			top_load[MIN], top_load[MID],
			top_load[MAX]);
}
static struct kobj_attribute cluster_top_load_attr =
	__ATTR(top_load_cluster, 0444, show_top_load_cluster, NULL);

static ssize_t show_curr_cap_cluster(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u %u %u\n",
			curr_cap[MIN], curr_cap[MID],
			curr_cap[MAX]);
}
static struct kobj_attribute cluster_curr_cap_attr =
	__ATTR(curr_cap_cluster, 0444, show_curr_cap_cluster, NULL);

static struct attribute *notify_attrs[] = {
	&big_nr_attr.attr,
	&top_load_attr.attr,
	&cluster_top_load_attr.attr,
	&cluster_curr_cap_attr.attr,
	&gfx_event_info_attr.attr,
	NULL,
};
static struct attribute_group notify_attr_group = {
	.attrs = notify_attrs,
};
static struct kobject *notify_kobj;
/*******************************sysfs ends************************************/

/*****************PMU Data Collection*****************/
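
/*
 * Note on the event IDs: INST_EV (0x08) and CYC_EV (0x11) are the
 * architectural ARMv8 PMU event numbers for INST_RETIRED and CPU_CYCLES.
 * Each read below computes a delta against the previously cached count,
 * so the "inst" node reports activity since the last read; the IPC values
 * derived from these counters are scaled by 100 (e.g. 150 means 1.5
 * instructions per cycle).
 */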
static int set_event(struct event_data *ev, int cpu)
{
	int ret;

	ret = qcom_pmu_event_supported(ev->event_id, cpu);
	if (ret) {
		pr_err("msm_perf: %s failed, eventId:0x%x, cpu:%d, error code:%d\n",
			__func__, ev->event_id, cpu, ret);
		return ret;
	}
	return 0;
}

static void free_pmu_counters(unsigned int cpu)
{
	int i = 0;

	if (!cpu_possible(cpu))
		return;
	for (i = 0; i < NO_OF_EVENT; i++) {
		pmu_events[i][cpu].prev_count = 0;
		pmu_events[i][cpu].cur_delta = 0;
		pmu_events[i][cpu].cached_total_count = 0;
	}
}
static int init_pmu_counter(void)
{
	int cpu;
	unsigned long cpu_capacity;
	int ret = 0;
	int i = 0, j = 0;
	int no_of_cpus = 0;

	for_each_possible_cpu(cpu)
		no_of_cpus++;

	pmu_events = kcalloc(NO_OF_EVENT, sizeof(struct event_data *), GFP_KERNEL);
	if (!pmu_events)
		return -ENOMEM;
	for (i = 0; i < NO_OF_EVENT; i++) {
		pmu_events[i] = kcalloc(no_of_cpus, sizeof(struct event_data), GFP_KERNEL);
		if (!pmu_events[i]) {
			for (j = i; j >= 0; j--) {
				kfree(pmu_events[j]);
				pmu_events[j] = NULL;
			}
			kfree(pmu_events);
			pmu_events = NULL;
			return -ENOMEM;
		}
	}

	/* Create events per CPU */
	for_each_possible_cpu(cpu) {
		/* create instruction event */
		pmu_events[INST_EVENT][cpu].event_id = INST_EV;
		ret = set_event(&pmu_events[INST_EVENT][cpu], cpu);
		if (ret < 0)
			return ret;
		/* create cycle event */
		pmu_events[CYC_EVENT][cpu].event_id = CYC_EV;
		ret = set_event(&pmu_events[CYC_EVENT][cpu], cpu);
		if (ret < 0) {
			free_pmu_counters(cpu);
			return ret;
		}
		/* find capacity per cpu */
		cpu_capacity = arch_scale_cpu_capacity(cpu);
		if (cpu_capacity < min_cpu_capacity)
			min_cpu_capacity = cpu_capacity;
	}
	return 0;
}
static inline void msm_perf_read_event(struct event_data *event, int cpu)
{
	u64 ev_count = 0;
	int ret;
	u64 total;

	mutex_lock(&perfevent_lock);
	if (!event->event_id) {
		mutex_unlock(&perfevent_lock);
		return;
	}
	if (!per_cpu(cpu_is_hp, cpu)) {
		ret = qcom_pmu_read(cpu, event->event_id, &total);
		if (ret) {
			mutex_unlock(&perfevent_lock);
			return;
		}
	} else {
		total = event->cached_total_count;
	}
	ev_count = total - event->prev_count;
	event->prev_count = total;
	event->cur_delta = ev_count;
	mutex_unlock(&perfevent_lock);
}
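
/*
 * Sketch of the "inst" node output assembled below: four colon-separated
 * u64 fields, "big_insts:big_ipc:little_insts:little_ipc", where the IPC
 * fields are the per-group maxima scaled by 100. A hypothetical read:
 *
 *	cat /sys/kernel/msm_performance/parameters/inst
 *	1024000:170:256000:90
 */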
static ssize_t get_cpu_total_instruction(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	u64 instruction = 0;
	u64 cycles = 0;
	u64 total_inst_big = 0;
	u64 total_inst_little = 0;
	u64 ipc_big = 0;
	u64 ipc_little = 0;
	int cnt = 0, cpu;

	if (!pmu_events)
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		/* read instruction event */
		msm_perf_read_event(&pmu_events[INST_EVENT][cpu], cpu);
		/* read cycle event */
		msm_perf_read_event(&pmu_events[CYC_EVENT][cpu], cpu);
		instruction = pmu_events[INST_EVENT][cpu].cur_delta;
		cycles = pmu_events[CYC_EVENT][cpu].cur_delta;
		/* collect max IPC and total instructions for big and little CPUs */
		if (arch_scale_cpu_capacity(cpu) > min_cpu_capacity) {
			if (cycles && cycles >= CPU_CYCLE_THRESHOLD)
				ipc_big = max(ipc_big,
					((instruction * 100) / cycles));
			total_inst_big += instruction;
		} else {
			if (cycles)
				ipc_little = max(ipc_little,
					((instruction * 100) / cycles));
			total_inst_little += instruction;
		}
	}
	cnt += scnprintf(buf, PAGE_SIZE, "%llu:%llu:%llu:%llu\n",
			total_inst_big, ipc_big,
			total_inst_little, ipc_little);
	return cnt;
}
static int hotplug_notify_down(unsigned int cpu)
{
	mutex_lock(&perfevent_lock);
	per_cpu(cpu_is_hp, cpu) = true;
	free_pmu_counters(cpu);
	mutex_unlock(&perfevent_lock);
	return 0;
}

static int hotplug_notify_up(unsigned int cpu)
{
	unsigned long flags;

	mutex_lock(&perfevent_lock);
	per_cpu(cpu_is_hp, cpu) = false;
	mutex_unlock(&perfevent_lock);
	if (events_group.init_success) {
		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
		events_group.cpu_hotplug = true;
		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
		wake_up_process(events_notify_thread);
	}
	return 0;
}

static int events_notify_userspace(void *data)
{
	unsigned long flags;
	bool notify_change;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
		if (!events_group.cpu_hotplug) {
			spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock),
				flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&(events_group.cpu_hotplug_lock),
				flags);
		}
		set_current_state(TASK_RUNNING);
		notify_change = events_group.cpu_hotplug;
		events_group.cpu_hotplug = false;
		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
		if (notify_change)
			sysfs_notify(events_kobj, NULL, "cpu_hotplug");
	}
	return 0;
}
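
/*
 * Flow sketch: when a CPU comes online, hotplug_notify_up() sets
 * events_group.cpu_hotplug and wakes the kthread above, which calls
 * sysfs_notify() on events/cpu_hotplug. A userspace client would poll()
 * /sys/kernel/msm_performance/events/cpu_hotplug and re-read on wakeup
 * (the path is inferred from the kobject hierarchy set up below).
 */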
static int init_notify_group(void)
{
	int ret;
	struct kobject *module_kobj = &msm_perf_kset->kobj;

	notify_kobj = kobject_create_and_add("notify", module_kobj);
	if (!notify_kobj) {
		pr_err("msm_perf: Failed to add notify_kobj\n");
		return -ENOMEM;
	}
	ret = sysfs_create_group(notify_kobj, &notify_attr_group);
	if (ret) {
		kobject_put(notify_kobj);
		pr_err("msm_perf: Failed to create sysfs\n");
		return ret;
	}
	return 0;
}

static int init_events_group(void)
{
	int ret;
	struct kobject *module_kobj = &msm_perf_kset->kobj;

	events_kobj = kobject_create_and_add("events", module_kobj);
	if (!events_kobj) {
		pr_err("msm_perf: Failed to add events_kobj\n");
		return -ENOMEM;
	}
	ret = sysfs_create_group(events_kobj, &events_attr_group);
	if (ret) {
		pr_err("msm_perf: Failed to create sysfs\n");
		return ret;
	}
	spin_lock_init(&(events_group.cpu_hotplug_lock));
	events_notify_thread = kthread_run(events_notify_userspace,
			NULL, "msm_perf:events_notify");
	if (IS_ERR(events_notify_thread))
		return PTR_ERR(events_notify_thread);
	events_group.init_success = true;
	return 0;
}
#if IS_ENABLED(CONFIG_SCHED_WALT)
static void nr_notify_userspace(struct work_struct *work)
{
	sysfs_notify(notify_kobj, NULL, "aggr_top_load");
	sysfs_notify(notify_kobj, NULL, "aggr_big_nr");
	sysfs_notify(notify_kobj, NULL, "top_load_cluster");
	sysfs_notify(notify_kobj, NULL, "curr_cap_cluster");
}

static int msm_perf_core_ctl_notify(struct notifier_block *nb,
		unsigned long unused,
		void *data)
{
	static unsigned int tld, nrb, i;
	static unsigned int top_ld[CLUSTER_MAX], curr_cp[CLUSTER_MAX];
	static DECLARE_WORK(sysfs_notify_work, nr_notify_userspace);
	struct core_ctl_notif_data *d = data;
	int cluster = 0;

	nrb += d->nr_big;
	tld += d->coloc_load_pct;
	for (cluster = 0; cluster < CLUSTER_MAX; cluster++) {
		top_ld[cluster] += d->ta_util_pct[cluster];
		curr_cp[cluster] += d->cur_cap_pct[cluster];
	}
	i++;
	if (i == POLL_INT) {
		aggr_big_nr = ((nrb % POLL_INT) ? 1 : 0) + nrb / POLL_INT;
		aggr_top_load = tld / POLL_INT;
		for (cluster = 0; cluster < CLUSTER_MAX; cluster++) {
			top_load[cluster] = top_ld[cluster] / POLL_INT;
			curr_cap[cluster] = curr_cp[cluster] / POLL_INT;
			top_ld[cluster] = 0;
			curr_cp[cluster] = 0;
		}
		tld = 0;
		nrb = 0;
		i = 0;
		schedule_work(&sysfs_notify_work);
	}
	return NOTIFY_OK;
}
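
/*
 * Worked example of the aggregation above: every POLL_INT (25) notifier
 * callbacks, aggr_top_load becomes the mean of the accumulated
 * coloc_load_pct samples, while aggr_big_nr rounds the mean nr_big up,
 * i.e. ceil(nrb / 25): 26 big tasks accumulated over 25 samples report 2.
 */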
static struct notifier_block msm_perf_nb = {
	.notifier_call = msm_perf_core_ctl_notify
};
static bool core_ctl_register;

static ssize_t get_core_ctl_register(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%c\n", core_ctl_register ? 'Y' : 'N');
}

static ssize_t set_core_ctl_register(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	bool old_val = core_ctl_register;
	int ret;

	ret = kstrtobool(buf, &core_ctl_register);
	if (ret < 0) {
		pr_err("msm_perf: getting new core_ctl_register failed, ret=%d\n", ret);
		return ret;
	}
	if (core_ctl_register == old_val)
		return count;
	if (core_ctl_register)
		core_ctl_notifier_register(&msm_perf_nb);
	else
		core_ctl_notifier_unregister(&msm_perf_nb);
	return count;
}
#endif
void msm_perf_events_update(enum evt_update_t update_typ,
		enum gfx_evt_t evt_typ, pid_t pid,
		uint32_t ctx_id, uint32_t timestamp, bool end_of_frame)
{
	unsigned long flags;
	int idx = 0;

	if (update_typ != MSM_PERF_GFX)
		return;
	if (pid != atomic_read(&game_status_pid) || (timestamp == 0)
			|| !end_of_frame)
		return;

	spin_lock_irqsave(&gfx_circ_buff_lock, flags);
	idx = curr_pos.head;
	curr_pos.head = (curr_pos.head + 1) % QUEUE_POOL_SIZE;
	spin_unlock_irqrestore(&gfx_circ_buff_lock, flags);

	gpu_circ_buff[idx].pid = pid;
	gpu_circ_buff[idx].ctx_id = ctx_id;
	gpu_circ_buff[idx].timestamp = timestamp;
	gpu_circ_buff[idx].evt_typ = evt_typ;
	gpu_circ_buff[idx].arrive_ts = ktime_get();

	if (evt_typ == MSM_PERF_QUEUE || evt_typ == MSM_PERF_RETIRED)
		complete(&gfx_evt_arrival);
}
EXPORT_SYMBOL(msm_perf_events_update);

static ssize_t set_game_start_pid(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	long usr_val = 0;
	int ret;

	ret = kstrtol(buf, 0, &usr_val);
	if (ret) {
		pr_err("msm_perf: kstrtol failed, ret=%d\n", ret);
		return ret;
	}
	atomic_set(&game_status_pid, usr_val);
	return count;
}

static ssize_t get_game_start_pid(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	long usr_val = atomic_read(&game_status_pid);

	return scnprintf(buf, PAGE_SIZE, "%ld\n", usr_val);
}
/*******************************GFX Call************************************/
#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
static const struct qcom_scmi_vendor_ops *plh_ops;
#else
static const struct scmi_plh_vendor_ops *plh_ops;
#endif
static struct scmi_protocol_handle *plh_handle;

int cpucp_plh_init(struct scmi_device *sdev)
{
	int ret = 0;

	if (!sdev || !sdev->handle)
		return -EINVAL;
#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
	plh_ops = sdev->handle->devm_protocol_get(sdev, QCOM_SCMI_VENDOR_PROTOCOL, &plh_handle);
#else
	plh_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PLH, &plh_handle);
#endif
	/* devm_protocol_get() reports failure via an ERR_PTR */
	if (IS_ERR_OR_NULL(plh_ops)) {
		ret = plh_ops ? PTR_ERR(plh_ops) : -EINVAL;
		plh_ops = NULL;
		return ret;
	}
	return ret;
}
EXPORT_SYMBOL(cpucp_plh_init);

static int splh_notif, splh_init_done, splh_sample_ms, splh_log_level;

#define SPLH_MIN_SAMPLE_MS 1
#define SPLH_MAX_SAMPLE_MS 30
#define PLH_FPS_MAX_CNT 8
#define PLH_IPC_FREQ_VTBL_MAX_CNT 5 /* ipc:freq pairs per FPS level */
#define PLH_INIT_IPC_FREQ_TBL_PARAMS \
	(2 + PLH_FPS_MAX_CNT * (1 + (2 * PLH_IPC_FREQ_VTBL_MAX_CNT)))
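
/*
 * Sanity arithmetic for the bound above: with PLH_FPS_MAX_CNT = 8 FPS
 * levels and PLH_IPC_FREQ_VTBL_MAX_CNT = 5 ipc:freq pairs per level,
 * PLH_INIT_IPC_FREQ_TBL_PARAMS = 2 + 8 * (1 + 2 * 5) = 90 u16 entries
 * (nfps and n_ipc_freq_pair, then per level one FPS value plus the pairs).
 */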
static ssize_t get_splh_sample_ms(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", splh_sample_ms);
}

static ssize_t set_splh_sample_ms(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count)
{
	int ret, ms_val_backup;

	if (!plh_handle || !plh_ops) {
		pr_err("msm_perf: plh scmi handle or vendor ops null\n");
		return -EINVAL;
	}
	ms_val_backup = splh_sample_ms;
	if (sscanf(buf, "%d", &splh_sample_ms) != 1) {
		pr_err("msm_perf: parsing new splh_sample_ms failed\n");
		return -EINVAL;
	}
	splh_sample_ms = clamp(splh_sample_ms, SPLH_MIN_SAMPLE_MS, SPLH_MAX_SAMPLE_MS);
	ret = splh_set_sample_ms(splh_sample_ms);
	if (ret < 0) {
		splh_sample_ms = ms_val_backup;
		pr_err("msm_perf: setting new splh_sample_ms failed, ret=%d\n", ret);
		return ret;
	}
	return count;
}

static ssize_t get_splh_log_level(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", splh_log_level);
}

static ssize_t set_splh_log_level(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count)
{
	int ret, log_val_backup;

	if (!plh_handle || !plh_ops) {
		pr_err("msm_perf: plh scmi handle or vendor ops null\n");
		return -EINVAL;
	}
	log_val_backup = splh_log_level;
	if (sscanf(buf, "%d", &splh_log_level) != 1) {
		pr_err("msm_perf: parsing new splh_log_level failed\n");
		return -EINVAL;
	}
	splh_log_level = clamp(splh_log_level, CPUCP_MIN_LOG_LEVEL, CPUCP_MAX_LOG_LEVEL);
	ret = splh_set_log_level(splh_log_level);
	if (ret < 0) {
		splh_log_level = log_val_backup;
		pr_err("msm_perf: setting new splh_log_level failed, ret=%d\n", ret);
		return ret;
	}
	return count;
}
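
/*
 * A hypothetical scroll-PLH init string for the parser below, with
 * nfps = 2 FPS levels and one ipc:freq pair each (all values are u16 and
 * their units are defined by the CPUCP firmware, so these numbers are
 * only illustrative):
 *
 *	Init:2:60,150,1200:120,180,1600
 *
 * i.e. "Init:nfps:fps0,ipc,freq,...:fps1,ipc,freq,...".
 */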
static int init_plh_notif(const char *buf)
{
	int i, j, ret;
	u16 tmp[PLH_INIT_IPC_FREQ_TBL_PARAMS];
	u16 *ptmp = tmp, ntokens, nfps, n_ipc_freq_pair, tmp_valid_len = 0;
	const char *cp, *cp1;

	/* buf contains the init info from user */
	if (buf == NULL || !plh_handle || !plh_ops)
		return -EINVAL;

	cp = buf;
	ntokens = 0;
	while ((cp = strpbrk(cp + 1, ":")))
		ntokens++;

	/* cmd format: nfps, n_ipc_freq_pair, <fps0, <ipc0, freq0>, ...>, ... */
	cp = buf;
	if (sscanf(cp, INIT ":%hu", &nfps)) {
		if ((nfps != ntokens - 1) || (nfps == 0) || (nfps > PLH_FPS_MAX_CNT))
			return -EINVAL;
		cp = strnchr(cp, strlen(cp), ':'); /* skip INIT */
		cp++;
		cp = strnchr(cp, strlen(cp), ':'); /* skip nfps */
		if (!cp)
			return -EINVAL;
		*ptmp++ = nfps; /* nfps is the first cmd param */
		tmp_valid_len++;
		cp1 = cp;
		ntokens = 0;
		/* count nfps * n_ipc_freq_pair * <ipc freq pair values> */
		while ((cp1 = strpbrk(cp1 + 1, ",")))
			ntokens++;
		if (ntokens % (2 * nfps)) /* pair values must be a multiple of nfps */
			return -EINVAL;
		n_ipc_freq_pair = ntokens / (2 * nfps); /* ipc:freq pairs per FPS */
		if ((n_ipc_freq_pair == 0) || (n_ipc_freq_pair > PLH_IPC_FREQ_VTBL_MAX_CNT))
			return -EINVAL;
		*ptmp++ = n_ipc_freq_pair; /* second cmd param */
		tmp_valid_len++;
		cp1 = cp;
		for (i = 0; i < nfps; i++) {
			if (sscanf(cp1, ":%hu", ptmp) != 1)
				return -EINVAL;
			ptmp++; /* increment after storing FPS val */
			tmp_valid_len++;
			cp1 = strnchr(cp1, strlen(cp1), ','); /* move to ,ipc */
			if (!cp1)
				return -EINVAL;
			for (j = 0; j < 2 * n_ipc_freq_pair; j++) {
				if (sscanf(cp1, ",%hu", ptmp) != 1)
					return -EINVAL;
				ptmp++; /* increment after storing ipc or freq */
				tmp_valid_len++;
				cp1++;
				if (j != (2 * n_ipc_freq_pair - 1)) {
					cp1 = strnchr(cp1, strlen(cp1), ','); /* move to next */
					if (!cp1)
						return -EINVAL;
				}
			}
			if (i != (nfps - 1)) {
				cp1 = strnchr(cp1, strlen(cp1), ':'); /* move to next FPS val */
				if (!cp1)
					return -EINVAL;
			}
		}
	} else {
		return -EINVAL;
	}

	ret = splh_init_ipc_freq_tbl(tmp, tmp_valid_len);
	if (ret < 0) {
		pr_err("msm_perf: splh: failed sending ipc freq table\n");
		return -EINVAL;
	}
	splh_init_done = 1;
	pr_info("msm_perf: splh: nfps=%u n_ipc_freq_pair=%u last_freq_val=%u len=%u\n",
		nfps, n_ipc_freq_pair, *--ptmp, tmp_valid_len);
	return 0;
}
static void activate_splh_notif(void)
{
	int ret;

	/* a scroll event notification has been received */
	if (!plh_handle || !plh_ops) {
		pr_err("msm_perf: splh not supported\n");
		return;
	}
	if (splh_notif)
		ret = splh_start_activity(splh_notif);
	else
		ret = splh_stop_activity; /* parameterless wrapper macro for the PLH SCMI stop call (defined outside this excerpt) */
	if (ret < 0)
		pr_err("msm_perf: splh start or stop failed, ret=%d\n", ret);
}

static ssize_t get_splh_notif(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", splh_notif);
}

static ssize_t set_splh_notif(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count)
{
	int ret;

	if (strnstr(buf, INIT, sizeof(INIT)) != NULL) {
		splh_init_done = 0;
		ret = init_plh_notif(buf);
		if (ret < 0)
			pr_err("msm_perf: splh ipc freq tbl init failed, ret=%d\n", ret);
		return count;
	}
	if (!splh_init_done) {
		pr_err("msm_perf: splh ipc freq tbl not initialized\n");
		return -EINVAL;
	}
	if (sscanf(buf, "%d", &splh_notif) != 1)
		return -EINVAL;
	activate_splh_notif();
	return count;
}
#define LPLH_MIN_SAMPLE_MS 1
#define LPLH_MAX_SAMPLE_MS 30
#define LPLH_CLUSTER_MAX_CNT 4
#define LPLH_IPC_FREQ_VTBL_MAX_CNT 5 /* ipc:freq pairs per cluster */
#define LPLH_INIT_IPC_FREQ_TBL_PARAMS \
	(1 + LPLH_CLUSTER_MAX_CNT * (2 + (2 * LPLH_IPC_FREQ_VTBL_MAX_CNT)))
static int lplh_notif, lplh_init_done, lplh_sample_ms, lplh_log_level;
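
/*
 * Sanity arithmetic, as above for splh: with LPLH_CLUSTER_MAX_CNT = 4 and
 * 5 ipc:freq pairs per cluster, LPLH_INIT_IPC_FREQ_TBL_PARAMS =
 * 1 + 4 * (2 + 2 * 5) = 49 u16 entries (nClusters, then per cluster its
 * id, its pair count, and the pairs).
 */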
static ssize_t get_lplh_sample_ms(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", lplh_sample_ms);
}

static ssize_t set_lplh_sample_ms(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count)
{
	int ret, ms_val_backup;

	if (!plh_handle || !plh_ops) {
		pr_err("msm_perf: plh scmi handle or vendor ops null\n");
		return -EINVAL;
	}
	ms_val_backup = lplh_sample_ms;
	if (sscanf(buf, "%d", &lplh_sample_ms) != 1) {
		pr_err("msm_perf: parsing new lplh_sample_ms failed\n");
		return -EINVAL;
	}
	lplh_sample_ms = clamp(lplh_sample_ms, LPLH_MIN_SAMPLE_MS, LPLH_MAX_SAMPLE_MS);
	ret = lplh_set_sample_ms(lplh_sample_ms);
	if (ret < 0) {
		lplh_sample_ms = ms_val_backup;
		pr_err("msm_perf: setting new lplh_sample_ms failed, ret=%d\n", ret);
		return ret;
	}
	return count;
}

static ssize_t get_lplh_log_level(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", lplh_log_level);
}

static ssize_t set_lplh_log_level(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count)
{
	int ret, log_val_backup;

	if (!plh_handle || !plh_ops) {
		pr_err("msm_perf: plh scmi handle or vendor ops null\n");
		return -EINVAL;
	}
	log_val_backup = lplh_log_level;
	if (sscanf(buf, "%d", &lplh_log_level) != 1) {
		pr_err("msm_perf: parsing new lplh_log_level failed\n");
		return -EINVAL;
	}
	lplh_log_level = clamp(lplh_log_level, CPUCP_MIN_LOG_LEVEL, CPUCP_MAX_LOG_LEVEL);
	ret = lplh_set_log_level(lplh_log_level);
	if (ret < 0) {
		lplh_log_level = log_val_backup;
		pr_err("msm_perf: setting new lplh_log_level failed, ret=%d\n", ret);
		return ret;
	}
	return count;
}
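
/*
 * A hypothetical LPLH init string for the parser below, covering one
 * cluster (id 0) with a single ipc:freq pair (u16 values whose units are
 * defined by the CPUCP firmware):
 *
 *	Init:1:0,150,1200
 *
 * i.e. "Init:nClusters:clusterId,ipc,freq,...:clusterId,ipc,freq,...".
 */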
static int init_lplh_notif(const char *buf)
{
	u16 tmp[LPLH_INIT_IPC_FREQ_TBL_PARAMS];
	char *token;
	int i, j, ret;
	u16 *ptmp = tmp, total_tokens = 0, nTokens = 0, nClusters = 0, clusterId, nValues, value;
	const char *cp, *cp1;

	/* buf contains the init info from user */
	if (buf == NULL || !plh_handle || !plh_ops)
		return -EINVAL;

	cp = buf;
	if (sscanf(cp, INIT ":%hu", &nClusters)) {
		if (!nClusters || nClusters > LPLH_CLUSTER_MAX_CNT)
			return -EINVAL;
		*ptmp++ = nClusters;
		total_tokens++;
		while ((cp = strpbrk(cp + 1, ":")))
			nTokens++;
		if (!nTokens || (nTokens - 1 != nClusters))
			return -EINVAL;
		cp = buf;
		cp = strnchr(cp, strlen(cp), ':'); /* skip INIT */
		cp++;
		cp = strnchr(cp, strlen(cp), ':'); /* skip nClusters */
		cp++;
		if (!cp || !strlen(cp))
			return -EINVAL;
		for (i = 0; i < nClusters; i++) {
			clusterId = 0;
			if (!cp || strlen(cp) == 0)
				return -EINVAL;
			if (sscanf(cp, "%hu,", &clusterId)) {
				*ptmp++ = clusterId;
				total_tokens++;
				cp = strnchr(cp, strlen(cp), ',');
				if (!cp)
					return -EINVAL;
				token = strsep((char **)&cp, ":");
				if (!token || strlen(token) == 0)
					return -EINVAL;
				nValues = 1;
				cp1 = token;
				while ((cp1 = strpbrk(cp1 + 1, ",")))
					nValues++;
				if (nValues % 2 != 0 || LPLH_IPC_FREQ_VTBL_MAX_CNT < nValues / 2)
					return -EINVAL;
				*ptmp++ = nValues / 2;
				total_tokens++;
				for (j = 0; j < nValues / 2; j++) {
					value = 0;
					if (!token || sscanf(token, ",%hu", &value) != 1)
						return -EINVAL;
					*ptmp++ = value;
					total_tokens++;
					token++;
					if (!token || strlen(token) == 0)
						return -EINVAL;
					token = strnchr(token, strlen(token), ',');
					if (!token || sscanf(token, ",%hu", &value) != 1)
						return -EINVAL;
					*ptmp++ = value;
					total_tokens++;
					token++;
					token = strnchr(token, strlen(token), ',');
				}
			} else {
				return -EINVAL;
			}
		}
	} else {
		return -EINVAL;
	}

	ret = lplh_init_ipc_freq_tbl(tmp, total_tokens);
	if (ret < 0) {
		pr_err("msm_perf: lplh: failed sending ipc freq table\n");
		return -EINVAL;
	}
	pr_info("msm_perf: lplh: nClusters=%u last_freq_val=%u len=%u\n",
		nClusters, *--ptmp, total_tokens);
	lplh_init_done = 1;
	return 0;
}
static void activate_lplh_notif(void)
{
	int ret;

	if (!plh_handle || !plh_ops) {
		pr_err("msm_perf: lplh not supported\n");
		return;
	}
	if (lplh_notif)
		ret = lplh_start_activity(lplh_notif);
	else
		ret = lplh_stop_activity; /* parameterless wrapper macro for the PLH SCMI stop call (defined outside this excerpt) */
	if (ret < 0)
		pr_err("msm_perf: lplh start or stop failed, ret=%d\n", ret);
}

static ssize_t get_lplh_notif(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", lplh_notif);
}

static ssize_t set_lplh_notif(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf,
		size_t count)
{
	int ret;

	if (strnstr(buf, INIT, sizeof(INIT)) != NULL) {
		lplh_init_done = 0;
		ret = init_lplh_notif(buf);
		if (ret < 0)
			pr_err("msm_perf: lplh ipc freq tbl init failed, ret=%d\n", ret);
		return count;
	}
	if (!lplh_init_done) {
		pr_err("msm_perf: lplh ipc freq tbl not initialized\n");
		return -EINVAL;
	}
	if (sscanf(buf, "%d", &lplh_notif) != 1)
		return -EINVAL;
	activate_lplh_notif();
	return count;
}
static int scmi_plh_init(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret = 0;
	struct scmi_device *scmi_dev = NULL;

#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
	scmi_dev = get_qcom_scmi_device();
#elif IS_ENABLED(CONFIG_QTI_PLH_SCMI_CLIENT)
	scmi_dev = get_plh_scmi_device();
#endif
	if (IS_ERR(scmi_dev)) {
		ret = PTR_ERR(scmi_dev);
		if (ret == -EPROBE_DEFER)
			return ret;
		dev_err(dev, "plh: Error no plh device, ret = %d\n", ret);
	} else {
		ret = cpucp_plh_init(scmi_dev);
		if (ret < 0)
			dev_err(dev, "plh: Error in %s, ret = %d\n", __func__, ret);
	}
	ret = add_plh_params();
	return ret;
}

static const struct of_device_id scmi_plh_match_table[] = {
	{ .compatible = "qcom,scmi_plh" },
	{},
};

static struct platform_driver scmi_plh_driver = {
	.driver = {
		.name = "scmi-plh",
		.of_match_table = scmi_plh_match_table,
	},
	.probe = scmi_plh_init,
};
static int __init msm_performance_init(void)
{
	unsigned int cpu;
	int ret;

	if (!alloc_cpumask_var(&limit_mask_min, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&limit_mask_max, GFP_KERNEL)) {
		free_cpumask_var(limit_mask_min);
		return -ENOMEM;
	}
	msm_perf_kset = kset_create_and_add("msm_performance", NULL, kernel_kobj);
	if (!msm_perf_kset) {
		free_cpumask_var(limit_mask_min);
		free_cpumask_var(limit_mask_max);
		return -ENOMEM;
	}
	add_module_params();
	init_events_group();
	init_notify_group();
	init_pmu_counter();

	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, cpu_online_mask))
			per_cpu(cpu_is_hp, cpu) = true;
	}
	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
			"msm_performance_cpu_hotplug",
			hotplug_notify_up,
			hotplug_notify_down);
	cpus_read_unlock();
	if (ret < 0)
		pr_err("msm_perf: failed to register hotplug callbacks, ret=%d\n", ret);

	platform_driver_register(&scmi_plh_driver);
	return 0;
}
MODULE_LICENSE("GPL v2");
#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
MODULE_SOFTDEP("pre: qcom_scmi_client");
#endif
late_initcall(msm_performance_init);