// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qcom-cpufreq-hw.h>
#include <linux/topology.h>
#include <linux/units.h>

#include <trace/events/power.h>

#if IS_ENABLED(CONFIG_QCOM_LMH_STAT)
#include <linux/sched/clock.h>
#include <linux/time.h>
#endif

#if IS_ENABLED(CONFIG_SEC_PM_LOG)
#include <linux/sec_pm_log.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/dcvsh.h>

#define LUT_MAX_ENTRIES			40U
#define LUT_SRC				GENMASK(31, 30)
#define LUT_L_VAL			GENMASK(7, 0)
#define LUT_CORE_COUNT			GENMASK(18, 16)
#define LUT_VOLT			GENMASK(11, 0)
#define CLK_HW_DIV			2
#define LUT_TURBO_IND			1
#define MAX_FN_SIZE			20
#define GT_IRQ_STATUS			BIT(2)

#define CYCLE_CNTR_OFFSET(core_id, m, acc_count) \
	(acc_count ? ((core_id + 1) * 4) : 0)
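
/*
 * Per-core cycle-counter offset: with accumulative counters each core's
 * 32-bit counter sits 4 bytes apart, starting at offset 4 for core 0
 * (e.g. core 2 reads at reg_cycle_cntr + 12); otherwise all cores share
 * the counter at offset 0. The 'm' (cpumask) argument is unused here and
 * appears to be kept only for the callers' convenience.
 */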

struct cpufreq_counter {
	u64 total_cycle_counter;
	u32 prev_cycle_counter;
	spinlock_t lock;
};

static struct cpufreq_counter qcom_cpufreq_counter[NR_CPUS];

struct qcom_cpufreq_soc_data {
	u32 reg_enable;
	u32 reg_domain_state;
	u32 reg_dcvs_ctrl;
	u32 reg_freq_lut;
	u32 reg_volt_lut;
	u32 reg_intr_clr;
	u32 reg_current_vote;
	u32 reg_perf_state;
	u32 reg_cycle_cntr;
	u32 lut_max_entries;
	u8 lut_row_size;
	bool accumulative_counter;
	bool turbo_ind_support;
	bool perf_lock_support;
};

struct qcom_cpufreq_data {
	void __iomem *base;
	void __iomem *pdmem_base;
	struct resource *res;
	const struct qcom_cpufreq_soc_data *soc_data;

	/*
	 * Mutex to synchronize between de-init sequence and re-starting LMh
	 * polling/interrupts
	 */
	struct mutex throttle_lock;
	int hw_clk_domain;
	int throttle_irq;
	char irq_name[15];
	bool cancel_throttle;
	struct delayed_work throttle_work;
	struct cpufreq_policy *policy;
	unsigned long last_non_boost_freq;
	bool per_core_dcvs;
	unsigned long dcvsh_freq_limit;
	struct device_attribute freq_limit_attr;
#if IS_ENABLED(CONFIG_QCOM_LMH_STAT)
	unsigned long long last_time;
	unsigned int state_num;
	unsigned int max_state;
	unsigned int last_index;
	u64 *time_in_state;
	unsigned int *freq_table;
	struct device_attribute time_in_state_attr;
#endif
#if IS_ENABLED(CONFIG_SEC_PM_LOG)
	unsigned long lowest_freq;
	bool limiting;
	ktime_t start_time;
	ktime_t limited_time;
	unsigned long accu_time;
#endif
};

#if IS_ENABLED(CONFIG_QCOM_LMH_STAT)
static int freq_table_get_index(struct qcom_cpufreq_data *data, unsigned int freq)
{
	int i;

	for (i = 0; i < data->max_state; i++)
		if (data->freq_table[i] == freq)
			return i;

	return -1;
}

static int freq_table_highest_index(struct qcom_cpufreq_data *data)
{
	int i, high_idx = -1;
	unsigned int high_freq = 0;

	if (data->max_state == 0)
		return high_idx;

	for (i = 0; i < data->max_state; i++) {
		if (data->freq_table[i] > high_freq) {
			high_idx = i;
			high_freq = data->freq_table[i];
		}
	}

	return high_idx;
}

static void throttle_stats_update(struct qcom_cpufreq_data *data,
				  unsigned long long time)
{
	unsigned long long cur_time = local_clock();

	data->time_in_state[data->last_index] += cur_time - time;
	data->last_time = cur_time;
}

static void time_in_state_update(struct qcom_cpufreq_data *data,
				 unsigned long throttled_freq)
{
	int new_index, old_index;

	if (data->max_state == 0)
		return;

	old_index = data->last_index;
	new_index = freq_table_get_index(data, throttled_freq);
	if (old_index == -1 || new_index == -1)
		return;

	throttle_stats_update(data, data->last_time);
	data->last_index = new_index;
}

static ssize_t time_in_state_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct qcom_cpufreq_data *data = container_of(attr, struct qcom_cpufreq_data,
						      time_in_state_attr);
	unsigned long long time;
	ssize_t len = 0;
	int i;

	if (data->max_state == 0)
		return 0;

	for (i = 0; i < data->state_num; i++) {
		time = data->time_in_state[i];
		if (i == data->last_index)
			time += local_clock() - data->last_time;

		len += sprintf(buf + len, "%u %llu\n",
			       data->freq_table[i], div_u64(time, NSEC_PER_MSEC));
	}

	return len;
}

static int time_in_state_attr_init(struct qcom_cpufreq_data *data,
				   struct device *cpu_dev)
{
	struct cpufreq_frequency_table *pos;
	unsigned int i = 0, count;
	unsigned int alloc_size;

	count = cpufreq_table_count_valid_entries(data->policy);
	if (!count)
		return -1;

	alloc_size = sizeof(u64) * count + sizeof(unsigned int) * count;
	data->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!data->time_in_state)
		return -1;

	data->freq_table = (unsigned int *)(data->time_in_state + count);

	cpufreq_for_each_valid_entry(pos, data->policy->freq_table)
		if (freq_table_get_index(data, pos->frequency) == -1)
			data->freq_table[i++] = pos->frequency;
	data->state_num = i;

	sysfs_attr_init(&data->time_in_state_attr.attr);
	data->time_in_state_attr.attr.name = "dcvsh_state_ms";
	data->time_in_state_attr.show = time_in_state_show;
	data->time_in_state_attr.attr.mode = 0444;
	if (device_create_file(cpu_dev, &data->time_in_state_attr)) {
		kfree(data->time_in_state);
		return -1;
	}

	/* policy->cpuinfo.max_freq == 0 at this time */
	data->max_state = count;
	data->last_index = freq_table_highest_index(data);
	data->last_time = local_clock();

	pr_err("%s : cpu[%d] max_state %u, last_time %llu, last_index %d\n",
	       __func__, data->policy->cpu,
	       data->max_state, data->last_time, data->last_index);

	return 0;
}
#endif

static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;

/*
 * show_hw_clk_domain - show the HW clock domain index backing this policy
 */
static ssize_t show_hw_clk_domain(struct cpufreq_policy *policy, char *buf)
{
	struct qcom_cpufreq_data *data;

	data = policy->driver_data;
	if (data)
		return scnprintf(buf, PAGE_SIZE, "%d\n", data->hw_clk_domain);

	return -EIO;
}
cpufreq_freq_attr_ro(hw_clk_domain);

static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
			       unsigned long freq_khz)
{
	unsigned long freq_hz = freq_khz * 1000;
	struct dev_pm_opp *opp;
	struct device *dev;
	int ret;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}

static int qcom_cpufreq_update_opp(struct device *cpu_dev,
				   unsigned long freq_khz,
				   unsigned long volt)
{
	unsigned long freq_hz = freq_khz * 1000;
	int ret;

	/* Skip voltage update if the opp table is not available */
	if (!icc_scaling_enabled)
		return dev_pm_opp_add(cpu_dev, freq_hz, volt);

	ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
	if (ret) {
		dev_err(cpu_dev, "Voltage update failed freq=%lu\n", freq_khz);
		return ret;
	}

	return dev_pm_opp_enable(cpu_dev, freq_hz);
}

u64 qcom_cpufreq_get_cpu_cycle_counter(int cpu)
{
	const struct qcom_cpufreq_soc_data *soc_data;
	struct cpufreq_counter *cpu_counter;
	struct qcom_cpufreq_data *data;
	struct cpufreq_policy *policy;
	u64 cycle_counter_ret;
	unsigned long flags;
	u16 offset;
	u32 val;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;
	soc_data = data->soc_data;

	cpu_counter = &qcom_cpufreq_counter[cpu];
	spin_lock_irqsave(&cpu_counter->lock, flags);

	offset = CYCLE_CNTR_OFFSET(topology_core_id(cpu), policy->related_cpus,
				   soc_data->accumulative_counter);
	val = readl_relaxed(data->base +
			    soc_data->reg_cycle_cntr + offset);
	if (val < cpu_counter->prev_cycle_counter) {
		/* Handle counter overflow */
		cpu_counter->total_cycle_counter += UINT_MAX -
			cpu_counter->prev_cycle_counter + val;
		cpu_counter->prev_cycle_counter = val;
	} else {
		cpu_counter->total_cycle_counter += val -
			cpu_counter->prev_cycle_counter;
		cpu_counter->prev_cycle_counter = val;
	}
	cycle_counter_ret = cpu_counter->total_cycle_counter;
	spin_unlock_irqrestore(&cpu_counter->lock, flags);

	pr_debug("CPU %u, core-id 0x%x, offset %u\n", cpu, topology_core_id(cpu), offset);

	return cycle_counter_ret;
}
EXPORT_SYMBOL(qcom_cpufreq_get_cpu_cycle_counter);
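
/*
 * The underlying hardware counter is only 32 bits wide; the function above
 * widens it to a monotonic 64-bit count by accumulating deltas under the
 * per-CPU lock. A minimal caller sketch (the variable names here are
 * hypothetical, not part of this driver):
 *
 *	static u64 prev_cycles;
 *	u64 now = qcom_cpufreq_get_cpu_cycle_counter(cpu);
 *	u64 delta = now - prev_cycles;	// cycles since the last sample
 *	prev_cycles = now;
 */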

static void __cpufreq_hw_target_index_call_notifier_chain(struct cpufreq_policy *policy, unsigned int index);

static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
					unsigned int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned long freq = policy->freq_table[index].frequency;
	unsigned int i;

	__cpufreq_hw_target_index_call_notifier_chain(policy, index);

	if (soc_data->perf_lock_support) {
		if (data->pdmem_base)
			writel_relaxed(index, data->pdmem_base);
	}

	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	if (icc_scaling_enabled)
		qcom_cpufreq_set_bw(policy, freq);

	return 0;
}
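
/*
 * LMh reports its current limit as an L-value, a multiplier of the XO
 * reference clock, rather than in Hz. As an illustrative example, assuming
 * the usual 19.2 MHz XO: an lval of 100 decodes to 100 * 19.2 MHz =
 * 1.92 GHz. OSM-style hardware exposes the value in reg_current_vote,
 * EPSS-style hardware in reg_domain_state (see the helper below).
 */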
static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
	unsigned int lval;

	if (data->soc_data->reg_current_vote)
		lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
	else
		lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;

	return lval * xo_rate;
}

/* Get the frequency requested by the cpufreq core for the CPU */
static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
	struct cpufreq_policy *policy;
	unsigned int index;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;
	soc_data = data->soc_data;

	index = readl_relaxed(data->base + soc_data->reg_perf_state);
	index = min(index, soc_data->lut_max_entries - 1);

	return policy->freq_table[index].frequency;
}

static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	struct cpufreq_policy *policy;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;

	if (data->throttle_irq >= 0)
		return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;

	return qcom_cpufreq_get_freq(cpu);
}

static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned int index;
	unsigned int i;

	index = policy->cached_resolved_idx;
	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	return policy->freq_table[index].frequency;
}
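
/*
 * Each LUT row pairs a frequency word (at reg_freq_lut) with a voltage
 * word (at reg_volt_lut), rows spaced lut_row_size bytes apart. A worked
 * decode of one hypothetical row, assuming the 19.2 MHz XO:
 *
 *	freq word 0x40040064: LUT_SRC = 1, LUT_CORE_COUNT = 4, LUT_L_VAL = 100
 *		-> freq = 100 * 19.2 MHz = 1920000 kHz
 *	volt word 0x320: LUT_VOLT = 800 (mV) -> 800000 uV
 *
 * When LUT_SRC is 0, the row runs from the alternate hardware clock
 * (cpu_hw_rate) instead of an XO multiple.
 */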
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
				    struct cpufreq_policy *policy)
{
	u32 data, src, lval, i, core_count, prev_freq = 0, freq;
	u32 volt, max_cc = 0;
	struct cpufreq_frequency_table *table;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;
	struct qcom_cpufreq_data *drv_data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;

	table = kcalloc(soc_data->lut_max_entries + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (!ret) {
		/* Disable all opps and cross-validate against LUT later */
		icc_scaling_enabled = true;
		for (rate = 0; ; rate++) {
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
			if (IS_ERR(opp))
				break;

			dev_pm_opp_put(opp);
			dev_pm_opp_disable(cpu_dev, rate);
		}
	} else if (ret != -ENODEV) {
		dev_err(cpu_dev, "Invalid opp table in device tree\n");
		kfree(table);
		return ret;
	} else {
		policy->fast_switch_possible = true;
		icc_scaling_enabled = false;
	}

	for (i = 0; i < soc_data->lut_max_entries; i++) {
		data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
				     i * soc_data->lut_row_size);
		src = FIELD_GET(LUT_SRC, data);
		lval = FIELD_GET(LUT_L_VAL, data);
		core_count = FIELD_GET(LUT_CORE_COUNT, data);

		if (i == 0)
			max_cc = core_count;

		data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
				     i * soc_data->lut_row_size);
		volt = FIELD_GET(LUT_VOLT, data) * 1000;

		if (src)
			freq = xo_rate * lval / 1000;
		else
			freq = cpu_hw_rate / 1000;

		if (core_count == LUT_TURBO_IND && soc_data->turbo_ind_support)
			table[i].frequency = CPUFREQ_ENTRY_INVALID;
		else if (freq != prev_freq) {
			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
				table[i].frequency = freq;
				if (core_count < max_cc)
					table[i].flags = CPUFREQ_BOOST_FREQ;
				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
					freq, core_count);
			} else {
				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
				table[i].frequency = CPUFREQ_ENTRY_INVALID;
			}
		}

		/*
		 * Two of the same frequencies with the same core counts means
		 * end of table
		 */
		if (i > 0 && prev_freq == freq) {
			struct cpufreq_frequency_table *prev = &table[i - 1];

			/*
			 * Only treat the last frequency that might be a boost
			 * as the boost frequency
			 */
			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
					prev->frequency = prev_freq;
					prev->flags = CPUFREQ_BOOST_FREQ;
				} else {
					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
						 freq);
				}
			}

			break;
		}

		prev_freq = freq;
	}

	table[i].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;

	for (i = 0; i < soc_data->lut_max_entries && table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (table[i].flags == CPUFREQ_BOOST_FREQ)
			break;
		drv_data->last_non_boost_freq = table[i].frequency;
	}

	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

	return 0;
}

static void qcom_get_related_cpus(int index, struct cpumask *m)
{
	struct device_node *cpu_np;
	struct of_phandle_args args;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np)
			continue;

		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
						 "#freq-domain-cells", 0,
						 &args);
		of_node_put(cpu_np);
		if (ret < 0)
			continue;

		if (index == args.args[0])
			cpumask_set_cpu(cpu, m);
	}
}

static ssize_t dcvsh_freq_limit_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct qcom_cpufreq_data *c = container_of(attr, struct qcom_cpufreq_data,
						   freq_limit_attr);

	return scnprintf(buf, PAGE_SIZE, "%lu\n", c->dcvsh_freq_limit);
}
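
/*
 * LMh throttle handling alternates between interrupt and polling mode:
 * the throttle IRQ fires once and is disabled, after which the function
 * below is re-queued every 10 ms on the high-priority workqueue for as
 * long as the hardware limit stays below the frequency cpufreq last
 * requested. Once the limit recovers to (or above) the requested
 * frequency, polling stops and the interrupt is re-enabled.
 */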
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
	struct cpufreq_policy *policy = data->policy;
	int cpu = cpumask_first(policy->related_cpus);
	struct device *dev = get_cpu_device(cpu);
	unsigned long freq_hz, throttled_freq, thermal_pressure;
	struct dev_pm_opp *opp;
	unsigned long trace_freq;
	char lmh_debug[8] = {0};

	if (!dev)
		return;

	/*
	 * Get the h/w throttled frequency, normalize it using the
	 * registered opp table and use it to calculate thermal pressure.
	 */
	freq_hz = qcom_lmh_get_throttle_freq(data);

	opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
		opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);

	if (IS_ERR(opp))
		dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
	else
		dev_pm_opp_put(opp);

	trace_freq =
	throttled_freq = thermal_pressure = freq_hz / HZ_PER_KHZ;

	/*
	 * In the unlikely case policy is unregistered do not enable
	 * polling or h/w interrupt
	 */
	mutex_lock(&data->throttle_lock);
	if (data->cancel_throttle)
		goto out;

	/*
	 * If h/w throttled frequency is higher than what cpufreq has requested
	 * for, then stop polling and switch back to interrupt mechanism.
	 */
	if (throttled_freq >= qcom_cpufreq_get_freq(cpu)) {
		trace_freq =
		thermal_pressure = policy->cpuinfo.max_freq;
		enable_irq(data->throttle_irq);
		trace_dcvsh_throttle(cpu, 0);
#if IS_ENABLED(CONFIG_SEC_PM_LOG)
		if (data->limiting == true) {
			data->limiting = false;
			data->limited_time = (ktime_get() - data->start_time);
			data->accu_time += ktime_to_ms(data->limited_time);
			ss_thermal_print("Fin. lmh cpu%d, lowest %lu, f_lim %lu, dcvsh %u, accu %lu\n",
					 cpu, (data->lowest_freq / 1000), (throttled_freq / 1000),
					 (qcom_cpufreq_hw_get(cpu) / 1000), data->accu_time);
			data->lowest_freq = UINT_MAX;
		}
#endif
	} else {
		/*
		 * If the frequency is at least the highest, non-boost
		 * frequency, then the delta vs. what's requested is likely due
		 * to core-count boost limitations and shouldn't be
		 * communicated as thermal pressure.
		 */
		if (throttled_freq >= data->last_non_boost_freq)
			thermal_pressure = policy->cpuinfo.max_freq;
#if IS_ENABLED(CONFIG_SEC_PM_LOG)
		if (data->limiting == false) {
			ss_thermal_print("Start lmh cpu%d @%lu\n", cpu, (thermal_pressure / 1000));
			data->lowest_freq = thermal_pressure;
			data->limiting = true;
			data->start_time = ktime_get();
		} else {
			if (thermal_pressure < data->lowest_freq)
				data->lowest_freq = thermal_pressure;
		}
#endif
		mod_delayed_work(system_highpri_wq, &data->throttle_work,
				 msecs_to_jiffies(10));
	}

	trace_dcvsh_freq(cpu, qcom_cpufreq_get_freq(cpu), throttled_freq, thermal_pressure);

	/* Update thermal pressure (the boost frequencies are accepted) */
	arch_update_thermal_pressure(policy->related_cpus, thermal_pressure);

	data->dcvsh_freq_limit = thermal_pressure;
	snprintf(lmh_debug, sizeof(lmh_debug), "lmh_%d", cpu);
	trace_clock_set_rate(lmh_debug, trace_freq, raw_smp_processor_id());
#if IS_ENABLED(CONFIG_QCOM_LMH_STAT)
	time_in_state_update(data, trace_freq);
#endif

out:
	mutex_unlock(&data->throttle_lock);
}

static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
	struct qcom_cpufreq_data *data;

	data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
	qcom_lmh_dcvs_notify(data);
}

static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
	struct qcom_cpufreq_data *c_data = data;
	struct cpufreq_policy *policy = c_data->policy;

	/* Disable interrupt and enable polling */
	disable_irq_nosync(c_data->throttle_irq);
	trace_dcvsh_throttle(cpumask_first(policy->cpus), 1);
	schedule_delayed_work(&c_data->throttle_work, 0);

	if (c_data->soc_data->reg_intr_clr)
		writel_relaxed(GT_IRQ_STATUS,
			       c_data->base + c_data->soc_data->reg_intr_clr);

	return IRQ_HANDLED;
}
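
/*
 * Per-SoC register layouts. The original OSM block ("qcom,cpufreq-hw")
 * uses 32-byte LUT rows and reports throttling via reg_current_vote; the
 * EPSS/RIMPS blocks use 4-byte rows and a reg_domain_state register. The
 * "-pdmem" variants additionally mirror the requested perf-state index
 * into a PDMEM region (perf_lock_support).
 */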
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_dcvs_ctrl = 0xbc,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_current_vote = 0x704,
	.reg_perf_state = 0x920,
	.reg_cycle_cntr = 0x9c0,
	.lut_row_size = 32,
	.lut_max_entries = LUT_MAX_ENTRIES,
	.accumulative_counter = false,
	.turbo_ind_support = true,
};

static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.reg_cycle_cntr = 0x3c4,
	.lut_row_size = 4,
	.lut_max_entries = LUT_MAX_ENTRIES,
	.accumulative_counter = true,
	.turbo_ind_support = false,
	.perf_lock_support = false,
};

static const struct qcom_cpufreq_soc_data epss_pdmem_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.reg_cycle_cntr = 0x3c4,
	.lut_row_size = 4,
	.lut_max_entries = LUT_MAX_ENTRIES,
	.accumulative_counter = true,
	.turbo_ind_support = false,
	.perf_lock_support = true,
};

static const struct qcom_cpufreq_soc_data rimps_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.reg_cycle_cntr = 0x3c4,
	.lut_row_size = 4,
	.lut_max_entries = 12,
	.accumulative_counter = true,
	.turbo_ind_support = false,
	.perf_lock_support = false,
};

static const struct qcom_cpufreq_soc_data rimps_pdmem_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.reg_cycle_cntr = 0x3c4,
	.lut_row_size = 4,
	.lut_max_entries = 12,
	.accumulative_counter = true,
	.turbo_ind_support = false,
	.perf_lock_support = true,
};

static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{ .compatible = "qcom,cpufreq-epss-pdmem", .data = &epss_pdmem_soc_data },
	{ .compatible = "qcom,cpufreq-rimps", .data = &rimps_soc_data },
	{ .compatible = "qcom,cpufreq-rimps-pdmem", .data = &rimps_pdmem_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
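
/*
 * Note on the IRQ life cycle: the throttle interrupt below is requested
 * with IRQF_NO_AUTOEN, so it stays disabled until qcom_cpufreq_ready()
 * enables it once the policy is fully registered, which keeps the handler
 * from running against a half-initialized policy.
 */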
static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index,
				    struct device *cpu_dev)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for LMh interrupt. If no interrupt line is specified /
	 * if there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	data->cancel_throttle = false;
	data->policy = policy;

	mutex_init(&data->throttle_lock);
	INIT_DELAYED_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

	snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
	ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
	if (ret) {
		dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
		return 0;
	}

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	sysfs_attr_init(&data->freq_limit_attr.attr);
	data->freq_limit_attr.attr.name = "dcvsh_freq_limit";
	data->freq_limit_attr.show = dcvsh_freq_limit_show;
	data->freq_limit_attr.attr.mode = 0444;
	data->dcvsh_freq_limit = U32_MAX;
	device_create_file(cpu_dev, &data->freq_limit_attr);

#if IS_ENABLED(CONFIG_SEC_PM_LOG)
	data->accu_time = 0;
#endif

#if IS_ENABLED(CONFIG_QCOM_LMH_STAT)
	if (time_in_state_attr_init(data, cpu_dev))
		pr_err("QCOM LMH clock state init error\n");
#endif

	return 0;
}

static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = false;
	mutex_unlock(&data->throttle_lock);

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return ret;
}

static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = true;
	mutex_unlock(&data->throttle_lock);

	cancel_delayed_work_sync(&data->throttle_work);
	irq_set_affinity_and_hint(data->throttle_irq, NULL);
	disable_irq_nosync(data->throttle_irq);
	arch_update_thermal_pressure(policy->related_cpus, U32_MAX);
	trace_dcvsh_throttle(cpumask_first(policy->related_cpus), 0);

	return 0;
}

static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
	if (data->throttle_irq <= 0)
		return;

	free_irq(data->throttle_irq, data);
}

static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
	struct platform_device *pdev = cpufreq_get_driver_data();
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	struct device_node *cpu_np;
	struct device *cpu_dev;
	struct resource *res;
	void __iomem *base;
	struct qcom_cpufreq_data *data;
	char pdmem_name[MAX_FN_SIZE] = {};
	int ret, index;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	cpu_np = of_cpu_device_node_get(policy->cpu);
	if (!cpu_np)
		return -EINVAL;

	ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
					 "#freq-domain-cells", 0, &args);
	of_node_put(cpu_np);
	if (ret)
		return ret;

	index = args.args[0];

	data = policy->driver_data;
	if (!data) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		if (!res) {
			dev_err(dev, "failed to get mem resource %d\n", index);
			return -ENODEV;
		}

		if (!devm_request_mem_region(dev, res->start, resource_size(res), res->name)) {
			dev_err(dev, "failed to request resource %pR\n", res);
			return -EBUSY;
		}

		base = devm_ioremap(dev, res->start, resource_size(res));
		if (!base) {
			dev_err(dev, "failed to map resource %pR\n", res);
			return -ENOMEM;
		}

		data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		ret = sysfs_create_file(&policy->kobj, &hw_clk_domain.attr);
		if (ret) {
			pr_err("%s: cannot register HW clock domain sysfs file\n", __func__);
			return ret;
		}

		data->soc_data = of_device_get_match_data(&pdev->dev);
		data->base = base;
		data->res = res;
		data->hw_clk_domain = index;
	}

	base = data->base;

	/* HW should be in enabled state to proceed */
	if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
		ret = -ENODEV;
		goto error;
	}

	if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1)
		data->per_core_dcvs = true;

	qcom_get_related_cpus(index, policy->cpus);
	if (cpumask_empty(policy->cpus)) {
		dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
		ret = -ENOENT;
		goto error;
	}

	policy->driver_data = data;
	policy->dvfs_possible_from_any_cpu = true;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {
		dev_err(dev, "Domain-%d failed to read LUT\n", index);
		goto error;
	}

	if (data->soc_data->perf_lock_support) {
		snprintf(pdmem_name, sizeof(pdmem_name), "pdmem-domain%d",
			 index);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   pdmem_name);
		if (!res) {
			dev_err(dev, "PDMEM domain-%d failed\n", index);
		} else {
			base = devm_ioremap_resource(dev, res);
			if (IS_ERR(base))
				dev_err(dev, "Failed to map PDMEM domain-%d\n", index);
			else
				data->pdmem_base = base;
		}
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "Failed to add OPPs\n");
		ret = -ENODEV;
		goto error;
	}

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret)
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
	}

	ret = qcom_cpufreq_hw_lmh_init(policy, index, cpu_dev);
	if (ret)
		goto error;

	return 0;

error:
	policy->driver_data = NULL;
	return ret;
}

static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	qcom_cpufreq_hw_lmh_exit(policy->driver_data);
	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	kfree(policy->freq_table);

	return 0;
}

static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq >= 0)
		enable_irq(data->throttle_irq);
}

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		 CPUFREQ_IS_COOLING_DEV,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = qcom_cpufreq_hw_target_index,
	.get = qcom_cpufreq_hw_get,
	.init = qcom_cpufreq_hw_cpu_init,
	.exit = qcom_cpufreq_hw_cpu_exit,
	.online = qcom_cpufreq_hw_cpu_online,
	.offline = qcom_cpufreq_hw_cpu_offline,
	.register_em = cpufreq_register_em_with_opp,
	.fast_switch = qcom_cpufreq_hw_fast_switch,
	.name = "qcom-cpufreq-hw",
	.attr = qcom_cpufreq_hw_attr,
	.ready = qcom_cpufreq_ready,
	.boost_enabled = true,
};

static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct clk *clk;
	int ret, cpu;

	clk = clk_get(&pdev->dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	clk = clk_get(&pdev->dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu)
		spin_lock_init(&qcom_cpufreq_counter[cpu].lock);

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret)
		dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
	else
		dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");

	return ret;
}

static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}

static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};

static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);

static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);

MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
MODULE_LICENSE("GPL v2");

#if IS_ENABLED(CONFIG_SEC_QC_SMEM)
static ATOMIC_NOTIFIER_HEAD(target_index_notifier_list);

int qcom_cpufreq_hw_target_index_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&target_index_notifier_list, nb);
}
EXPORT_SYMBOL(qcom_cpufreq_hw_target_index_register_notifier);

int qcom_cpufreq_hw_target_index_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&target_index_notifier_list, nb);
}
EXPORT_SYMBOL(qcom_cpufreq_hw_target_index_unregister_notifier);

static void __cpufreq_hw_target_index_call_notifier_chain(struct cpufreq_policy *policy, unsigned int index)
{
	atomic_notifier_call_chain(&target_index_notifier_list, index, policy);
}
#else
static void __cpufreq_hw_target_index_call_notifier_chain(struct cpufreq_policy *policy, unsigned int index) {}
#endif