  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
  7. #include <linux/module.h>
  8. #include <linux/slab.h>
  9. #include <linux/thermal.h>
  10. #include <linux/of.h>
  11. #include <linux/of_device.h>
  12. #include <linux/of_irq.h>
  13. #include <linux/of_address.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/sched.h>
  16. #include <linux/io.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/timer.h>
  19. #include <linux/pm_opp.h>
  20. #include <linux/atomic.h>
  21. #include <linux/regulator/consumer.h>
  22. #include <asm/smp_plat.h>
  23. #define LIMITS_DCVSH 0x10
  24. #define LIMITS_NODE_DCVS 0x44435653
  25. #define LIMITS_SUB_FN_THERMAL 0x54484D4C
  26. #define LIMITS_HI_THRESHOLD 0x48494748
  27. #define LIMITS_LOW_THRESHOLD 0x4C4F5700
  28. #define LIMITS_ARM_THRESHOLD 0x41524D00
  29. #define LIMITS_CLUSTER_0 0x6370302D
  30. #define LIMITS_CLUSTER_1 0x6370312D
  31. #define LIMITS_FREQ_CAP 0x46434150
  32. #define LIMITS_TEMP_DEFAULT 75000
  33. #define LIMITS_TEMP_HIGH_THRESH_MAX 120000
  34. #define LIMITS_LOW_THRESHOLD_OFFSET 500
  35. #define LIMITS_POLLING_DELAY_MS 10
  36. #define LIMITS_CLUSTER_REQ_OFFSET 0x704
  37. #define LIMITS_CLUSTER_INT_CLR_OFFSET 0x8
  38. #define dcvsh_get_frequency(_val, _max) do { \
  39. _max = (_val) & 0x3FF; \
  40. _max *= 19200; \
  41. } while (0)
  42. #define FREQ_KHZ_TO_HZ(_val) ((_val) * 1000)
  43. #define FREQ_HZ_TO_KHZ(_val) ((_val) / 1000)
/* Trip points of the LMH hardware (indices into per-cluster thresholds). */
enum lmh_hw_trips {
	LIMITS_TRIP_ARM,	/* arm threshold (see LIMITS_ARM_THRESHOLD) */
	LIMITS_TRIP_HI,		/* high threshold (see LIMITS_HI_THRESHOLD) */
	LIMITS_TRIP_MAX,	/* number of trips supported */
};
/* Per-CPU cooling device bookkeeping, one entry per CPU in the cluster. */
struct __limits_cdev_data {
	struct thermal_cooling_device *cdev;	/* cooling device; probe seeds to NULL */
	u32 max_freq;				/* frequency cap; probe seeds to U32_MAX (no cap) */
};
/*
 * State for one LMH DCVS hardware instance (one per CPU cluster).
 * access_lock serializes the ISR path against the polling worker.
 */
struct limits_dcvs_hw {
	char sensor_name[THERMAL_NAME_LENGTH];	/* "limits_sensor-%02d", from affinity */
	uint32_t affinity;			/* LIMITS_CLUSTER_0 or LIMITS_CLUSTER_1 */
	int irq_num;				/* mitigation interrupt line */
	void *osm_hw_reg;			/* mapped OSM frequency request register */
	void *int_clr_reg;			/* mapped interrupt clear register */
	cpumask_t core_map;			/* CPUs governed by this instance */
	struct delayed_work freq_poll_work;	/* polls limit while IRQ is masked */
	unsigned long max_freq[NR_CPUS];	/* per-CPU max OPP in kHz; U32_MAX until read */
	unsigned long hw_freq_limit;		/* last mitigated limit, exposed via sysfs */
	struct device_attribute lmh_freq_attr;	/* "lmh_freq_limit" sysfs attribute */
	struct list_head list;			/* link into lmh_dcvs_hw_list */
	bool is_irq_enabled;			/* false while the poll worker owns mitigation */
	struct mutex access_lock;		/* guards IRQ state and limit updates */
	struct __limits_cdev_data *cdev_data;	/* one slot per CPU in core_map */
	uint32_t cdev_registered;		/* count of registered cooling devices */
	struct regulator *isens_reg[2];		/* isens VREF rails: [0]=1.8V, [1]=0.8V */
};
/* All probed LMH instances; additions are guarded by lmh_dcvs_list_access. */
LIST_HEAD(lmh_dcvs_hw_list);
DEFINE_MUTEX(lmh_dcvs_list_access);
  73. static void limits_dcvs_get_freq_limits(struct limits_dcvs_hw *hw)
  74. {
  75. unsigned long freq_ceil = UINT_MAX, freq_floor = 0;
  76. struct device *cpu_dev = NULL;
  77. uint32_t cpu, idx = 0;
  78. for_each_cpu(cpu, &hw->core_map) {
  79. freq_ceil = UINT_MAX;
  80. freq_floor = 0;
  81. cpu_dev = get_cpu_device(cpu);
  82. if (!cpu_dev) {
  83. pr_err("Error in get CPU%d device\n", cpu);
  84. idx++;
  85. continue;
  86. }
  87. dev_pm_opp_find_freq_floor(cpu_dev, &freq_ceil);
  88. dev_pm_opp_find_freq_ceil(cpu_dev, &freq_floor);
  89. hw->max_freq[idx] = freq_ceil / 1000;
  90. idx++;
  91. }
  92. }
  93. static unsigned long limits_mitigation_notify(struct limits_dcvs_hw *hw)
  94. {
  95. uint32_t val = 0, max_cpu_ct = 0, max_cpu_limit = 0, idx = 0, cpu = 0;
  96. struct device *cpu_dev = NULL;
  97. unsigned long freq_val, max_limit = 0;
  98. struct dev_pm_opp *opp_entry;
  99. val = readl_relaxed(hw->osm_hw_reg);
  100. dcvsh_get_frequency(val, max_limit);
  101. for_each_cpu(cpu, &hw->core_map) {
  102. cpu_dev = get_cpu_device(cpu);
  103. if (!cpu_dev) {
  104. pr_err("Error in get CPU%d device\n",
  105. cpumask_first(&hw->core_map));
  106. goto notify_exit;
  107. }
  108. pr_debug("CPU:%d max value read:%lu\n",
  109. cpumask_first(&hw->core_map),
  110. max_limit);
  111. freq_val = FREQ_KHZ_TO_HZ(max_limit);
  112. opp_entry = dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
  113. /*
  114. * Hardware mitigation frequency can be lower than the lowest
  115. * possible CPU frequency. In that case freq floor call will
  116. * fail with -ERANGE and we need to match to the lowest
  117. * frequency using freq_ceil.
  118. */
  119. if (IS_ERR(opp_entry) && PTR_ERR(opp_entry) == -ERANGE) {
  120. opp_entry = dev_pm_opp_find_freq_ceil(cpu_dev,
  121. &freq_val);
  122. if (IS_ERR(opp_entry))
  123. dev_err(cpu_dev,
  124. "frequency:%lu. opp error:%ld\n",
  125. freq_val, PTR_ERR(opp_entry));
  126. }
  127. if (FREQ_HZ_TO_KHZ(freq_val) == hw->max_freq[idx]) {
  128. max_cpu_ct++;
  129. if (max_cpu_limit < hw->max_freq[idx])
  130. max_cpu_limit = hw->max_freq[idx];
  131. idx++;
  132. continue;
  133. }
  134. max_limit = FREQ_HZ_TO_KHZ(freq_val);
  135. break;
  136. }
  137. if (max_cpu_ct == cpumask_weight(&hw->core_map))
  138. max_limit = max_cpu_limit;
  139. pr_debug("CPU:%d max limit:%lu\n", cpumask_first(&hw->core_map),
  140. max_limit);
  141. notify_exit:
  142. hw->hw_freq_limit = max_limit;
  143. return max_limit;
  144. }
/*
 * limits_dcvs_poll() - Delayed work that polls while mitigation is active.
 *
 * While the LMH interrupt is masked, this worker re-samples the mitigated
 * frequency every LIMITS_POLLING_DELAY_MS. Once every CPU in the cluster
 * is at (or above) its cached maximum, the latched interrupt is cleared
 * and the IRQ is re-enabled; otherwise the work re-arms itself.
 */
static void limits_dcvs_poll(struct work_struct *work)
{
	unsigned long max_limit = 0;
	struct limits_dcvs_hw *hw = container_of(work,
						 struct limits_dcvs_hw,
						 freq_poll_work.work);
	int cpu_ct = 0, cpu = 0, idx = 0;

	mutex_lock(&hw->access_lock);
	/*
	 * Per-CPU maximums are lazily initialized: probe seeds them with
	 * U32_MAX and the first poll fetches the real OPP ceilings.
	 */
	if (hw->max_freq[0] == U32_MAX)
		limits_dcvs_get_freq_limits(hw);
	max_limit = limits_mitigation_notify(hw);
	/* Count the CPUs that are no longer throttled below their maximum */
	for_each_cpu(cpu, &hw->core_map) {
		if (max_limit >= hw->max_freq[idx])
			cpu_ct++;
		idx++;
	}
	if (cpu_ct >= cpumask_weight(&hw->core_map)) {
		/* Mitigation over: ack the latched interrupt, then unmask */
		writel_relaxed(0xFF, hw->int_clr_reg);
		hw->is_irq_enabled = true;
		enable_irq(hw->irq_num);
	} else {
		/* Still mitigating: keep polling */
		mod_delayed_work(system_highpri_wq, &hw->freq_poll_work,
				 msecs_to_jiffies(LIMITS_POLLING_DELAY_MS));
	}
	mutex_unlock(&hw->access_lock);
}
  171. static void lmh_dcvs_notify(struct limits_dcvs_hw *hw)
  172. {
  173. if (hw->is_irq_enabled) {
  174. hw->is_irq_enabled = false;
  175. disable_irq_nosync(hw->irq_num);
  176. limits_mitigation_notify(hw);
  177. mod_delayed_work(system_highpri_wq, &hw->freq_poll_work,
  178. msecs_to_jiffies(LIMITS_POLLING_DELAY_MS));
  179. }
  180. }
  181. static irqreturn_t lmh_dcvs_handle_isr(int irq, void *data)
  182. {
  183. struct limits_dcvs_hw *hw = data;
  184. mutex_lock(&hw->access_lock);
  185. lmh_dcvs_notify(hw);
  186. mutex_unlock(&hw->access_lock);
  187. return IRQ_HANDLED;
  188. }
  189. static void limits_isens_qref_init(struct platform_device *pdev,
  190. struct limits_dcvs_hw *hw,
  191. int idx, char *reg_name,
  192. char *reg_setting)
  193. {
  194. int ret = 0;
  195. uint32_t settings[3];
  196. ret = of_property_read_u32_array(pdev->dev.of_node,
  197. reg_setting, settings, 3);
  198. if (ret) {
  199. if (ret == -EINVAL)
  200. return;
  201. pr_err("Regulator:isens_vref settings read error:%d\n",
  202. ret);
  203. return;
  204. }
  205. hw->isens_reg[idx] = devm_regulator_get(&pdev->dev, reg_name);
  206. if (IS_ERR_OR_NULL(hw->isens_reg[idx])) {
  207. pr_err("Regulator:isens_vref init error:%ld\n",
  208. PTR_ERR(hw->isens_reg[idx]));
  209. return;
  210. }
  211. ret = regulator_set_voltage(hw->isens_reg[idx], settings[0],
  212. settings[1]);
  213. if (ret) {
  214. pr_err("Regulator:isens_vref set voltage error:%d\n", ret);
  215. return;
  216. }
  217. ret = regulator_set_load(hw->isens_reg[idx], settings[2]);
  218. if (ret) {
  219. pr_err("Regulator:isens_vref set load error:%d\n", ret);
  220. return;
  221. }
  222. if (regulator_enable(hw->isens_reg[idx])) {
  223. pr_err("Failed to enable regulator:isens_vref\n");
  224. return;
  225. }
  226. }
  227. static void limits_isens_vref_ldo_init(struct platform_device *pdev,
  228. struct limits_dcvs_hw *hw)
  229. {
  230. limits_isens_qref_init(pdev, hw, 0, "isens_vref_1p8",
  231. "isens-vref-1p8-settings");
  232. limits_isens_qref_init(pdev, hw, 1, "isens_vref_0p8",
  233. "isens-vref-0p8-settings");
  234. }
  235. static ssize_t
  236. lmh_freq_limit_show(struct device *dev, struct device_attribute *devattr,
  237. char *buf)
  238. {
  239. struct limits_dcvs_hw *hw = container_of(devattr,
  240. struct limits_dcvs_hw,
  241. lmh_freq_attr);
  242. return scnprintf(buf, PAGE_SIZE, "%lu\n", hw->hw_freq_limit);
  243. }
/*
 * limits_dcvs_probe() - Probe one LMH DCVS instance described in DT.
 *
 * Builds the cluster cpumask from CPU nodes referencing this LMH node
 * via "qcom,lmh-dcvs", maps the interrupt-clear and OSM request
 * registers from the node's two reg entries, requests the mitigation
 * IRQ, initializes the isens VREF rails and creates the
 * "lmh_freq_limit" sysfs attribute. The instance is always added to
 * lmh_dcvs_hw_list, even on partial failure.
 *
 * Return: 0 on success (and, by design, on most partial failures after
 * DT parsing — see note at probe_exit), -ENOMEM/-ENODEV/-EINVAL on
 * early errors.
 */
static int limits_dcvs_probe(struct platform_device *pdev)
{
	int ret;
	int affinity = -1;
	struct limits_dcvs_hw *hw;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *cpu_node, *lmh_node;
	uint32_t request_reg, clear_reg;
	int cpu, idx = 0;
	cpumask_t mask = { CPU_BITS_NONE };
	const __be32 *addr;

	/* Collect the CPUs whose DT node points back at this LMH node */
	for_each_possible_cpu(cpu) {
		cpu_node = of_cpu_device_node_get(cpu);
		if (!cpu_node)
			continue;
		lmh_node = of_parse_phandle(cpu_node, "qcom,lmh-dcvs", 0);
		if (lmh_node == dn) {
			/*set the cpumask*/
			cpumask_set_cpu(cpu, &(mask));
		}
		of_node_put(cpu_node);
		of_node_put(lmh_node);
	}
	hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return -ENOMEM;
	/*
	 * We just init regulator if none of the CPUs have
	 * reference to our LMH node
	 */
	if (cpumask_empty(&mask)) {
		limits_isens_vref_ldo_init(pdev, hw);
		mutex_lock(&lmh_dcvs_list_access);
		INIT_LIST_HEAD(&hw->list);
		list_add_tail(&hw->list, &lmh_dcvs_hw_list);
		mutex_unlock(&lmh_dcvs_list_access);
		return 0;
	}
	hw->cdev_data = devm_kcalloc(&pdev->dev, cpumask_weight(&mask),
				     sizeof(*hw->cdev_data),
				     GFP_KERNEL);
	if (!hw->cdev_data)
		return -ENOMEM;
	cpumask_copy(&hw->core_map, &mask);
	hw->cdev_registered = 0;
	/* Seed per-CPU state; U32_MAX marks "not yet read from OPP table" */
	for_each_cpu(cpu, &hw->core_map) {
		hw->cdev_data[idx].cdev = NULL;
		hw->cdev_data[idx].max_freq = U32_MAX;
		hw->max_freq[idx] = U32_MAX;
		idx++;
	}
	ret = of_property_read_u32(dn, "qcom,affinity", &affinity);
	if (ret)
		return -ENODEV;
	switch (affinity) {
	case 0:
		hw->affinity = LIMITS_CLUSTER_0;
		break;
	case 1:
		hw->affinity = LIMITS_CLUSTER_1;
		break;
	default:
		return -EINVAL;
	}
	/*
	 * reg entry 0: LLM base (interrupt clear register); entry 1: OSM
	 * base (frequency request register).
	 * NOTE(review): addr[0] is read as a single cell — assumes
	 * #address-cells == 1 for this node; confirm against the DTS.
	 */
	addr = of_get_address(dn, 0, NULL, NULL);
	if (!addr) {
		pr_err("Property llm-base-addr not found\n");
		return -EINVAL;
	}
	clear_reg = be32_to_cpu(addr[0]) + LIMITS_CLUSTER_INT_CLR_OFFSET;
	addr = of_get_address(dn, 1, NULL, NULL);
	if (!addr) {
		pr_err("Property osm-base-addr not found\n");
		return -EINVAL;
	}
	request_reg = be32_to_cpu(addr[0]) + LIMITS_CLUSTER_REQ_OFFSET;
	hw->hw_freq_limit = U32_MAX;
	snprintf(hw->sensor_name, sizeof(hw->sensor_name), "limits_sensor-%02d",
		 affinity);
	mutex_init(&hw->access_lock);
	INIT_DEFERRABLE_WORK(&hw->freq_poll_work, limits_dcvs_poll);
	hw->osm_hw_reg = devm_ioremap(&pdev->dev, request_reg, 0x4);
	if (!hw->osm_hw_reg) {
		/*
		 * NOTE(review): ret is 0 here (from of_property_read_u32),
		 * so these gotos still register the instance and report
		 * probe success — presumably deliberate best-effort
		 * behavior; confirm before changing.
		 */
		pr_err("register remap failed\n");
		goto probe_exit;
	}
	hw->int_clr_reg = devm_ioremap(&pdev->dev, clear_reg, 0x4);
	if (!hw->int_clr_reg) {
		pr_err("interrupt clear reg remap failed\n");
		goto probe_exit;
	}
	hw->irq_num = of_irq_get(pdev->dev.of_node, 0);
	if (hw->irq_num < 0) {
		pr_err("Error getting IRQ number. err:%d\n", hw->irq_num);
		goto probe_exit;
	}
	hw->is_irq_enabled = true;
	ret = devm_request_threaded_irq(&pdev->dev, hw->irq_num, NULL,
		lmh_dcvs_handle_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT
		| IRQF_NO_SUSPEND | IRQF_SHARED, hw->sensor_name, hw);
	if (ret) {
		/* IRQ failure is tolerated: clear ret so probe reports success */
		pr_err("Error registering for irq. err:%d\n", ret);
		ret = 0;
		goto probe_exit;
	}
	limits_isens_vref_ldo_init(pdev, hw);
	sysfs_attr_init(&hw->lmh_freq_attr.attr);
	hw->lmh_freq_attr.attr.name = "lmh_freq_limit";
	hw->lmh_freq_attr.show = lmh_freq_limit_show;
	hw->lmh_freq_attr.attr.mode = 0444;
	device_create_file(&pdev->dev, &hw->lmh_freq_attr);
probe_exit:
	/* Instance is listed even after partial failure (ret is 0 here) */
	mutex_lock(&lmh_dcvs_list_access);
	INIT_LIST_HEAD(&hw->list);
	list_add_tail(&hw->list, &lmh_dcvs_hw_list);
	mutex_unlock(&lmh_dcvs_list_access);
	return ret;
}
/* DT match table: one compatible covers all supported targets. */
static const struct of_device_id limits_dcvs_match[] = {
	{ .compatible = "qcom,msm-hw-limits", },
	{},
};

static struct platform_driver limits_dcvs_driver = {
	.probe = limits_dcvs_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = limits_dcvs_match,
	},
};
/* Built-in only: registered at device_initcall time, no remove path. */
builtin_platform_driver(limits_dcvs_driver);
MODULE_LICENSE("GPL");