// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2022 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>

#include <linux/soc/qcom/qcom_aoss.h>

#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
#include "ipa_modem.h"
#include "ipa_data.h"

/**
 * DOC: IPA Power Management
 *
 * The IPA hardware is enabled when the IPA core clock and all the
 * interconnects (buses) it depends on are enabled. Runtime power
 * management is used to determine whether the core clock and
 * interconnects are enabled, and to suspend them automatically when
 * the hardware is not in use.
 *
 * The core clock currently runs at a fixed rate when enabled, and all
 * interconnects use a fixed average and peak bandwidth.
 */

#define IPA_AUTOSUSPEND_DELAY	500	/* milliseconds */
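
/* A minimal sketch (hypothetical helper, not part of this file) of how
 * callers elsewhere in the driver hold IPA power around hardware access;
 * the autosuspend delay above is what keeps the hardware powered briefly
 * after the last pm_runtime_put_autosuspend() call:
 *
 *	static int ipa_do_work(struct ipa *ipa)
 *	{
 *		struct device *dev = &ipa->pdev->dev;
 *		int ret;
 *
 *		ret = pm_runtime_get_sync(dev);	// power up (resume) if needed
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *
 *		// ... access IPA hardware ...
 *
 *		pm_runtime_mark_last_busy(dev);	// restart the autosuspend timer
 *		pm_runtime_put_autosuspend(dev);	// suspend after the delay
 *		return 0;
 *	}
 */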

/**
 * enum ipa_power_flag - IPA power flags
 * @IPA_POWER_FLAG_RESUMED:	Whether resume from suspend has been signaled
 * @IPA_POWER_FLAG_SYSTEM:	Hardware is system (not runtime) suspended
 * @IPA_POWER_FLAG_STOPPED:	Modem TX is disabled by ipa_start_xmit()
 * @IPA_POWER_FLAG_STARTED:	Modem TX was enabled by ipa_runtime_resume()
 * @IPA_POWER_FLAG_COUNT:	Number of defined power flags
 */
enum ipa_power_flag {
	IPA_POWER_FLAG_RESUMED,
	IPA_POWER_FLAG_SYSTEM,
	IPA_POWER_FLAG_STOPPED,
	IPA_POWER_FLAG_STARTED,
	IPA_POWER_FLAG_COUNT,	/* Last; not a flag */
};

/**
 * struct ipa_power - IPA power management information
 * @dev:		IPA device pointer
 * @core:		IPA core clock
 * @qmp:		QMP handle for AOSS communication
 * @spinlock:		Protects modem TX queue enable/disable
 * @flags:		Boolean state flags
 * @interconnect_count:	Number of elements in interconnect[]
 * @interconnect:	Interconnect array
 */
struct ipa_power {
	struct device *dev;
	struct clk *core;
	struct qmp *qmp;
	spinlock_t spinlock;	/* used with STOPPED/STARTED power flags */
	DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
	u32 interconnect_count;
	struct icc_bulk_data interconnect[];
};

/* Initialize interconnects required for IPA operation */
static int ipa_interconnect_init(struct ipa_power *power,
				 const struct ipa_interconnect_data *data)
{
	struct icc_bulk_data *interconnect;
	int ret;
	u32 i;

	/* Initialize our interconnect data array for bulk operations */
	interconnect = &power->interconnect[0];
	for (i = 0; i < power->interconnect_count; i++) {
		/* interconnect->path is filled in by of_icc_bulk_get() */
		interconnect->name = data->name;
		interconnect->avg_bw = data->average_bandwidth;
		interconnect->peak_bw = data->peak_bandwidth;
		data++;
		interconnect++;
	}

	ret = of_icc_bulk_get(power->dev, power->interconnect_count,
			      power->interconnect);
	if (ret)
		return ret;

	/* All interconnects are initially disabled */
	icc_bulk_disable(power->interconnect_count, power->interconnect);

	/* Set the bandwidth values to be used when enabled */
	ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
	if (ret)
		icc_bulk_put(power->interconnect_count, power->interconnect);

	return ret;
}

/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_power *power)
{
	icc_bulk_put(power->interconnect_count, power->interconnect);
}
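
/* For context, a sketch of the per-SoC configuration consumed by
 * ipa_interconnect_init(); the real tables live in the ipa_data-*.c
 * files, and the names and bandwidth values below (in 1000 byte/second
 * units) are illustrative only:
 *
 *	static const struct ipa_interconnect_data ipa_interconnect_data[] = {
 *		{
 *			.name			= "memory",
 *			.average_bandwidth	= 80000,	// 80 MBps
 *			.peak_bandwidth		= 600000,	// 600 MBps
 *		},
 *		{
 *			.name			= "config",
 *			.average_bandwidth	= 40000,	// 40 MBps
 *			.peak_bandwidth		= 160000,	// 160 MBps
 *		},
 *	};
 */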

/* Enable IPA power, enabling interconnects and the core clock */
static int ipa_power_enable(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	int ret;

	ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
	if (ret)
		return ret;

	ret = clk_prepare_enable(power->core);
	if (ret) {
		dev_err(power->dev, "error %d enabling core clock\n", ret);
		icc_bulk_disable(power->interconnect_count,
				 power->interconnect);
	}

	return ret;
}

/* Inverse of ipa_power_enable() */
static void ipa_power_disable(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;

	clk_disable_unprepare(power->core);

	icc_bulk_disable(power->interconnect_count, power->interconnect);
}

static int ipa_runtime_suspend(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
		ipa_endpoint_suspend(ipa);
		gsi_suspend(&ipa->gsi);
	}

	ipa_power_disable(ipa);

	return 0;
}

static int ipa_runtime_resume(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);
	int ret;

	ret = ipa_power_enable(ipa);
	if (WARN_ON(ret < 0))
		return ret;

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		gsi_resume(&ipa->gsi);
		ipa_endpoint_resume(ipa);
	}

	return 0;
}

static int ipa_suspend(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

	/* Increment the disable depth to ensure that the IRQ won't
	 * be re-enabled until the matching _enable call in
	 * ipa_resume(). We do this to ensure that the interrupt
	 * handler won't run whilst PM runtime is disabled.
	 *
	 * Note that disabling the IRQ is NOT the same as disabling
	 * irq wake. If wakeup is enabled for the IPA then the IRQ
	 * will still cause the system to wake up, see irq_set_irq_wake().
	 */
	ipa_interrupt_irq_disable(ipa);

	return pm_runtime_force_suspend(dev);
}

static int ipa_resume(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);

	__clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

	/* Now that PM runtime is enabled again it's safe
	 * to turn the IRQ back on and process any data
	 * that was received during suspend.
	 */
	ipa_interrupt_irq_enable(ipa);

	return ret;
}

/* Return the current IPA core clock rate */
u32 ipa_core_clock_rate(struct ipa *ipa)
{
	return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}

/**
 * ipa_suspend_handler() - Handle the suspend IPA interrupt
 * @ipa:	IPA pointer
 * @irq_id:	IPA interrupt type (unused)
 *
 * If an RX endpoint is suspended, and the IPA has a packet destined for
 * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
 * that it should resume the endpoint. If we get one of these interrupts
 * we just wake up the system.
 */
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
	/* To handle an IPA interrupt we will have resumed the hardware
	 * just to handle the interrupt, so we're done. If we are in a
	 * system suspend, trigger a system resume. Passing "true" as
	 * the third (hard) argument to pm_wakeup_dev_event() makes it
	 * abort a system suspend that is already in progress.
	 */
	if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
		if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
			pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);

	/* Acknowledge/clear the suspend interrupt on all endpoints */
	ipa_interrupt_suspend_clear_all(ipa->interrupt);
}

/* The next few functions coordinate stopping and starting the modem
 * network device transmit queue.
 *
 * Transmit can be running concurrent with power resume, and there's a
 * chance the resume completes before the transmit path stops the queue,
 * leaving the queue in a stopped state. The next two functions are used
 * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
 * to conditionally stop the TX queue; and ipa_power_modem_queue_wake()
 * is used by ipa_runtime_resume() to conditionally restart it.
 *
 * Two flags and a spinlock are used. If the queue is stopped, the STOPPED
 * power flag is set. And if the queue is started, the STARTED flag is set.
 * The queue is only started on resume if the STOPPED flag is set. And the
 * queue is only stopped in ipa_start_xmit() if the STARTED flag is *not*
 * set. As a result, the queue remains operational if the two activities
 * happen concurrently regardless of the order they complete. The spinlock
 * ensures the flag and TX queue operations are done atomically.
 *
 * The first function stops the modem netdev transmit queue, but only if
 * the STARTED flag is *not* set. That flag is cleared if it was set.
 * If the queue is stopped, the STOPPED flag is set. This is called only
 * from the transmit path, in ipa_start_xmit().
 */
void ipa_power_modem_queue_stop(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
		netif_stop_queue(ipa->modem_netdev);
		__set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
}

/* This function starts the modem netdev transmit queue, but only if the
 * STOPPED flag is set. That flag is cleared if it was set. If the queue
 * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
 * to skip stopping the queue in the event of a race.
 */
void ipa_power_modem_queue_wake(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
		__set_bit(IPA_POWER_FLAG_STARTED, power->flags);
		netif_wake_queue(ipa->modem_netdev);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
}

/* This function clears the STARTED flag once the TX queue is operating */
void ipa_power_modem_queue_active(struct ipa *ipa)
{
	clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
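
/* A condensed sketch (details simplified) of how the transmit path in
 * ipa_modem.c uses the helpers above together with runtime PM:
 *
 *	static netdev_tx_t
 *	ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 *	{
 *		struct ipa *ipa = ...;	// from netdev private data
 *		struct device *dev = &ipa->pdev->dev;
 *
 *		// pm_runtime_get() is asynchronous; a return value less
 *		// than 1 means the hardware isn't powered yet.
 *		if (pm_runtime_get(dev) < 1) {
 *			// Stop the queue (unless a concurrent resume has
 *			// already restarted it) and ask the network stack
 *			// to retry the packet once the queue is woken.
 *			ipa_power_modem_queue_stop(ipa);
 *			pm_runtime_put_noidle(dev);
 *			return NETDEV_TX_BUSY;
 *		}
 *		ipa_power_modem_queue_active(ipa);	// queue is operating
 *
 *		// ... hand the skb to the hardware ...
 *
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return NETDEV_TX_OK;
 *	}
 */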

static int ipa_power_retention_init(struct ipa_power *power)
{
	struct qmp *qmp = qmp_get(power->dev);

	if (IS_ERR(qmp)) {
		if (PTR_ERR(qmp) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* We assume any other error means it's not defined/needed */
		qmp = NULL;
	}
	power->qmp = qmp;

	return 0;
}

static void ipa_power_retention_exit(struct ipa_power *power)
{
	qmp_put(power->qmp);
	power->qmp = NULL;
}

/* Control register retention on power collapse */
void ipa_power_retention(struct ipa *ipa, bool enable)
{
	static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
	struct ipa_power *power = ipa->power;
	char buf[36];	/* Exactly enough for fmt[]; size is a multiple of 4 */
	int ret;

	if (!power->qmp)
		return;		/* Not needed on this platform */

	(void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');

	ret = qmp_send(power->qmp, buf, sizeof(buf));
	if (ret)
		dev_err(power->dev, "error %d sending QMP %sable request\n",
			ret, enable ? "en" : "dis");
}
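
/* Concretely: enabling retention sends the 36-byte message
 * "{ class: bcm, res: ipa_pc, val: 1 }" (including its terminating NUL)
 * to the AOSS, and disabling it sends the same message with val: 0.
 */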

int ipa_power_setup(struct ipa *ipa)
{
	int ret;

	ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
			  ipa_suspend_handler);

	ret = device_init_wakeup(&ipa->pdev->dev, true);
	if (ret)
		ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);

	return ret;
}

void ipa_power_teardown(struct ipa *ipa)
{
	(void)device_init_wakeup(&ipa->pdev->dev, false);
	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
}

/* Initialize IPA power management */
struct ipa_power *
ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
	struct ipa_power *power;
	struct clk *clk;
	size_t size;
	int ret;

	clk = clk_get(dev, "core");
	if (IS_ERR(clk)) {
		dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");

		return ERR_CAST(clk);
	}

	ret = clk_set_rate(clk, data->core_clock_rate);
	if (ret) {
		dev_err(dev, "error %d setting core clock rate to %u\n",
			ret, data->core_clock_rate);
		goto err_clk_put;
	}

	size = struct_size(power, interconnect, data->interconnect_count);
	power = kzalloc(size, GFP_KERNEL);
	if (!power) {
		ret = -ENOMEM;
		goto err_clk_put;
	}
	power->dev = dev;
	power->core = clk;
	spin_lock_init(&power->spinlock);
	power->interconnect_count = data->interconnect_count;

	ret = ipa_interconnect_init(power, data->interconnect_data);
	if (ret)
		goto err_kfree;

	ret = ipa_power_retention_init(power);
	if (ret)
		goto err_interconnect_exit;

	pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	return power;

err_interconnect_exit:
	ipa_interconnect_exit(power);
err_kfree:
	kfree(power);
err_clk_put:
	clk_put(clk);

	return ERR_PTR(ret);
}

/* Inverse of ipa_power_init() */
void ipa_power_exit(struct ipa_power *power)
{
	struct device *dev = power->dev;
	struct clk *clk = power->core;

	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	ipa_power_retention_exit(power);
	ipa_interconnect_exit(power);
	kfree(power);
	clk_put(clk);
}

const struct dev_pm_ops ipa_pm_ops = {
	.suspend		= ipa_suspend,
	.resume			= ipa_resume,
	.runtime_suspend	= ipa_runtime_suspend,
	.runtime_resume		= ipa_runtime_resume,
};
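
/* A minimal sketch of how these operations get attached to the device;
 * the actual registration lives in ipa_main.c, and the names here are
 * abbreviated for illustration:
 *
 *	static struct platform_driver ipa_driver = {
 *		.probe		= ipa_probe,
 *		.remove		= ipa_remove,
 *		.driver		= {
 *			.name		= "ipa",
 *			.pm		= &ipa_pm_ops,
 *			.of_match_table	= ipa_match,
 *		},
 *	};
 *
 *	module_platform_driver(ipa_driver);
 */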