t7xx_pci.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <[email protected]>
 *  Ricardo Martinez <[email protected]>
 *  Sreehari Kancharla <[email protected]>
 *
 * Contributors:
 *  Amir Hanania <[email protected]>
 *  Andy Shevchenko <[email protected]>
 *  Chiranjeevi Rapolu <[email protected]>
 *  Eliot Lee <[email protected]>
 *  Moises Veleta <[email protected]>
 */

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define T7XX_PCI_IREG_BASE		0
#define T7XX_PCI_EREG_BASE		2

#define T7XX_INIT_TIMEOUT		20
#define PM_SLEEP_DIS_TIMEOUT_MS		20
#define PM_ACK_TIMEOUT_MS		1500
#define PM_AUTOSUSPEND_MS		20000
#define PM_RESOURCE_POLL_TIMEOUT_US	10000
#define PM_RESOURCE_POLL_STEP_US	100

enum t7xx_pm_state {
        MTK_PM_EXCEPTION,
        MTK_PM_INIT,		/* Device initialized, but handshake not completed */
        MTK_PM_SUSPENDED,
        MTK_PM_RESUMED,
};
static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
        void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
        u32 value;

        value = ioread32(ctrl_reg);

        if (enable)
                value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
        else
                value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;

        iowrite32(value, ctrl_reg);
}

static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
        int ret, val;

        ret = read_poll_timeout(ioread32, val,
                                (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
                                PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
                                IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
        if (ret == -ETIMEDOUT)
                dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");

        return ret;
}

static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
        struct pci_dev *pdev = t7xx_dev->pdev;

        INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
        mutex_init(&t7xx_dev->md_pm_entity_mtx);
        spin_lock_init(&t7xx_dev->md_pm_lock);
        init_completion(&t7xx_dev->sleep_lock_acquire);
        init_completion(&t7xx_dev->pm_sr_ack);
        init_completion(&t7xx_dev->init_done);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        device_init_wakeup(&pdev->dev, true);
        dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
                                DPM_FLAG_NO_DIRECT_COMPLETE);

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
        pm_runtime_use_autosuspend(&pdev->dev);

        return t7xx_wait_pm_config(t7xx_dev);
}
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
        /* Enable the PCIe resource lock only after MD deep sleep is done */
        t7xx_mhccif_mask_clr(t7xx_dev,
                             D2H_INT_DS_LOCK_ACK |
                             D2H_INT_SUSPEND_ACK |
                             D2H_INT_RESUME_ACK |
                             D2H_INT_SUSPEND_ACK_AP |
                             D2H_INT_RESUME_ACK_AP);

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

        pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
        pm_runtime_allow(&t7xx_dev->pdev->dev);
        pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
        complete_all(&t7xx_dev->init_done);
}

static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
{
        /* The device is kept in FSM re-init flow
         * so just roll back PM setting to the init setting.
         */
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        pm_runtime_get_noresume(&t7xx_dev->pdev->dev);

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        return t7xx_wait_pm_config(t7xx_dev);
}

void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
{
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        t7xx_wait_pm_config(t7xx_dev);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
}
int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
        struct md_pm_entity *entity;

        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
                        return -EEXIST;
                }
        }

        list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
        return 0;
}

int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
        struct md_pm_entity *entity, *tmp_entity;

        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        list_del(&pm_entity->entity);
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
                        return 0;
                }
        }

        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

        return -ENXIO;
}
int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
{
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret;

        ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
                                          msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
        if (!ret)
                dev_err_ratelimited(dev, "Resource wait complete timed out\n");

        return ret;
}

/**
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Lock the deep sleep capability, note that the device can still go into deep sleep
 * state while device is in D0 state, from the host's point-of-view.
 *
 * If device is in deep sleep state, wake up the device and disable deep sleep capability.
 */
void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count++;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
                goto unlock_and_complete;

        if (t7xx_dev->sleep_disable_count == 1) {
                u32 status;

                reinit_completion(&t7xx_dev->sleep_lock_acquire);
                t7xx_dev_set_sleep_capability(t7xx_dev, false);

                status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
                if (status & T7XX_PCIE_RESOURCE_STS_MSK)
                        goto unlock_and_complete;

                t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
        }
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
        return;

unlock_and_complete:
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
        complete_all(&t7xx_dev->sleep_lock_acquire);
}

/**
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * After enabling deep sleep, device can enter into deep sleep state.
 */
void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count--;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
                goto unlock;

        if (t7xx_dev->sleep_disable_count == 0)
                t7xx_dev_set_sleep_capability(t7xx_dev, true);

unlock:
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
}

static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
        unsigned long wait_ret;

        reinit_completion(&t7xx_dev->pm_sr_ack);
        t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
        wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
                                               msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
        if (!wait_ret)
                return -ETIMEDOUT;

        return 0;
}
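
/* Summary of the suspend path below (system sleep and runtime PM share it):
 * lock the PCIe low-power/ASPM resources, wait for the PM configuration to
 * settle, move the state machine to MTK_PM_SUSPENDED, run each registered
 * md_pm_entity's ->suspend() callback, then request MD and SAP suspend over
 * MHCCIF. Any failure rolls already-suspended entities back via ->resume().
 */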
static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
{
        enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
        struct t7xx_pci_dev *t7xx_dev;
        struct md_pm_entity *entity;
        int ret;

        t7xx_dev = pci_get_drvdata(pdev);
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
                dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
                return -EFAULT;
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        ret = t7xx_wait_pm_config(t7xx_dev);
        if (ret) {
                iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
                return ret;
        }

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
        t7xx_dev->rgu_pci_irq_en = false;

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (!entity->suspend)
                        continue;

                ret = entity->suspend(t7xx_dev, entity->entity_param);
                if (ret) {
                        entity_id = entity->id;
                        dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
                        goto abort_suspend;
                }
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
        if (ret) {
                dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
                goto abort_suspend;
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
        if (ret) {
                t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
                dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
                goto abort_suspend;
        }

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->suspend_late)
                        entity->suspend_late(t7xx_dev, entity->entity_param);
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        return 0;

abort_suspend:
        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity_id == entity->id)
                        break;

                if (entity->resume)
                        entity->resume(t7xx_dev, entity->entity_param);
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
        return ret;
}
static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
{
        t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);

        /* Disable interrupt first and let the IPs enable them */
        iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);

        /* Device disables PCIe interrupts during resume and
         * following function will re-enable PCIe interrupts.
         */
        t7xx_pcie_mac_interrupts_en(t7xx_dev);
        t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
}

static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
{
        int ret;

        ret = pcim_enable_device(t7xx_dev->pdev);
        if (ret)
                return ret;

        t7xx_pcie_mac_atr_init(t7xx_dev);
        t7xx_pcie_interrupt_reinit(t7xx_dev);

        if (is_d3) {
                t7xx_mhccif_init(t7xx_dev);
                return t7xx_pci_pm_reinit(t7xx_dev);
        }

        return 0;
}

static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
{
        struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret = -EINVAL;

        switch (event) {
        case FSM_CMD_STOP:
                ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
                break;

        case FSM_CMD_START:
                t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
                t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
                t7xx_dev->rgu_pci_irq_en = true;
                t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
                ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
                break;

        default:
                break;
        }

        if (ret)
                dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);

        return ret;
}
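
/* Summary of the resume path below: the device reports its pre-resume state
 * through the T7XX_PCIE_PM_RESUME_STATE dummy register. An L3 exit (or a
 * fresh boot detected via an uninitialized ATR window) needs a full FSM stop,
 * PCIe re-init and FSM start; exception and L2 exits need only a lighter
 * PCIe re-init; L1/INIT exits fall through to the MD/SAP resume handshake
 * and the per-entity resume callbacks.
 */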
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
        struct t7xx_pci_dev *t7xx_dev;
        struct md_pm_entity *entity;
        u32 prev_state;
        int ret = 0;

        t7xx_dev = pci_get_drvdata(pdev);
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
                iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
                return 0;
        }

        t7xx_pcie_mac_interrupts_en(t7xx_dev);
        prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);

        if (state_check) {
                /* For D3/L3 resume, the device could boot so quickly that the
                 * initial value of the dummy register might be overwritten.
                 * Identify new boots if the ATR source address register is not initialized.
                 */
                u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
                                           ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
                if (prev_state == PM_RESUME_REG_STATE_L3 ||
                    (prev_state == PM_RESUME_REG_STATE_INIT &&
                     atr_reg_val == ATR_SRC_ADDR_INVALID)) {
                        ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
                        if (ret)
                                return ret;

                        ret = t7xx_pcie_reinit(t7xx_dev, true);
                        if (ret)
                                return ret;

                        t7xx_clear_rgu_irq(t7xx_dev);
                        return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
                }

                if (prev_state == PM_RESUME_REG_STATE_EXP ||
                    prev_state == PM_RESUME_REG_STATE_L2_EXP) {
                        if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
                                ret = t7xx_pcie_reinit(t7xx_dev, false);
                                if (ret)
                                        return ret;
                        }

                        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
                        t7xx_dev->rgu_pci_irq_en = true;
                        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);

                        t7xx_mhccif_mask_clr(t7xx_dev,
                                             D2H_INT_EXCEPTION_INIT |
                                             D2H_INT_EXCEPTION_INIT_DONE |
                                             D2H_INT_EXCEPTION_CLEARQ_DONE |
                                             D2H_INT_EXCEPTION_ALLQ_RESET |
                                             D2H_INT_PORT_ENUM);

                        return ret;
                }

                if (prev_state == PM_RESUME_REG_STATE_L2) {
                        ret = t7xx_pcie_reinit(t7xx_dev, false);
                        if (ret)
                                return ret;

                } else if (prev_state != PM_RESUME_REG_STATE_L1 &&
                           prev_state != PM_RESUME_REG_STATE_INIT) {
                        ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
                        if (ret)
                                return ret;

                        t7xx_clear_rgu_irq(t7xx_dev);
                        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
                        return 0;
                }
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        t7xx_wait_pm_config(t7xx_dev);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume_early)
                        entity->resume_early(t7xx_dev, entity->entity_param);
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
        if (ret)
                dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
        if (ret)
                dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume) {
                        ret = entity->resume(t7xx_dev, entity->entity_param);
                        if (ret)
                                dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
                                        entity->id, ret);
                }
        }

        t7xx_dev->rgu_pci_irq_en = true;
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        pm_runtime_mark_last_busy(&pdev->dev);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

        return ret;
}
static int t7xx_pci_pm_resume_noirq(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct t7xx_pci_dev *t7xx_dev;

        t7xx_dev = pci_get_drvdata(pdev);
        t7xx_pcie_mac_interrupts_dis(t7xx_dev);

        return 0;
}

static void t7xx_pci_shutdown(struct pci_dev *pdev)
{
        __t7xx_pci_pm_suspend(pdev);
}

static int t7xx_pci_pm_prepare(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct t7xx_pci_dev *t7xx_dev;

        t7xx_dev = pci_get_drvdata(pdev);
        if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
                dev_warn(dev, "Not ready for system sleep.\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int t7xx_pci_pm_suspend(struct device *dev)
{
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_resume(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static int t7xx_pci_pm_thaw(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}

static int t7xx_pci_pm_runtime_suspend(struct device *dev)
{
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_runtime_resume(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}
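
/* System sleep, hibernation and runtime PM all funnel into the same two
 * helpers, __t7xx_pci_pm_suspend() and __t7xx_pci_pm_resume(). Only thaw
 * skips the resume-state check, since the device keeps power across a
 * hibernation freeze.
 */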
static const struct dev_pm_ops t7xx_pci_pm_ops = {
        .prepare = t7xx_pci_pm_prepare,
        .suspend = t7xx_pci_pm_suspend,
        .resume = t7xx_pci_pm_resume,
        .resume_noirq = t7xx_pci_pm_resume_noirq,
        .freeze = t7xx_pci_pm_suspend,
        .thaw = t7xx_pci_pm_thaw,
        .poweroff = t7xx_pci_pm_suspend,
        .restore = t7xx_pci_pm_resume,
        .restore_noirq = t7xx_pci_pm_resume_noirq,
        .runtime_suspend = t7xx_pci_pm_runtime_suspend,
        .runtime_resume = t7xx_pci_pm_runtime_resume
};
static int t7xx_request_irq(struct pci_dev *pdev)
{
        struct t7xx_pci_dev *t7xx_dev;
        int ret = 0, i;

        t7xx_dev = pci_get_drvdata(pdev);

        for (i = 0; i < EXT_INT_NUM; i++) {
                const char *irq_descr;
                int irq_vec;

                if (!t7xx_dev->intr_handler[i])
                        continue;

                irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
                                           dev_driver_string(&pdev->dev), i);
                if (!irq_descr) {
                        ret = -ENOMEM;
                        break;
                }

                irq_vec = pci_irq_vector(pdev, i);
                ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
                                           t7xx_dev->intr_thread[i], 0, irq_descr,
                                           t7xx_dev->callback_param[i]);
                if (ret) {
                        dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
                        break;
                }
        }

        if (ret) {
                while (i--) {
                        if (!t7xx_dev->intr_handler[i])
                                continue;

                        free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
                }
        }

        return ret;
}

static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
{
        struct pci_dev *pdev = t7xx_dev->pdev;
        int ret;

        /* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */
        ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
                return ret;
        }

        ret = t7xx_request_irq(pdev);
        if (ret) {
                pci_free_irq_vectors(pdev);
                return ret;
        }

        t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
        return 0;
}
static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
{
        int ret, i;

        if (!t7xx_dev->pdev->msix_cap)
                return -EINVAL;

        ret = t7xx_setup_msix(t7xx_dev);
        if (ret)
                return ret;

        /* IPs enable interrupts when ready */
        for (i = 0; i < EXT_INT_NUM; i++)
                t7xx_pcie_mac_set_int(t7xx_dev, i);

        return 0;
}

static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
{
        t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
                                               INFRACFG_AO_DEV_CHIP -
                                               t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
}
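
/* Probe order in the function below: BAR 0 (internal registers) and BAR 2
 * (external registers) are mapped and 64-bit DMA masks set first, then PM,
 * ATR and MHCCIF are initialized, the modem core is brought up with
 * t7xx_md_init(), and MSI-X interrupts are requested and unmasked last.
 */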
static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct t7xx_pci_dev *t7xx_dev;
        int ret;

        t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
        if (!t7xx_dev)
                return -ENOMEM;

        pci_set_drvdata(pdev, t7xx_dev);
        t7xx_dev->pdev = pdev;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        pci_set_master(pdev);

        ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
                                 pci_name(pdev));
        if (ret) {
                dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
                return -ENOMEM;
        }

        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
                return ret;
        }

        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
                return ret;
        }

        IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
        t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];

        ret = t7xx_pci_pm_init(t7xx_dev);
        if (ret)
                return ret;

        t7xx_pcie_mac_atr_init(t7xx_dev);
        t7xx_pci_infracfg_ao_calc(t7xx_dev);
        t7xx_mhccif_init(t7xx_dev);

        ret = t7xx_md_init(t7xx_dev);
        if (ret)
                return ret;

        t7xx_pcie_mac_interrupts_dis(t7xx_dev);

        ret = t7xx_interrupt_init(t7xx_dev);
        if (ret) {
                t7xx_md_exit(t7xx_dev);
                return ret;
        }

        t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
        t7xx_pcie_mac_interrupts_en(t7xx_dev);

        return 0;
}

static void t7xx_pci_remove(struct pci_dev *pdev)
{
        struct t7xx_pci_dev *t7xx_dev;
        int i;

        t7xx_dev = pci_get_drvdata(pdev);
        t7xx_md_exit(t7xx_dev);

        for (i = 0; i < EXT_INT_NUM; i++) {
                if (!t7xx_dev->intr_handler[i])
                        continue;

                free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
        }

        pci_free_irq_vectors(t7xx_dev->pdev);
}

static const struct pci_device_id t7xx_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
        { }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);

static struct pci_driver t7xx_pci_driver = {
        .name = "mtk_t7xx",
        .id_table = t7xx_pci_table,
        .probe = t7xx_pci_probe,
        .remove = t7xx_pci_remove,
        .driver.pm = &t7xx_pci_pm_ops,
        .shutdown = t7xx_pci_shutdown,
};

module_pci_driver(t7xx_pci_driver);

MODULE_AUTHOR("MediaTek Inc");
MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
MODULE_LICENSE("GPL");