pci_qcom.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */
  3. #include "pci_platform.h"
  4. #include "debug.h"
/* Static MSI vector assignment for the WLAN PCIe endpoint: 32 vectors
 * shared by four users with contiguous base vectors —
 * MHI [0-2], CE [3-12], WAKE [13], DP [14-31].
 */
static struct cnss_msi_config msi_config = {
	.total_vectors = 32,
	.total_users = 4,
	.users = (struct cnss_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
		{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
	},
};
/* Enumerate the given PCIe root complex via the MSM RC driver.
 * @plat_priv is unused here; it is part of the common platform interface.
 */
int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
{
	return msm_pcie_enumerate(rc_num);
}
/* Ask the MSM PCIe RC driver to run its linkdown handling for this
 * device, which asserts PERST# as part of the sequence.
 */
int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	return msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
				   pci_dev->bus->number, pci_dev, NULL,
				   PM_OPTIONS_DEFAULT);
}
  26. int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
  27. {
  28. struct pci_dev *pci_dev = pci_priv->pci_dev;
  29. return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
  30. MSM_PCIE_ENABLE_PC,
  31. pci_dev->bus->number, pci_dev, NULL,
  32. PM_OPTIONS_DEFAULT);
  33. }
/* Request a specific link speed/width combination from the MSM RC
 * driver for this endpoint.
 */
int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
				u16 link_speed, u16 link_width)
{
	return msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
					   link_speed, link_width);
}
/* Cap the target link speed for the given root complex.  The final
 * 'false' argument means the change is not forced immediately — see the
 * msm_pcie_set_target_link_speed() contract.
 */
int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
				u32 rc_num, u16 link_speed)
{
	return msm_pcie_set_target_link_speed(rc_num, link_speed, false);
}
/**
 * _cnss_pci_prevent_l1() - Prevent PCIe L1 and L1 sub-states
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call corresponding PCIe root complex driver APIs
 * to prevent PCIe link enter L1 and L1 sub-states. The APIs should also
 * bring link out of L1 or L1 sub-states if any and avoid synchronization
 * issues if any.
 *
 * Return: 0 for success, negative value for error
 */
static int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv)
{
	return msm_pcie_prevent_l1(pci_priv->pci_dev);
}
/**
 * _cnss_pci_allow_l1() - Allow PCIe L1 and L1 sub-states
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call corresponding PCIe root complex driver APIs
 * to allow PCIe link enter L1 and L1 sub-states. The APIs should avoid
 * synchronization issues if any.
 *
 * Return: None
 */
static void _cnss_pci_allow_l1(struct cnss_pci_data *pci_priv)
{
	msm_pcie_allow_l1(pci_priv->pci_dev);
}
  74. /**
  75. * cnss_pci_set_link_up() - Power on or resume PCIe link
  76. * @pci_priv: driver PCI bus context pointer
  77. *
  78. * This function shall call corresponding PCIe root complex driver APIs
  79. * to Power on or resume PCIe link.
  80. *
  81. * Return: 0 for success, negative value for error
  82. */
  83. static int cnss_pci_set_link_up(struct cnss_pci_data *pci_priv)
  84. {
  85. struct pci_dev *pci_dev = pci_priv->pci_dev;
  86. enum msm_pcie_pm_opt pm_ops = MSM_PCIE_RESUME;
  87. u32 pm_options = PM_OPTIONS_DEFAULT;
  88. int ret;
  89. ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
  90. NULL, pm_options);
  91. if (ret)
  92. cnss_pr_err("Failed to resume PCI link with default option, err = %d\n",
  93. ret);
  94. return ret;
  95. }
  96. /**
  97. * cnss_pci_set_link_down() - Power off or suspend PCIe link
  98. * @pci_priv: driver PCI bus context pointer
  99. *
  100. * This function shall call corresponding PCIe root complex driver APIs
  101. * to power off or suspend PCIe link.
  102. *
  103. * Return: 0 for success, negative value for error
  104. */
  105. static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
  106. {
  107. struct pci_dev *pci_dev = pci_priv->pci_dev;
  108. enum msm_pcie_pm_opt pm_ops;
  109. u32 pm_options = PM_OPTIONS_DEFAULT;
  110. int ret;
  111. if (pci_priv->drv_connected_last) {
  112. cnss_pr_vdbg("Use PCIe DRV suspend\n");
  113. pm_ops = MSM_PCIE_DRV_SUSPEND;
  114. } else {
  115. pm_ops = MSM_PCIE_SUSPEND;
  116. }
  117. ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
  118. NULL, pm_options);
  119. if (ret)
  120. cnss_pr_err("Failed to suspend PCI link with default option, err = %d\n",
  121. ret);
  122. return ret;
  123. }
  124. bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
  125. {
  126. struct pci_dev *root_port = pcie_find_root_port(pci_priv->pci_dev);
  127. struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
  128. struct device_node *root_of_node;
  129. bool drv_supported = false;
  130. if (!root_port) {
  131. cnss_pr_err("PCIe DRV is not supported as root port is null\n");
  132. pci_priv->drv_supported = false;
  133. return drv_supported;
  134. }
  135. root_of_node = root_port->dev.of_node;
  136. if (root_of_node->parent) {
  137. drv_supported = of_property_read_bool(root_of_node->parent,
  138. "qcom,drv-supported") ||
  139. of_property_read_bool(root_of_node->parent,
  140. "qcom,drv-name");
  141. }
  142. cnss_pr_dbg("PCIe DRV is %s\n",
  143. drv_supported ? "supported" : "not supported");
  144. pci_priv->drv_supported = drv_supported;
  145. if (drv_supported) {
  146. plat_priv->cap.cap_flag |= CNSS_HAS_DRV_SUPPORT;
  147. cnss_set_feature_list(plat_priv, CNSS_DRV_SUPPORT_V01);
  148. }
  149. return drv_supported;
  150. }
/* Callback registered with the MSM PCIe RC driver.  Dispatches link
 * recover, link down, wakeup and DRV connect/disconnect notifications
 * to the corresponding CNSS handlers.
 */
static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
{
	struct pci_dev *pci_dev;
	struct cnss_pci_data *pci_priv;
	struct device *dev;
	struct cnss_plat_data *plat_priv = NULL;
	int ret = 0;

	if (!notify)
		return;

	pci_dev = notify->user;
	if (!pci_dev)
		return;

	pci_priv = cnss_get_pci_priv(pci_dev);
	if (!pci_priv)
		return;

	dev = &pci_priv->pci_dev->dev;

	switch (notify->event) {
	case MSM_PCIE_EVENT_LINK_RECOVER:
		cnss_pr_dbg("PCI link recover callback\n");

		plat_priv = pci_priv->plat_priv;
		if (!plat_priv) {
			cnss_pr_err("plat_priv is NULL\n");
			return;
		}

		/* Let the RC driver attempt linkdown handling first; fall
		 * back to the CNSS linkdown path only if that fails.
		 */
		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);

		ret = msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
					  pci_dev->bus->number, pci_dev, NULL,
					  PM_OPTIONS_DEFAULT);
		if (ret)
			cnss_pci_handle_linkdown(pci_priv);
		break;
	case MSM_PCIE_EVENT_LINKDOWN:
		cnss_pr_dbg("PCI link down event callback\n");
		cnss_pci_handle_linkdown(pci_priv);
		break;
	case MSM_PCIE_EVENT_WAKEUP:
		/* Resume only when auto-suspended with wake-interrupt
		 * monitoring armed, or while runtime suspend is in flight.
		 */
		if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
		     cnss_pci_get_auto_suspended(pci_priv)) ||
		    dev->power.runtime_status == RPM_SUSPENDING) {
			cnss_pci_set_monitor_wake_intr(pci_priv, false);
			cnss_pci_pm_request_resume(pci_priv);
		}
		break;
	case MSM_PCIE_EVENT_DRV_CONNECT:
		cnss_pr_dbg("DRV subsystem is connected\n");
		cnss_pci_set_drv_connected(pci_priv, 1);
		break;
	case MSM_PCIE_EVENT_DRV_DISCONNECT:
		cnss_pr_dbg("DRV subsystem is disconnected\n");
		/* Resume first so the disconnect is observed on an active
		 * link, then clear the connected flag.
		 */
		if (cnss_pci_get_auto_suspended(pci_priv))
			cnss_pci_pm_request_resume(pci_priv);
		cnss_pci_set_drv_connected(pci_priv, 0);
		break;
	default:
		cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
	}
}
  208. int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
  209. {
  210. int ret = 0;
  211. struct msm_pcie_register_event *pci_event;
  212. pci_event = &pci_priv->msm_pci_event;
  213. pci_event->events = MSM_PCIE_EVENT_LINK_RECOVER |
  214. MSM_PCIE_EVENT_LINKDOWN |
  215. MSM_PCIE_EVENT_WAKEUP;
  216. if (cnss_pci_is_drv_supported(pci_priv))
  217. pci_event->events = pci_event->events |
  218. MSM_PCIE_EVENT_DRV_CONNECT |
  219. MSM_PCIE_EVENT_DRV_DISCONNECT;
  220. pci_event->user = pci_priv->pci_dev;
  221. pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
  222. pci_event->callback = cnss_pci_event_cb;
  223. pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY;
  224. ret = msm_pcie_register_event(pci_event);
  225. if (ret)
  226. cnss_pr_err("Failed to register MSM PCI event, err = %d\n",
  227. ret);
  228. return ret;
  229. }
/* Deregister the event callback installed by cnss_reg_pci_event(). */
void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
{
	msm_pcie_deregister_event(&pci_priv->msm_pci_event);
}
  234. int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
  235. bool control)
  236. {
  237. struct pci_dev *pci_dev = pci_priv->pci_dev;
  238. int ret = 0;
  239. u32 pm_options = PM_OPTIONS_DEFAULT;
  240. struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
  241. if (plat_priv->adsp_pc_enabled == control) {
  242. cnss_pr_dbg("ADSP power collapse already %s\n",
  243. control ? "Enabled" : "Disabled");
  244. return 0;
  245. }
  246. if (control)
  247. pm_options &= ~MSM_PCIE_CONFIG_NO_DRV_PC;
  248. else
  249. pm_options |= MSM_PCIE_CONFIG_NO_DRV_PC;
  250. ret = msm_pcie_pm_control(MSM_PCIE_DRV_PC_CTRL, pci_dev->bus->number,
  251. pci_dev, NULL, pm_options);
  252. if (ret)
  253. return ret;
  254. cnss_pr_dbg("%s ADSP power collapse\n", control ? "Enable" : "Disable");
  255. plat_priv->adsp_pc_enabled = control;
  256. return 0;
  257. }
  258. static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
  259. enum pci_link_status status)
  260. {
  261. u16 link_speed, link_width = pci_priv->def_link_width;
  262. u16 one_lane = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
  263. int ret;
  264. cnss_pr_vdbg("Set PCI link status to: %u\n", status);
  265. switch (status) {
  266. case PCI_GEN1:
  267. link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
  268. if (!link_width)
  269. link_width = one_lane;
  270. break;
  271. case PCI_GEN2:
  272. link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
  273. if (!link_width)
  274. link_width = one_lane;
  275. break;
  276. case PCI_DEF:
  277. link_speed = pci_priv->def_link_speed;
  278. if (!link_speed || !link_width) {
  279. cnss_pr_err("PCI link speed or width is not valid\n");
  280. return -EINVAL;
  281. }
  282. break;
  283. default:
  284. cnss_pr_err("Unknown PCI link status config: %u\n", status);
  285. return -EINVAL;
  286. }
  287. ret = cnss_pci_set_link_bandwidth(pci_priv, link_speed, link_width);
  288. if (!ret)
  289. pci_priv->cur_link_speed = link_speed;
  290. return ret;
  291. }
/* Bring the PCIe link up or down.  Link-up retries training up to
 * LINK_TRAINING_RETRY_MAX_TIMES; link-down first drops the link to Gen2
 * when a DRV suspend will be used on a faster link, and link settings
 * are restored to defaults whenever the link ends up staying up.
 */
int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
{
	int ret = 0, retry = 0;

	cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");

	if (link_up) {
retry:
		ret = cnss_pci_set_link_up(pci_priv);
		if (ret && retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
			/* Back off progressively, but only when a linkdown
			 * has been indicated.
			 */
			if (pci_priv->pci_link_down_ind)
				msleep(LINK_TRAINING_RETRY_DELAY_MS * retry);
			goto retry;
		}
	} else {
		/* Since DRV suspend cannot be done in Gen 3, set it to
		 * Gen 2 if current link speed is larger than Gen 2.
		 */
		if (pci_priv->drv_connected_last &&
		    pci_priv->cur_link_speed > PCI_EXP_LNKSTA_CLS_5_0GB)
			cnss_set_pci_link_status(pci_priv, PCI_GEN2);

		ret = cnss_pci_set_link_down(pci_priv);
	}

	if (pci_priv->drv_connected_last) {
		/* Restore defaults after a successful resume or a failed
		 * suspend — i.e. whenever the link remains up.
		 */
		if ((link_up && !ret) || (!link_up && ret))
			cnss_set_pci_link_status(pci_priv, PCI_DEF);
	}

	return ret;
}
  320. int cnss_pci_prevent_l1(struct device *dev)
  321. {
  322. struct pci_dev *pci_dev = to_pci_dev(dev);
  323. struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
  324. int ret;
  325. if (!pci_priv) {
  326. cnss_pr_err("pci_priv is NULL\n");
  327. return -ENODEV;
  328. }
  329. if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
  330. cnss_pr_dbg("PCIe link is in suspend state\n");
  331. return -EIO;
  332. }
  333. if (pci_priv->pci_link_down_ind) {
  334. cnss_pr_err("PCIe link is down\n");
  335. return -EIO;
  336. }
  337. ret = _cnss_pci_prevent_l1(pci_priv);
  338. if (ret == -EIO) {
  339. cnss_pr_err("Failed to prevent PCIe L1, considered as link down\n");
  340. cnss_pci_link_down(dev);
  341. }
  342. return ret;
  343. }
  344. EXPORT_SYMBOL(cnss_pci_prevent_l1);
  345. void cnss_pci_allow_l1(struct device *dev)
  346. {
  347. struct pci_dev *pci_dev = to_pci_dev(dev);
  348. struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
  349. if (!pci_priv) {
  350. cnss_pr_err("pci_priv is NULL\n");
  351. return;
  352. }
  353. if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
  354. cnss_pr_dbg("PCIe link is in suspend state\n");
  355. return;
  356. }
  357. if (pci_priv->pci_link_down_ind) {
  358. cnss_pr_err("PCIe link is down\n");
  359. return;
  360. }
  361. _cnss_pci_allow_l1(pci_priv);
  362. }
  363. EXPORT_SYMBOL(cnss_pci_allow_l1);
/* Point the PCI context at the platform's static MSI layout table. */
int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
{
	pci_priv->msi_config = &msi_config;
	return 0;
}
/* IOMMU fault handler installed when SMMU S1 is enabled.  Logs the
 * faulting IOVA, marks firmware down, and forces a firmware assert for
 * ramdump collection.
 */
static int cnss_pci_smmu_fault_handler(struct iommu_domain *domain,
				       struct device *dev, unsigned long iova,
				       int flags, void *handler_token)
{
	struct cnss_pci_data *pci_priv = handler_token;

	cnss_fatal_err("SMMU fault happened with IOVA 0x%lx\n", iova);

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
	cnss_force_fw_assert(&pci_priv->pci_dev->dev);

	/* IOMMU driver requires -ENOSYS to print debug info. */
	return -ENOSYS;
}
/* Parse the "qcom,iommu-group" DT node to configure SMMU usage: S1
 * stage ("qcom,iommu-dma" == "fastmap"), the DMA address window, the
 * optional IPA IOVA window, and the iommu-geometry flag.  Returns 0
 * when no iommu-group phandle exists (SMMU not used).
 */
int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device_node *of_node;
	struct resource *res;
	const char *iommu_dma_type;
	u32 addr_win[2];
	int ret = 0;

	of_node = of_parse_phandle(pci_dev->dev.of_node, "qcom,iommu-group", 0);
	if (!of_node)
		return ret;

	cnss_pr_dbg("Initializing SMMU\n");

	pci_priv->iommu_domain = iommu_get_domain_for_dev(&pci_dev->dev);
	ret = of_property_read_string(of_node, "qcom,iommu-dma",
				      &iommu_dma_type);
	if (!ret && !strcmp("fastmap", iommu_dma_type)) {
		cnss_pr_dbg("Enabling SMMU S1 stage\n");
		pci_priv->smmu_s1_enable = true;
		/* Catch device-side SMMU faults so firmware state can be
		 * captured before the device is torn down.
		 */
		iommu_set_fault_handler(pci_priv->iommu_domain,
					cnss_pci_smmu_fault_handler, pci_priv);
	}

	ret = of_property_read_u32_array(of_node, "qcom,iommu-dma-addr-pool",
					 addr_win, ARRAY_SIZE(addr_win));
	if (ret) {
		cnss_pr_err("Invalid SMMU size window, err = %d\n", ret);
		of_node_put(of_node);
		return ret;
	}

	pci_priv->smmu_iova_start = addr_win[0];
	pci_priv->smmu_iova_len = addr_win[1];
	cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: 0x%zx\n",
		    &pci_priv->smmu_iova_start,
		    pci_priv->smmu_iova_len);

	/* Optional dedicated IOVA window for IPA, published as a platform
	 * memory resource.
	 */
	res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
					   "smmu_iova_ipa");
	if (res) {
		pci_priv->smmu_iova_ipa_start = res->start;
		pci_priv->smmu_iova_ipa_current = res->start;
		pci_priv->smmu_iova_ipa_len = resource_size(res);
		cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: 0x%zx\n",
			    &pci_priv->smmu_iova_ipa_start,
			    pci_priv->smmu_iova_ipa_len);
	}

	pci_priv->iommu_geometry = of_property_read_bool(of_node,
							 "qcom,iommu-geometry");
	cnss_pr_dbg("iommu_geometry: %d\n", pci_priv->iommu_geometry);

	of_node_put(of_node);

	return 0;
}
/* Register-dump hook: not implemented on this platform; always succeeds
 * without writing to @buf.
 */
int _cnss_pci_get_reg_dump(struct cnss_pci_data *pci_priv,
			   u8 *buf, u32 len)
{
	return 0;
}
  439. #if IS_ENABLED(CONFIG_ARCH_QCOM)
/**
 * cnss_pci_of_reserved_mem_device_init() - Assign reserved memory region
 * to given PCI device
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call corresponding of_reserved_mem_device* API to
 * assign reserved memory region to PCI device based on where the memory is
 * defined and attached to (platform device of_node or PCI device of_node)
 * in device tree.
 *
 * Return: 0 for success, negative value for error
 */
int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
{
	struct device *dev_pci = &pci_priv->pci_dev->dev;
	int ret;

	/* Use of_reserved_mem_device_init_by_idx() if reserved memory is
	 * attached to platform device of_node.
	 */
	ret = of_reserved_mem_device_init(dev_pci);
	if (ret)
		cnss_pr_err("Failed to init reserved mem device, err = %d\n",
			    ret);
	/* Log the CMA area name for debugging even on partial failure. */
	if (dev_pci->cma_area)
		cnss_pr_dbg("CMA area is %s\n",
			    cma_get_name(dev_pci->cma_area));

	return ret;
}
/* Wake-GPIO setup is not needed on this platform; stub returns 0. */
int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
{
	return 0;
}
/* Wake-GPIO teardown counterpart; nothing to release on this platform. */
void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
{
}
  475. #endif