// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include "pci_platform.h"
#include "debug.h"

static struct cnss_msi_config msi_config = {
	.total_vectors = 32,
	.total_users = MSI_USERS,
	.users = (struct cnss_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
		{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
	},
};
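
/*
 * Illustrative sketch (not part of the driver): each user owns a
 * contiguous slice of the 32 MSI vectors, so the absolute Linux IRQ for
 * vector @vec_idx of a user resolves from base_vector + vec_idx.
 * cnss_msi_user_irq_sketch() is a hypothetical helper.
 */
static inline int cnss_msi_user_irq_sketch(struct pci_dev *pci_dev,
					   const struct cnss_msi_user *user,
					   int vec_idx)
{
	/* e.g. "DP" vector 2 maps to MSI vector 14 + 2 = 16 */
	return pci_irq_vector(pci_dev, user->base_vector + vec_idx);
}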
#ifdef CONFIG_ONE_MSI_VECTOR
/*
 * All users share the same vector and MSI data. For the MHI user, an IRQ
 * array still has to be passed to the MHI component; MHI_IRQ_NUMBER is
 * defined to specify the size of that MHI IRQ array.
 */
#define MHI_IRQ_NUMBER 3

static struct cnss_msi_config msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct cnss_msi_user[]) {
		{ .name = "MHI", .num_vectors = 1, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};
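
/*
 * Illustrative sketch (hypothetical helper): in one-MSI mode every user
 * shares vector 0, so the MHI IRQ array of size MHI_IRQ_NUMBER is simply
 * the same Linux IRQ number repeated for each slot.
 */
static inline void cnss_fill_mhi_irq_array_sketch(struct pci_dev *pci_dev,
						  int *irqs)
{
	int i;

	for (i = 0; i < MHI_IRQ_NUMBER; i++)
		irqs[i] = pci_irq_vector(pci_dev, 0);
}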
#endif

int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
{
	return msm_pcie_enumerate(rc_num);
}

int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	return msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
				   pci_dev->bus->number, pci_dev, NULL,
				   PM_OPTIONS_DEFAULT);
}

int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
				   MSM_PCIE_ENABLE_PC,
				   pci_dev->bus->number, pci_dev, NULL,
				   PM_OPTIONS_DEFAULT);
}

int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
				u16 link_speed, u16 link_width)
{
	return msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
					   link_speed, link_width);
}

int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
				u32 rc_num, u16 link_speed)
{
	return msm_pcie_set_target_link_speed(rc_num, link_speed, false);
}
/**
 * _cnss_pci_prevent_l1() - Prevent PCIe L1 and L1 sub-states
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call the corresponding PCIe root complex driver API
 * to prevent the PCIe link from entering L1 and L1 sub-states. The API
 * should also bring the link out of L1 or L1 sub-states if needed and
 * avoid any synchronization issues.
 *
 * Return: 0 for success, negative value for error
 */
static int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv)
{
	return msm_pcie_prevent_l1(pci_priv->pci_dev);
}

/**
 * _cnss_pci_allow_l1() - Allow PCIe L1 and L1 sub-states
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call the corresponding PCIe root complex driver API
 * to allow the PCIe link to enter L1 and L1 sub-states. The API should
 * avoid any synchronization issues.
 *
 * Return: None
 */
static void _cnss_pci_allow_l1(struct cnss_pci_data *pci_priv)
{
	msm_pcie_allow_l1(pci_priv->pci_dev);
}
/**
 * cnss_pci_set_link_up() - Power on or resume the PCIe link
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call the corresponding PCIe root complex driver API
 * to power on or resume the PCIe link.
 *
 * Return: 0 for success, negative value for error
 */
static int cnss_pci_set_link_up(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	enum msm_pcie_pm_opt pm_ops = MSM_PCIE_RESUME;
	u32 pm_options = PM_OPTIONS_DEFAULT;
	int ret;

	ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
				  NULL, pm_options);
	if (ret)
		cnss_pr_err("Failed to resume PCI link with default option, err = %d\n",
			    ret);

	return ret;
}

/**
 * cnss_pci_set_link_down() - Power off or suspend the PCIe link
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call the corresponding PCIe root complex driver API
 * to power off or suspend the PCIe link.
 *
 * Return: 0 for success, negative value for error
 */
static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	enum msm_pcie_pm_opt pm_ops;
	u32 pm_options = PM_OPTIONS_DEFAULT;
	int ret;

	if (pci_priv->drv_connected_last) {
		cnss_pr_vdbg("Use PCIe DRV suspend\n");
		pm_ops = MSM_PCIE_DRV_SUSPEND;
	} else {
		pm_ops = MSM_PCIE_SUSPEND;
	}

	ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
				  NULL, pm_options);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link with default option, err = %d\n",
			    ret);

	return ret;
}
void cnss_pci_update_drv_supported(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *root_port = pcie_find_root_port(pci_priv->pci_dev);
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device_node *root_of_node;
	bool drv_supported = false;

	if (!root_port) {
		cnss_pr_err("PCIe DRV is not supported as root port is null\n");
		pci_priv->drv_supported = false;
		return;
	}

	root_of_node = root_port->dev.of_node;
	if (root_of_node->parent) {
		drv_supported = of_property_read_bool(root_of_node->parent,
						      "qcom,drv-supported") ||
				of_property_read_bool(root_of_node->parent,
						      "qcom,drv-name");
	}

	cnss_pr_dbg("PCIe DRV is %s\n",
		    drv_supported ? "supported" : "not supported");
	pci_priv->drv_supported = drv_supported;

	if (drv_supported) {
		plat_priv->cap.cap_flag |= CNSS_HAS_DRV_SUPPORT;
		cnss_set_feature_list(plat_priv, CNSS_DRV_SUPPORT_V01);
	}
}
static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
{
	struct pci_dev *pci_dev;
	struct cnss_pci_data *pci_priv;
	struct device *dev;
	struct cnss_plat_data *plat_priv = NULL;
	int ret = 0;

	if (!notify)
		return;

	pci_dev = notify->user;
	if (!pci_dev)
		return;

	pci_priv = cnss_get_pci_priv(pci_dev);
	if (!pci_priv)
		return;

	dev = &pci_priv->pci_dev->dev;

	switch (notify->event) {
	case MSM_PCIE_EVENT_LINK_RECOVER:
		cnss_pr_dbg("PCI link recover callback\n");

		plat_priv = pci_priv->plat_priv;
		if (!plat_priv) {
			cnss_pr_err("plat_priv is NULL\n");
			return;
		}

		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);

		ret = msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
					  pci_dev->bus->number, pci_dev, NULL,
					  PM_OPTIONS_DEFAULT);
		if (ret)
			cnss_pci_handle_linkdown(pci_priv);
		break;
	case MSM_PCIE_EVENT_LINKDOWN:
		cnss_pr_dbg("PCI link down event callback\n");
		cnss_pci_handle_linkdown(pci_priv);
		break;
	case MSM_PCIE_EVENT_WAKEUP:
		cnss_pr_dbg("PCI Wake up event callback\n");
		if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
		     cnss_pci_get_auto_suspended(pci_priv)) ||
		    dev->power.runtime_status == RPM_SUSPENDING) {
			cnss_pci_set_monitor_wake_intr(pci_priv, false);
			cnss_pci_pm_request_resume(pci_priv);
		}
		complete(&pci_priv->wake_event_complete);
		break;
	case MSM_PCIE_EVENT_DRV_CONNECT:
		cnss_pr_dbg("DRV subsystem is connected\n");
		cnss_pci_set_drv_connected(pci_priv, 1);
		break;
	case MSM_PCIE_EVENT_DRV_DISCONNECT:
		cnss_pr_dbg("DRV subsystem is disconnected\n");
		if (cnss_pci_get_auto_suspended(pci_priv))
			cnss_pci_pm_request_resume(pci_priv);
		cnss_pci_set_drv_connected(pci_priv, 0);
		break;
	default:
		cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
	}
}
int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct msm_pcie_register_event *pci_event;

	pci_event = &pci_priv->msm_pci_event;
	pci_event->events = MSM_PCIE_EVENT_LINK_RECOVER |
			    MSM_PCIE_EVENT_LINKDOWN |
			    MSM_PCIE_EVENT_WAKEUP;
	if (cnss_pci_get_drv_supported(pci_priv))
		pci_event->events |= MSM_PCIE_EVENT_DRV_CONNECT |
				     MSM_PCIE_EVENT_DRV_DISCONNECT;

	pci_event->user = pci_priv->pci_dev;
	pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
	pci_event->callback = cnss_pci_event_cb;
	pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY;

	ret = msm_pcie_register_event(pci_event);
	if (ret)
		cnss_pr_err("Failed to register MSM PCI event, err = %d\n",
			    ret);

	return ret;
}
void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
{
	msm_pcie_deregister_event(&pci_priv->msm_pci_event);
}

int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
			     bool control)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int ret = 0;
	u32 pm_options = PM_OPTIONS_DEFAULT;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (!cnss_pci_get_drv_supported(pci_priv))
		return 0;

	if (plat_priv->adsp_pc_enabled == control) {
		cnss_pr_dbg("ADSP power collapse already %s\n",
			    control ? "Enabled" : "Disabled");
		return 0;
	}

	if (control)
		pm_options &= ~MSM_PCIE_CONFIG_NO_DRV_PC;
	else
		pm_options |= MSM_PCIE_CONFIG_NO_DRV_PC;

	ret = msm_pcie_pm_control(MSM_PCIE_DRV_PC_CTRL, pci_dev->bus->number,
				  pci_dev, NULL, pm_options);
	if (ret)
		return ret;

	cnss_pr_dbg("%s ADSP power collapse\n", control ? "Enable" : "Disable");
	plat_priv->adsp_pc_enabled = control;

	return 0;
}
static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
				    enum pci_link_status status)
{
	u16 link_speed, link_width = pci_priv->def_link_width;
	u16 one_lane = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
	int ret;

	cnss_pr_vdbg("Set PCI link status to: %u\n", status);

	switch (status) {
	case PCI_GEN1:
		link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
		if (!link_width)
			link_width = one_lane;
		break;
	case PCI_GEN2:
		link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
		if (!link_width)
			link_width = one_lane;
		break;
	case PCI_DEF:
		link_speed = pci_priv->def_link_speed;
		if (!link_speed || !link_width) {
			cnss_pr_err("PCI link speed or width is not valid\n");
			return -EINVAL;
		}
		break;
	default:
		cnss_pr_err("Unknown PCI link status config: %u\n", status);
		return -EINVAL;
	}

	ret = cnss_pci_set_link_bandwidth(pci_priv, link_speed, link_width);
	if (!ret)
		pci_priv->cur_link_speed = link_speed;

	return ret;
}
int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
{
	int ret = 0, retry = 0;
	struct cnss_plat_data *plat_priv;
	int sw_ctrl_gpio;

	plat_priv = pci_priv->plat_priv;
	sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;

	cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");

	if (link_up) {
retry:
		ret = cnss_pci_set_link_up(pci_priv);
		if (ret && retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
				    cnss_get_input_gpio_value(plat_priv,
							      sw_ctrl_gpio));
			if (pci_priv->pci_link_down_ind)
				msleep(LINK_TRAINING_RETRY_DELAY_MS * retry);
			goto retry;
		}
	} else {
		/* Since DRV suspend cannot be done in Gen 3, set it to
		 * Gen 2 if current link speed is larger than Gen 2.
		 */
		if (pci_priv->drv_connected_last &&
		    pci_priv->cur_link_speed > PCI_EXP_LNKSTA_CLS_5_0GB)
			cnss_set_pci_link_status(pci_priv, PCI_GEN2);

		ret = cnss_pci_set_link_down(pci_priv);
	}

	if (pci_priv->drv_connected_last) {
		if ((link_up && !ret) || (!link_up && ret))
			cnss_set_pci_link_status(pci_priv, PCI_DEF);
	}

	return ret;
}
int cnss_pci_prevent_l1(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	int ret;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_dbg("PCIe link is in suspend state\n");
		return -EIO;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_err("PCIe link is down\n");
		return -EIO;
	}

	ret = _cnss_pci_prevent_l1(pci_priv);
	if (ret == -EIO) {
		cnss_pr_err("Failed to prevent PCIe L1, considered as link down\n");
		cnss_pci_link_down(dev);
	}

	return ret;
}
EXPORT_SYMBOL(cnss_pci_prevent_l1);

void cnss_pci_allow_l1(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return;
	}

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_dbg("PCIe link is in suspend state\n");
		return;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_err("PCIe link is down\n");
		return;
	}

	_cnss_pci_allow_l1(pci_priv);
}
EXPORT_SYMBOL(cnss_pci_allow_l1);
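
/*
 * Caller-side sketch (illustrative, not part of this driver): hold the
 * link out of L1 only around a latency-sensitive MMIO access and always
 * re-allow L1 afterwards. The helper and its @reg parameter are
 * hypothetical.
 */
static int __maybe_unused cnss_pci_mmio_read_sketch(struct device *dev,
						    void __iomem *reg,
						    u32 *val)
{
	int ret;

	ret = cnss_pci_prevent_l1(dev);
	if (ret)
		return ret;

	*val = readl_relaxed(reg);	/* latency-sensitive access */

	cnss_pci_allow_l1(dev);
	return 0;
}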
int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
{
	pci_priv->msi_config = &msi_config;

	return 0;
}
#ifdef CONFIG_ONE_MSI_VECTOR
int cnss_pci_get_one_msi_assignment(struct cnss_pci_data *pci_priv)
{
	pci_priv->msi_config = &msi_config_one_msi;

	return 0;
}

bool cnss_pci_fallback_one_msi(struct cnss_pci_data *pci_priv,
			       int *num_vectors)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_msi_config *msi_config;

	cnss_pci_get_one_msi_assignment(pci_priv);
	msi_config = pci_priv->msi_config;
	if (!msi_config) {
		cnss_pr_err("one msi_config is NULL!\n");
		return false;
	}

	*num_vectors = pci_alloc_irq_vectors(pci_dev,
					     msi_config->total_vectors,
					     msi_config->total_vectors,
					     PCI_IRQ_MSI);
	if (*num_vectors < 0) {
		cnss_pr_err("Failed to get one MSI vector!\n");
		return false;
	}

	cnss_pr_dbg("request MSI one vector\n");

	return true;
}
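
/*
 * Caller-side sketch (hypothetical, not from this file): try the full
 * multi-vector assignment first and fall back to the shared single
 * vector only if that allocation fails.
 */
static int __maybe_unused
cnss_pci_enable_msi_sketch(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int num_vectors;

	cnss_pci_get_msi_assignment(pci_priv);
	num_vectors = pci_alloc_irq_vectors(pci_dev,
					    pci_priv->msi_config->total_vectors,
					    pci_priv->msi_config->total_vectors,
					    PCI_IRQ_MSI);
	if (num_vectors > 0)
		return 0;

	if (cnss_pci_fallback_one_msi(pci_priv, &num_vectors))
		return 0;

	return -EIO;
}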
bool cnss_pci_is_one_msi(struct cnss_pci_data *pci_priv)
{
	return pci_priv && pci_priv->msi_config &&
	       (pci_priv->msi_config->total_vectors == 1);
}

int cnss_pci_get_one_msi_mhi_irq_array_size(struct cnss_pci_data *pci_priv)
{
	return MHI_IRQ_NUMBER;
}

bool cnss_pci_is_force_one_msi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	return test_bit(FORCE_ONE_MSI, &plat_priv->ctrl_params.quirks);
}
#else
int cnss_pci_get_one_msi_assignment(struct cnss_pci_data *pci_priv)
{
	return 0;
}

bool cnss_pci_fallback_one_msi(struct cnss_pci_data *pci_priv,
			       int *num_vectors)
{
	return false;
}

bool cnss_pci_is_one_msi(struct cnss_pci_data *pci_priv)
{
	return false;
}

int cnss_pci_get_one_msi_mhi_irq_array_size(struct cnss_pci_data *pci_priv)
{
	return 0;
}

bool cnss_pci_is_force_one_msi(struct cnss_pci_data *pci_priv)
{
	return false;
}
#endif
static int cnss_pci_smmu_fault_handler(struct iommu_domain *domain,
				       struct device *dev, unsigned long iova,
				       int flags, void *handler_token)
{
	struct cnss_pci_data *pci_priv = handler_token;

	cnss_fatal_err("SMMU fault happened with IOVA 0x%lx\n", iova);

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	pci_priv->is_smmu_fault = true;
	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
	cnss_force_fw_assert(&pci_priv->pci_dev->dev);

	/* IOMMU driver requires -ENOSYS to print debug info. */
	return -ENOSYS;
}
int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device_node *of_node;
	struct resource *res;
	const char *iommu_dma_type;
	u32 addr_win[2];
	int ret = 0;

	of_node = of_parse_phandle(pci_dev->dev.of_node, "qcom,iommu-group", 0);
	if (!of_node)
		return ret;

	cnss_pr_dbg("Initializing SMMU\n");

	pci_priv->iommu_domain = iommu_get_domain_for_dev(&pci_dev->dev);
	ret = of_property_read_string(of_node, "qcom,iommu-dma",
				      &iommu_dma_type);
	if (!ret && !strcmp("fastmap", iommu_dma_type)) {
		cnss_pr_dbg("Enabling SMMU S1 stage\n");
		pci_priv->smmu_s1_enable = true;
		iommu_set_fault_handler(pci_priv->iommu_domain,
					cnss_pci_smmu_fault_handler, pci_priv);
		cnss_register_iommu_fault_handler_irq(pci_priv);
	}

	ret = of_property_read_u32_array(of_node, "qcom,iommu-dma-addr-pool",
					 addr_win, ARRAY_SIZE(addr_win));
	if (ret) {
		cnss_pr_err("Invalid SMMU size window, err = %d\n", ret);
		of_node_put(of_node);
		return ret;
	}

	pci_priv->smmu_iova_start = addr_win[0];
	pci_priv->smmu_iova_len = addr_win[1];
	cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: 0x%zx\n",
		    &pci_priv->smmu_iova_start,
		    pci_priv->smmu_iova_len);

	res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
					   "smmu_iova_ipa");
	if (res) {
		pci_priv->smmu_iova_ipa_start = res->start;
		pci_priv->smmu_iova_ipa_current = res->start;
		pci_priv->smmu_iova_ipa_len = resource_size(res);
		cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: 0x%zx\n",
			    &pci_priv->smmu_iova_ipa_start,
			    pci_priv->smmu_iova_ipa_len);
	}

	pci_priv->iommu_geometry = of_property_read_bool(of_node,
							 "qcom,iommu-geometry");
	cnss_pr_dbg("iommu_geometry: %d\n", pci_priv->iommu_geometry);

	of_node_put(of_node);

	return 0;
}
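
/*
 * Shape of the device-tree data cnss_pci_init_smmu() parses, as a sketch;
 * node names and the address/size values below are illustrative only:
 *
 *	&pcie_rc {
 *		qcom,iommu-group = <&wlan_pci_iommu_group>;
 *	};
 *
 *	wlan_pci_iommu_group: iommu-group {
 *		qcom,iommu-dma = "fastmap";
 *		qcom,iommu-dma-addr-pool = <0xa0000000 0x10000000>;
 *		qcom,iommu-geometry;
 *	};
 */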
int _cnss_pci_get_reg_dump(struct cnss_pci_data *pci_priv,
			   u8 *buf, u32 len)
{
	return msm_pcie_reg_dump(pci_priv->pci_dev, buf, len);
}
#if IS_ENABLED(CONFIG_ARCH_QCOM)
/**
 * cnss_pci_of_reserved_mem_device_init() - Assign reserved memory region
 * to given PCI device
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call the corresponding of_reserved_mem_device* API
 * to assign a reserved memory region to the PCI device, based on where the
 * memory is defined and attached to (platform device of_node or PCI device
 * of_node) in the device tree.
 *
 * Return: 0 for success, negative value for error
 */
int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
{
	struct device *dev_pci = &pci_priv->pci_dev->dev;
	int ret;

	/* Use of_reserved_mem_device_init_by_idx() if reserved memory is
	 * attached to platform device of_node.
	 */
	ret = of_reserved_mem_device_init(dev_pci);
	if (ret) {
		if (ret == -EINVAL)
			cnss_pr_vdbg("Ignore, no specific reserved-memory assigned\n");
		else
			cnss_pr_err("Failed to init reserved mem device, err = %d\n",
				    ret);
	}

	if (dev_pci->cma_area)
		cnss_pr_dbg("CMA area is %s\n",
			    cma_get_name(dev_pci->cma_area));

	return ret;
}
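
/*
 * Sketch of the by-idx variant the comment above refers to (hypothetical
 * helper; assumes the reserved region's "memory-region" phandle lives on
 * the platform device of_node rather than the PCI device of_node):
 */
static int __maybe_unused
cnss_pci_reserved_mem_by_idx_sketch(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* index 0: first "memory-region" entry on the platform node */
	return of_reserved_mem_device_init_by_idx(&pci_priv->pci_dev->dev,
						  plat_priv->plat_dev->dev.of_node,
						  0);
}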
int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
{
	return 0;
}

void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
{
}
#endif