pci_qcom.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
  3. #include "pci_platform.h"
  4. #include "debug.h"
/* Default MSI vector layout: 32 vectors split across the MHI, CE, WAKE
 * and DP users. .base_vector is each user's offset into the contiguous
 * MSI vector space; the per-user counts (3 + 10 + 1 + 18) sum to
 * .total_vectors.
 */
static struct cnss_msi_config msi_config = {
	.total_vectors = 32,
	.total_users = MSI_USERS,
	.users = (struct cnss_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
		{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
	},
};
#ifdef CONFIG_ONE_MSI_VECTOR
/**
 * All users share the same vector and MSI data in one-MSI mode.
 * For the MHI user, an IRQ array must still be passed to the MHI
 * component; MHI_IRQ_NUMBER specifies that MHI IRQ array size.
 */
#define MHI_IRQ_NUMBER 3
/* Fallback layout when only a single MSI vector can be allocated:
 * every user maps onto vector 0.
 */
static struct cnss_msi_config msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct cnss_msi_user[]) {
		{ .name = "MHI", .num_vectors = 1, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};
#endif
  33. #define ENUM_RETRY_MAX_TIMES 8
  34. #define ENUM_RETRY_DELAY_MS 500
  35. int _cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
  36. {
  37. u32 retry = 0;
  38. int ret;
  39. if (plat_priv->pcie_switch_type == PCIE_SWITCH_NTN3) {
  40. while (retry++ < ENUM_RETRY_MAX_TIMES) {
  41. ret = msm_pcie_enumerate(rc_num);
  42. /* For PCIe switch platform, cnss_probe may called
  43. * before PCIe switch hardware ready, wait for
  44. * msm_pcie_enumerate complete.
  45. */
  46. if (ret == -EPROBE_DEFER) {
  47. cnss_pr_dbg("PCIe RC%d not ready, retry:%dth\n",
  48. rc_num, retry);
  49. msleep(ENUM_RETRY_DELAY_MS);
  50. }
  51. }
  52. } else {
  53. return msm_pcie_enumerate(rc_num);
  54. }
  55. return ret;
  56. }
  57. int cnss_pci_assert_perst(struct cnss_pci_data *pci_priv)
  58. {
  59. struct pci_dev *pci_dev = pci_priv->pci_dev;
  60. return msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
  61. pci_dev->bus->number, pci_dev, NULL,
  62. PM_OPTIONS_DEFAULT);
  63. }
  64. int cnss_pci_disable_pc(struct cnss_pci_data *pci_priv, bool vote)
  65. {
  66. struct pci_dev *pci_dev = pci_priv->pci_dev;
  67. return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
  68. MSM_PCIE_ENABLE_PC,
  69. pci_dev->bus->number, pci_dev, NULL,
  70. PM_OPTIONS_DEFAULT);
  71. }
/* Request a PCIe link speed/width change from the MSM PCIe RC driver. */
int cnss_pci_set_link_bandwidth(struct cnss_pci_data *pci_priv,
				u16 link_speed, u16 link_width)
{
	return msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
					   link_speed, link_width);
}
/* Cap the target link speed for root complex @rc_num.
 * NOTE(review): third argument is passed as false — presumably a
 * "force/retrain now" flag in msm_pcie_set_target_link_speed(); confirm
 * against the RC driver's API.
 */
int cnss_pci_set_max_link_speed(struct cnss_pci_data *pci_priv,
				u32 rc_num, u16 link_speed)
{
	return msm_pcie_set_target_link_speed(rc_num, link_speed, false);
}
/**
 * _cnss_pci_prevent_l1() - Prevent PCIe L1 and L1 sub-states
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call corresponding PCIe root complex driver APIs
 * to prevent PCIe link enter L1 and L1 sub-states. The APIs should also
 * bring link out of L1 or L1 sub-states if any and avoid synchronization
 * issues if any.
 *
 * Return: 0 for success, negative value for error
 */
static int _cnss_pci_prevent_l1(struct cnss_pci_data *pci_priv)
{
	return msm_pcie_prevent_l1(pci_priv->pci_dev);
}
/**
 * _cnss_pci_allow_l1() - Allow PCIe L1 and L1 sub-states
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call corresponding PCIe root complex driver APIs
 * to allow PCIe link enter L1 and L1 sub-states. The APIs should avoid
 * synchronization issues if any.
 *
 * Note: unlike _cnss_pci_prevent_l1(), this is void — the previous
 * "Return: 0 for success" kerneldoc line was wrong and has been removed.
 */
static void _cnss_pci_allow_l1(struct cnss_pci_data *pci_priv)
{
	msm_pcie_allow_l1(pci_priv->pci_dev);
}
  112. /**
  113. * cnss_pci_set_link_up() - Power on or resume PCIe link
  114. * @pci_priv: driver PCI bus context pointer
  115. *
  116. * This function shall call corresponding PCIe root complex driver APIs
  117. * to Power on or resume PCIe link.
  118. *
  119. * Return: 0 for success, negative value for error
  120. */
  121. static int cnss_pci_set_link_up(struct cnss_pci_data *pci_priv)
  122. {
  123. struct pci_dev *pci_dev = pci_priv->pci_dev;
  124. enum msm_pcie_pm_opt pm_ops = MSM_PCIE_RESUME;
  125. u32 pm_options = PM_OPTIONS_DEFAULT;
  126. int ret;
  127. ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
  128. NULL, pm_options);
  129. if (ret)
  130. cnss_pr_err("Failed to resume PCI link with default option, err = %d\n",
  131. ret);
  132. return ret;
  133. }
  134. /**
  135. * cnss_pci_set_link_down() - Power off or suspend PCIe link
  136. * @pci_priv: driver PCI bus context pointer
  137. *
  138. * This function shall call corresponding PCIe root complex driver APIs
  139. * to power off or suspend PCIe link.
  140. *
  141. * Return: 0 for success, negative value for error
  142. */
  143. static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
  144. {
  145. struct pci_dev *pci_dev = pci_priv->pci_dev;
  146. struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
  147. enum msm_pcie_pm_opt pm_ops;
  148. u32 pm_options = PM_OPTIONS_DEFAULT;
  149. int ret;
  150. if (pci_priv->drv_connected_last) {
  151. cnss_pr_vdbg("Use PCIe DRV suspend\n");
  152. pm_ops = MSM_PCIE_DRV_SUSPEND;
  153. } else {
  154. if (plat_priv && PCIE_SWITCH_NTN3 == plat_priv->pcie_switch_type) {
  155. cnss_pr_dbg("Skip suspend from client side for pcie switch case\n");
  156. return 0;
  157. }
  158. pm_ops = MSM_PCIE_SUSPEND;
  159. }
  160. ret = msm_pcie_pm_control(pm_ops, pci_dev->bus->number, pci_dev,
  161. NULL, pm_options);
  162. if (ret)
  163. cnss_pr_err("Failed to suspend PCI link with default option, err = %d\n",
  164. ret);
  165. return ret;
  166. }
  167. void cnss_pci_update_drv_supported(struct cnss_pci_data *pci_priv)
  168. {
  169. struct pci_dev *root_port = pcie_find_root_port(pci_priv->pci_dev);
  170. struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
  171. struct device_node *root_of_node;
  172. bool drv_supported = false;
  173. if (!root_port) {
  174. cnss_pr_err("PCIe DRV is not supported as root port is null\n");
  175. pci_priv->drv_supported = false;
  176. return;
  177. }
  178. root_of_node = root_port->dev.of_node;
  179. if (root_of_node->parent) {
  180. drv_supported = of_property_read_bool(root_of_node->parent,
  181. "qcom,drv-supported") ||
  182. of_property_read_bool(root_of_node->parent,
  183. "qcom,drv-name");
  184. }
  185. cnss_pr_dbg("PCIe DRV is %s\n",
  186. drv_supported ? "supported" : "not supported");
  187. pci_priv->drv_supported = drv_supported;
  188. if (drv_supported) {
  189. plat_priv->cap.cap_flag |= CNSS_HAS_DRV_SUPPORT;
  190. cnss_set_feature_list(plat_priv, CNSS_DRV_SUPPORT_V01);
  191. }
  192. }
/* Callback registered with the MSM PCIe RC driver (cnss_reg_pci_event()).
 * Dispatches link-recover, link-down, wakeup and DRV connect/disconnect
 * notifications for the WLAN endpoint.
 */
static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
{
	struct pci_dev *pci_dev;
	struct cnss_pci_data *pci_priv;
	struct device *dev;
	struct cnss_plat_data *plat_priv = NULL;
	int ret = 0;

	if (!notify)
		return;

	pci_dev = notify->user;
	if (!pci_dev)
		return;

	pci_priv = cnss_get_pci_priv(pci_dev);
	if (!pci_priv)
		return;

	dev = &pci_priv->pci_dev->dev;

	switch (notify->event) {
	case MSM_PCIE_EVENT_LINK_RECOVER:
		cnss_pr_dbg("PCI link recover callback\n");
		plat_priv = pci_priv->plat_priv;
		if (!plat_priv) {
			cnss_pr_err("plat_priv is NULL\n");
			return;
		}
		/* Let the RC driver try link recovery first; fall back to
		 * the CNSS link-down handler only if that fails.
		 */
		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);
		ret = msm_pcie_pm_control(MSM_PCIE_HANDLE_LINKDOWN,
					  pci_dev->bus->number, pci_dev, NULL,
					  PM_OPTIONS_DEFAULT);
		if (ret)
			cnss_pci_handle_linkdown(pci_priv);
		break;
	case MSM_PCIE_EVENT_LINKDOWN:
		cnss_pr_dbg("PCI link down event callback\n");
		cnss_pci_handle_linkdown(pci_priv);
		break;
	case MSM_PCIE_EVENT_WAKEUP:
		cnss_pr_dbg("PCI Wake up event callback\n");
		/* Resume if a wake interrupt is being monitored while
		 * auto-suspended, or a runtime suspend is in flight.
		 */
		if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
		     cnss_pci_get_auto_suspended(pci_priv)) ||
		    dev->power.runtime_status == RPM_SUSPENDING) {
			cnss_pci_set_monitor_wake_intr(pci_priv, false);
			cnss_pci_pm_request_resume(pci_priv);
		}
		/* Unblock any waiter synchronizing on the wake event. */
		complete(&pci_priv->wake_event_complete);
		break;
	case MSM_PCIE_EVENT_DRV_CONNECT:
		cnss_pr_dbg("DRV subsystem is connected\n");
		cnss_pci_set_drv_connected(pci_priv, 1);
		break;
	case MSM_PCIE_EVENT_DRV_DISCONNECT:
		cnss_pr_dbg("DRV subsystem is disconnected\n");
		if (cnss_pci_get_auto_suspended(pci_priv))
			cnss_pci_pm_request_resume(pci_priv);
		cnss_pci_set_drv_connected(pci_priv, 0);
		break;
	default:
		cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
	}
}
  252. int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
  253. {
  254. int ret = 0;
  255. struct msm_pcie_register_event *pci_event;
  256. pci_event = &pci_priv->msm_pci_event;
  257. pci_event->events = MSM_PCIE_EVENT_LINK_RECOVER |
  258. MSM_PCIE_EVENT_LINKDOWN |
  259. MSM_PCIE_EVENT_WAKEUP;
  260. if (cnss_pci_get_drv_supported(pci_priv))
  261. pci_event->events = pci_event->events |
  262. MSM_PCIE_EVENT_DRV_CONNECT |
  263. MSM_PCIE_EVENT_DRV_DISCONNECT;
  264. pci_event->user = pci_priv->pci_dev;
  265. pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
  266. pci_event->callback = cnss_pci_event_cb;
  267. pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY;
  268. ret = msm_pcie_register_event(pci_event);
  269. if (ret)
  270. cnss_pr_err("Failed to register MSM PCI event, err = %d\n",
  271. ret);
  272. return ret;
  273. }
/* Unregister the event callback installed by cnss_reg_pci_event(). */
void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
{
	msm_pcie_deregister_event(&pci_priv->msm_pci_event);
}
  278. int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
  279. bool control)
  280. {
  281. struct pci_dev *pci_dev = pci_priv->pci_dev;
  282. int ret = 0;
  283. u32 pm_options = PM_OPTIONS_DEFAULT;
  284. struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
  285. if (!cnss_pci_get_drv_supported(pci_priv))
  286. return 0;
  287. if (plat_priv->adsp_pc_enabled == control) {
  288. cnss_pr_dbg("ADSP power collapse already %s\n",
  289. control ? "Enabled" : "Disabled");
  290. return 0;
  291. }
  292. if (control)
  293. pm_options &= ~MSM_PCIE_CONFIG_NO_DRV_PC;
  294. else
  295. pm_options |= MSM_PCIE_CONFIG_NO_DRV_PC;
  296. ret = msm_pcie_pm_control(MSM_PCIE_DRV_PC_CTRL, pci_dev->bus->number,
  297. pci_dev, NULL, pm_options);
  298. if (ret)
  299. return ret;
  300. cnss_pr_dbg("%s ADSP power collapse\n", control ? "Enable" : "Disable");
  301. plat_priv->adsp_pc_enabled = control;
  302. return 0;
  303. }
  304. static int cnss_set_pci_link_status(struct cnss_pci_data *pci_priv,
  305. enum pci_link_status status)
  306. {
  307. u16 link_speed, link_width = pci_priv->def_link_width;
  308. u16 one_lane = PCI_EXP_LNKSTA_NLW_X1 >> PCI_EXP_LNKSTA_NLW_SHIFT;
  309. int ret;
  310. cnss_pr_vdbg("Set PCI link status to: %u\n", status);
  311. switch (status) {
  312. case PCI_GEN1:
  313. link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
  314. if (!link_width)
  315. link_width = one_lane;
  316. break;
  317. case PCI_GEN2:
  318. link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
  319. if (!link_width)
  320. link_width = one_lane;
  321. break;
  322. case PCI_DEF:
  323. link_speed = pci_priv->def_link_speed;
  324. if (!link_speed || !link_width) {
  325. cnss_pr_err("PCI link speed or width is not valid\n");
  326. return -EINVAL;
  327. }
  328. break;
  329. default:
  330. cnss_pr_err("Unknown PCI link status config: %u\n", status);
  331. return -EINVAL;
  332. }
  333. ret = cnss_pci_set_link_bandwidth(pci_priv, link_speed, link_width);
  334. if (!ret)
  335. pci_priv->cur_link_speed = link_speed;
  336. return ret;
  337. }
  338. int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
  339. {
  340. int ret = 0, retry = 0;
  341. struct cnss_plat_data *plat_priv;
  342. int sw_ctrl_gpio;
  343. plat_priv = pci_priv->plat_priv;
  344. sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;
  345. cnss_pr_vdbg("%s PCI link\n", link_up ? "Resuming" : "Suspending");
  346. if (link_up) {
  347. retry:
  348. ret = cnss_pci_set_link_up(pci_priv);
  349. if (ret && retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
  350. cnss_pr_dbg("Retry PCI link training #%d\n", retry);
  351. cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
  352. cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
  353. if (pci_priv->pci_link_down_ind)
  354. msleep(LINK_TRAINING_RETRY_DELAY_MS * retry);
  355. goto retry;
  356. }
  357. } else {
  358. /* Since DRV suspend cannot be done in Gen 3, set it to
  359. * Gen 2 if current link speed is larger than Gen 2.
  360. */
  361. if (pci_priv->drv_connected_last &&
  362. pci_priv->cur_link_speed > PCI_EXP_LNKSTA_CLS_5_0GB)
  363. cnss_set_pci_link_status(pci_priv, PCI_GEN2);
  364. ret = cnss_pci_set_link_down(pci_priv);
  365. }
  366. if (pci_priv->drv_connected_last) {
  367. if ((link_up && !ret) || (!link_up && ret))
  368. cnss_set_pci_link_status(pci_priv, PCI_DEF);
  369. }
  370. return ret;
  371. }
#ifdef CONFIG_PCIE_SWITCH_SUPPORT
/* Enable/disable the DSP link via the MSM PCIe RC driver and cache the
 * requested state in pci_dsp_link_status.
 */
int cnss_pci_dsp_link_control(struct cnss_pci_data *pci_priv,
			      bool link_enable)
{
	if (!pci_priv)
		return -ENODEV;

	pci_priv->pci_dsp_link_status = link_enable;

	return msm_pcie_dsp_link_control(pci_priv->pci_dev, link_enable);
}

/* Record the DSP link state without touching the hardware. */
int cnss_pci_set_dsp_link_status(struct cnss_pci_data *pci_priv,
				 bool link_enable)
{
	if (!pci_priv)
		return -ENODEV;

	pci_priv->pci_dsp_link_status = link_enable;

	return 0;
}

/* Return the cached DSP link state, or -ENODEV without a context. */
int cnss_pci_get_dsp_link_status(struct cnss_pci_data *pci_priv)
{
	if (!pci_priv)
		return -ENODEV;

	return pci_priv->pci_dsp_link_status;
}
/* Bring up the DSP<->WLAN link on a PCIe switch platform, retrying link
 * training up to DSP_LINK_ENABLE_RETRY_COUNT_MAX times. Between attempts
 * the link is disabled and pinctrl state is toggled off/on with settle
 * delays to reset the link partner.
 *
 * Returns 0 on success, negative errno on failure.
 */
int cnss_pci_dsp_link_enable(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	int retry_count = 0;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	/* For PCIe switch platform, wait for link train of DSP<->WLAN complete
	 */
	while (retry_count++ < DSP_LINK_ENABLE_RETRY_COUNT_MAX) {
		ret = cnss_pci_dsp_link_control(pci_priv, true);
		if (!ret)
			break;

		/* Training failed: drop the link and power-cycle via
		 * pinctrl before the next attempt.
		 */
		cnss_pci_dsp_link_control(pci_priv, false);
		cnss_pr_err("DSP<->WLAN link train failed, retry...\n");
		cnss_select_pinctrl_state(plat_priv, false);
		usleep_range(DSP_LINK_ENABLE_DELAY_TIME_US_MIN,
			     DSP_LINK_ENABLE_DELAY_TIME_US_MAX);
		ret = cnss_select_pinctrl_enable(plat_priv);
		if (ret) {
			cnss_pr_err("Failed to select pinctrl state, err = %d\n", ret);
			return ret;
		}
		usleep_range(DSP_LINK_ENABLE_DELAY_TIME_US_MIN,
			     DSP_LINK_ENABLE_DELAY_TIME_US_MAX);
	}

	return ret;
}
#ifdef CONFIG_PCIE_SWITCH_RETRAIN_LINK_SUPPORT
/* Retrain the DSP <-> EP link at @target_link_speed via the MSM PCIe RC
 * driver, and record it as the new default link speed on success.
 */
int cnss_pci_dsp_link_retrain(struct cnss_pci_data *pci_priv,
			      u16 target_link_speed)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	cnss_pr_dbg("Setting DSP <-> EP link speed:0x%x\n", target_link_speed);
	ret = msm_pcie_retrain_port_link(pci_priv->pci_dev, target_link_speed);
	if (ret) {
		cnss_pr_err("Failed to retrain link, err = %d\n", ret);
		return ret;
	}

	pci_priv->def_link_speed = target_link_speed;

	return ret;
}
#else
/* Stub when the RC driver lacks retrain-link support. */
int cnss_pci_dsp_link_retrain(struct cnss_pci_data *pci_priv,
			      u16 target_link_speed)
{
	return -EOPNOTSUPP;
}
#endif
#else
/* Stubs when CONFIG_PCIE_SWITCH_SUPPORT is not enabled: DSP link
 * operations are not available.
 */
int cnss_pci_dsp_link_control(struct cnss_pci_data *pci_priv,
			      bool link_enable)
{
	return -EOPNOTSUPP;
}

int cnss_pci_set_dsp_link_status(struct cnss_pci_data *pci_priv,
				 bool link_enable)
{
	return -EOPNOTSUPP;
}

int cnss_pci_get_dsp_link_status(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}

int cnss_pci_dsp_link_enable(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}

int cnss_pci_dsp_link_retrain(struct cnss_pci_data *pci_priv,
			      u16 target_link_speed)
{
	return -EOPNOTSUPP;
}
#endif
  472. int cnss_pci_prevent_l1(struct device *dev)
  473. {
  474. struct pci_dev *pci_dev = to_pci_dev(dev);
  475. struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
  476. int ret;
  477. if (!pci_priv) {
  478. cnss_pr_err("pci_priv is NULL\n");
  479. return -ENODEV;
  480. }
  481. if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
  482. cnss_pr_dbg("PCIe link is in suspend state\n");
  483. return -EIO;
  484. }
  485. if (pci_priv->pci_link_down_ind) {
  486. cnss_pr_err("PCIe link is down\n");
  487. return -EIO;
  488. }
  489. ret = _cnss_pci_prevent_l1(pci_priv);
  490. if (ret == -EIO) {
  491. cnss_pr_err("Failed to prevent PCIe L1, considered as link down\n");
  492. cnss_pci_link_down(dev);
  493. }
  494. return ret;
  495. }
  496. EXPORT_SYMBOL(cnss_pci_prevent_l1);
  497. void cnss_pci_allow_l1(struct device *dev)
  498. {
  499. struct pci_dev *pci_dev = to_pci_dev(dev);
  500. struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
  501. if (!pci_priv) {
  502. cnss_pr_err("pci_priv is NULL\n");
  503. return;
  504. }
  505. if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
  506. cnss_pr_dbg("PCIe link is in suspend state\n");
  507. return;
  508. }
  509. if (pci_priv->pci_link_down_ind) {
  510. cnss_pr_err("PCIe link is down\n");
  511. return;
  512. }
  513. _cnss_pci_allow_l1(pci_priv);
  514. }
  515. EXPORT_SYMBOL(cnss_pci_allow_l1);
/* Select the default multi-vector MSI layout for this device. */
int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
{
	pci_priv->msi_config = &msi_config;
	return 0;
}
#ifdef CONFIG_ONE_MSI_VECTOR
/* Select the single-vector MSI layout (all users share vector 0). */
int cnss_pci_get_one_msi_assignment(struct cnss_pci_data *pci_priv)
{
	pci_priv->msi_config = &msi_config_one_msi;
	return 0;
}
  527. bool cnss_pci_fallback_one_msi(struct cnss_pci_data *pci_priv,
  528. int *num_vectors)
  529. {
  530. struct pci_dev *pci_dev = pci_priv->pci_dev;
  531. struct cnss_msi_config *msi_config;
  532. cnss_pci_get_one_msi_assignment(pci_priv);
  533. msi_config = pci_priv->msi_config;
  534. if (!msi_config) {
  535. cnss_pr_err("one msi_config is NULL!\n");
  536. return false;
  537. }
  538. *num_vectors = pci_alloc_irq_vectors(pci_dev,
  539. msi_config->total_vectors,
  540. msi_config->total_vectors,
  541. PCI_IRQ_MSI);
  542. if (*num_vectors < 0) {
  543. cnss_pr_err("Failed to get one MSI vector!\n");
  544. return false;
  545. }
  546. cnss_pr_dbg("request MSI one vector\n");
  547. return true;
  548. }
/* True when the single-vector MSI layout is currently selected. */
bool cnss_pci_is_one_msi(struct cnss_pci_data *pci_priv)
{
	return pci_priv && pci_priv->msi_config &&
	       (pci_priv->msi_config->total_vectors == 1);
}
/* Size of the IRQ array passed to the MHI component in one-MSI mode. */
int cnss_pci_get_one_msi_mhi_irq_array_size(struct cnss_pci_data *pci_priv)
{
	return MHI_IRQ_NUMBER;
}
/* True when the FORCE_ONE_MSI quirk is set in the platform control
 * parameters.
 */
bool cnss_pci_is_force_one_msi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	return test_bit(FORCE_ONE_MSI, &plat_priv->ctrl_params.quirks);
}
#else
/* Stubs when CONFIG_ONE_MSI_VECTOR is not enabled: the single-MSI
 * fallback is unavailable.
 */
int cnss_pci_get_one_msi_assignment(struct cnss_pci_data *pci_priv)
{
	return 0;
}

bool cnss_pci_fallback_one_msi(struct cnss_pci_data *pci_priv,
			       int *num_vectors)
{
	return false;
}

bool cnss_pci_is_one_msi(struct cnss_pci_data *pci_priv)
{
	return false;
}

int cnss_pci_get_one_msi_mhi_irq_array_size(struct cnss_pci_data *pci_priv)
{
	return 0;
}

bool cnss_pci_is_force_one_msi(struct cnss_pci_data *pci_priv)
{
	return false;
}
#endif
/* IOMMU fault handler installed by cnss_pci_init_smmu(): mark the
 * firmware as down and force an FW assert so the fault context can be
 * captured.
 */
static int cnss_pci_smmu_fault_handler(struct iommu_domain *domain,
				       struct device *dev, unsigned long iova,
				       int flags, void *handler_token)
{
	struct cnss_pci_data *pci_priv = handler_token;

	cnss_fatal_err("SMMU fault happened with IOVA 0x%lx\n", iova);

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	pci_priv->is_smmu_fault = true;
	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
	cnss_force_fw_assert(&pci_priv->pci_dev->dev);

	/* IOMMU driver requires -ENOSYS to print debug info. */
	return -ENOSYS;
}
/* Initialize SMMU-related state from the "qcom,iommu-group" phandle on
 * the PCI device's DT node: S1 stage / fault handler (when the group
 * uses "fastmap" DMA), the mandatory DMA IOVA window, the optional IPA
 * IOVA window, and the iommu-geometry flag.
 *
 * Returns 0 on success (or when no iommu-group is configured), negative
 * errno on failure.
 */
int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device_node *of_node;
	struct resource *res;
	const char *iommu_dma_type;
	u32 addr_win[2];
	int ret = 0;

	of_node = of_parse_phandle(pci_dev->dev.of_node, "qcom,iommu-group", 0);
	if (!of_node)
		return ret; /* No SMMU group configured; nothing to do. */

	cnss_pr_dbg("Initializing SMMU\n");

	pci_priv->iommu_domain = iommu_get_domain_for_dev(&pci_dev->dev);
	ret = of_property_read_string(of_node, "qcom,iommu-dma",
				      &iommu_dma_type);
	if (!ret && !strcmp("fastmap", iommu_dma_type)) {
		cnss_pr_dbg("Enabling SMMU S1 stage\n");
		pci_priv->smmu_s1_enable = true;
		iommu_set_fault_handler(pci_priv->iommu_domain,
					cnss_pci_smmu_fault_handler, pci_priv);
		cnss_register_iommu_fault_handler_irq(pci_priv);
	}

	/* Mandatory DMA IOVA window: <start length>. */
	ret = of_property_read_u32_array(of_node, "qcom,iommu-dma-addr-pool",
					 addr_win, ARRAY_SIZE(addr_win));
	if (ret) {
		cnss_pr_err("Invalid SMMU size window, err = %d\n", ret);
		of_node_put(of_node);
		return ret;
	}

	pci_priv->smmu_iova_start = addr_win[0];
	pci_priv->smmu_iova_len = addr_win[1];
	cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: 0x%zx\n",
		    &pci_priv->smmu_iova_start,
		    pci_priv->smmu_iova_len);

	/* Optional dedicated IOVA range for IPA. */
	res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
					   "smmu_iova_ipa");
	if (res) {
		pci_priv->smmu_iova_ipa_start = res->start;
		pci_priv->smmu_iova_ipa_current = res->start;
		pci_priv->smmu_iova_ipa_len = resource_size(res);
		cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: 0x%zx\n",
			    &pci_priv->smmu_iova_ipa_start,
			    pci_priv->smmu_iova_ipa_len);
	}

	pci_priv->iommu_geometry = of_property_read_bool(of_node,
							 "qcom,iommu-geometry");
	cnss_pr_dbg("iommu_geometry: %d\n", pci_priv->iommu_geometry);

	of_node_put(of_node);

	return 0;
}
/* Dump PCIe registers into @buf (up to @len bytes) via the MSM PCIe RC
 * driver.
 */
int _cnss_pci_get_reg_dump(struct cnss_pci_data *pci_priv,
			   u8 *buf, u32 len)
{
	return msm_pcie_reg_dump(pci_priv->pci_dev, buf, len);
}
#if IS_ENABLED(CONFIG_ARCH_QCOM)
/**
 * cnss_pci_of_reserved_mem_device_init() - Assign reserved memory region
 * to given PCI device
 * @pci_priv: driver PCI bus context pointer
 *
 * This function shall call corresponding of_reserved_mem_device* API to
 * assign reserved memory region to PCI device based on where the memory is
 * defined and attached to (platform device of_node or PCI device of_node)
 * in device tree.
 *
 * Return: 0 for success, negative value for error. Note that -EINVAL
 * (no reserved-memory assigned) is logged as ignorable but still
 * propagated to the caller.
 */
int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
{
	struct device *dev_pci = &pci_priv->pci_dev->dev;
	int ret;

	/* Use of_reserved_mem_device_init_by_idx() if reserved memory is
	 * attached to platform device of_node.
	 */
	ret = of_reserved_mem_device_init(dev_pci);
	if (ret) {
		if (ret == -EINVAL)
			cnss_pr_vdbg("Ignore, no specific reserved-memory assigned\n");
		else
			cnss_pr_err("Failed to init reserved mem device, err = %d\n",
				    ret);
	}

	if (dev_pci->cma_area)
		cnss_pr_dbg("CMA area is %s\n",
			    cma_get_name(dev_pci->cma_area));

	return ret;
}
/* No dedicated WLAN wake GPIO on Qualcomm SoC platforms — wake handling
 * appears to go through the MSM PCIe WAKEUP event (see
 * cnss_pci_event_cb()); confirm if porting to a new target.
 */
int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
{
	return 0;
}

void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
{
}
#endif