// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 */
#include <asm/barrier.h>
#include <linux/delay.h>
#include <linux/device.h>
#include "ipa_i.h"
#include "linux/msm_gsi.h"

/*
 * These values were determined empirically and show good E2E
 * bidirectional throughput.
 */
#define IPA_HOLB_TMR_EN 0x1
#define IPA_HOLB_TMR_DIS 0x0
#define IPA_POLL_AGGR_STATE_RETRIES_NUM 3
#define IPA_POLL_AGGR_STATE_SLEEP_MSEC 1
#define IPA_PKT_FLUSH_TO_US 100
#define IPA_POLL_FOR_EMPTINESS_NUM 50
#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200

/* xfer_rsc_idx should be 7 bits */
#define IPA_XFER_RSC_IDX_MAX 127

static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
	bool *is_empty);
static void ipa3_start_gsi_debug_monitor(u32 clnt_hdl);

int ipa3_enable_data_path(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
	struct ipa_ep_cfg_holb holb_cfg;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
	int res = 0;
	struct ipahal_reg_endp_init_rsrc_grp rsrc_grp;

	/* Assign the resource group for the pipe */
	memset(&rsrc_grp, 0, sizeof(rsrc_grp));
	rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client);
	if (rsrc_grp.rsrc_grp == -1) {
		IPAERR("invalid group for client %d\n", ep->client);
		WARN_ON(1);
		return -EFAULT;
	}

	IPADBG("Setting group %d for pipe %d\n",
		rsrc_grp.rsrc_grp, clnt_hdl);
	ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl,
		&rsrc_grp);

	IPADBG("Enabling data path\n");
	if (IPA_CLIENT_IS_CONS(ep->client)) {
		memset(&holb_cfg, 0, sizeof(holb_cfg));
		/*
		 * Set HOLB on USB DPL CONS to avoid an IPA stall
		 * if the DPL client is not pulling the data
		 * on the other end from IPA hw.
		 */
		if ((ep->client == IPA_CLIENT_USB_DPL_CONS) ||
			(ep->client == IPA_CLIENT_MHI_DPL_CONS))
			holb_cfg.en = IPA_HOLB_TMR_EN;
		else
			holb_cfg.en = IPA_HOLB_TMR_DIS;
		holb_cfg.tmr_val = 0;
		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
	}

	/* Enable the pipe */
	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
		if (IPA_CLIENT_IS_CONS(ep->client) &&
			(ep->keep_ipa_awake ||
			ipa3_ctx->resume_on_connect[ep->client] ||
			!ipa3_should_pipe_be_suspended(ep->client))) {
			memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
			ep_cfg_ctrl.ipa_ep_suspend = false;
			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
		}
	}

	return res;
}

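/*
 * Illustrative sketch (not part of the driver; compiled out): how a
 * consumer pipe would enable an immediate-drop HOLB policy with the same
 * helper used above. "example_clnt_hdl" is a hypothetical, already
 * connected CONS pipe handle.
 */
#if 0
static int example_enable_holb_drop(u32 example_clnt_hdl)
{
	struct ipa_ep_cfg_holb holb = { 0 };

	holb.en = IPA_HOLB_TMR_EN;	/* drop on head-of-line blocking */
	holb.tmr_val = 0;		/* zero grace timer: drop at once */
	return ipa3_cfg_ep_holb(example_clnt_hdl, &holb);
}
#endif
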
int ipa3_disable_data_path(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
	struct ipa_ep_cfg_holb holb_cfg;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
	struct ipa_ep_cfg_aggr ep_aggr;
	int res = 0;

	IPADBG("Disabling data path\n");
	if (IPA_CLIENT_IS_CONS(ep->client)) {
		memset(&holb_cfg, 0, sizeof(holb_cfg));
		holb_cfg.en = IPA_HOLB_TMR_EN;
		holb_cfg.tmr_val = 0;
		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
	}

	/*
	 * For IPA 4.0 and above the aggregation frame is closed together with
	 * channel STOP.
	 */
	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
		/* Suspend the pipe */
		if (IPA_CLIENT_IS_CONS(ep->client)) {
			memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
			ep_cfg_ctrl.ipa_ep_suspend = true;
			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
		}

		udelay(IPA_PKT_FLUSH_TO_US);
		ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl,
			&ep_aggr);
		if (ep_aggr.aggr_en) {
			res = ipa3_tag_aggr_force_close(clnt_hdl);
			if (res) {
				IPAERR("tag process timeout client:%d err:%d\n",
					clnt_hdl, res);
				ipa_assert();
			}
		}
	}

	return res;
}

int ipa3_reset_gsi_channel(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int result = -EFAULT;
	enum gsi_status gsi_res;
	bool undo_aggr_value = false;
	struct ipahal_reg_clkon_cfg fields;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameter\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];
	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	/*
	 * IPAv4.0 HW has a limitation where WSEQ in the MBIM NTH header is
	 * not reset to 0 when the MBIM pipe is reset. The workaround is to
	 * disable HW clock gating for the AGGR block using the IPA_CLKON_CFG
	 * register; the undo flag disables the bit again after the reset is
	 * finished.
	 */
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
		if (ep->cfg.aggr.aggr == IPA_MBIM_16 &&
			ep->cfg.aggr.aggr_en != IPA_BYPASS_AGGR) {
			ipahal_read_reg_fields(IPA_CLKON_CFG, &fields);
			if (fields.open_aggr_wrapper) {
				undo_aggr_value = true;
				fields.open_aggr_wrapper = false;
				ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
			}
		}
	}

	/*
	 * Reset the channel.
	 * If the reset is called after a stop, we need to wait 1 ms.
	 */
	msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
	gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error resetting channel: %d\n", gsi_res);
		result = -EFAULT;
		goto reset_chan_fail;
	}

	/* undo the aggr value if the flag was set above */
	if (undo_aggr_value) {
		fields.open_aggr_wrapper = false;
		ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
	}

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("exit\n");
	return 0;

reset_chan_fail:
	/* undo the aggr value if the flag was set above */
	if (undo_aggr_value) {
		fields.open_aggr_wrapper = false;
		ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
	}
	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
	return result;
}

int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int result = -EFAULT;
	enum gsi_status gsi_res;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];
	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	/* Reset the event ring */
	gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error resetting event: %d\n", gsi_res);
		result = -EFAULT;
		goto reset_evt_fail;
	}

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("exit\n");
	return 0;

reset_evt_fail:
	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
	return result;
}

static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params)
{
	return params->client < IPA_CLIENT_MAX;
}

static void ipa3_start_gsi_debug_monitor(u32 clnt_hdl)
{
	struct IpaHwOffloadStatsAllocCmdData_t *gsi_info;
	struct ipa3_ep_context *ep;
	enum ipa_client_type client_type;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameters.\n");
		return;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];
	client_type = ipa3_get_client_mapping(clnt_hdl);

	/* start uC gsi dbg stats monitor */
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
		switch (client_type) {
		case IPA_CLIENT_MHI_PRIME_TETH_PROD:
			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
			gsi_info->ch_id_info[0].ch_id = ep->gsi_chan_hdl;
			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
			ipa3_uc_debug_stats_alloc(*gsi_info);
			break;
		case IPA_CLIENT_MHI_PRIME_TETH_CONS:
			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
			gsi_info->ch_id_info[1].ch_id = ep->gsi_chan_hdl;
			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
			ipa3_uc_debug_stats_alloc(*gsi_info);
			break;
		case IPA_CLIENT_MHI_PRIME_RMNET_PROD:
			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
			gsi_info->ch_id_info[2].ch_id = ep->gsi_chan_hdl;
			gsi_info->ch_id_info[2].dir = DIR_PRODUCER;
			ipa3_uc_debug_stats_alloc(*gsi_info);
			break;
		case IPA_CLIENT_MHI_PRIME_RMNET_CONS:
			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
			gsi_info->ch_id_info[3].ch_id = ep->gsi_chan_hdl;
			gsi_info->ch_id_info[3].dir = DIR_CONSUMER;
			ipa3_uc_debug_stats_alloc(*gsi_info);
			break;
		case IPA_CLIENT_USB_PROD:
			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
			gsi_info->ch_id_info[0].ch_id = ep->gsi_chan_hdl;
			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
			ipa3_uc_debug_stats_alloc(*gsi_info);
			break;
		case IPA_CLIENT_USB_CONS:
			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
			gsi_info->ch_id_info[1].ch_id = ep->gsi_chan_hdl;
			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
			ipa3_uc_debug_stats_alloc(*gsi_info);
			break;
		default:
			IPADBG("client_type %d not supported\n",
				client_type);
			break;
		}
	}
}

int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map,
	enum ipa_smmu_cb_type cb_type)
{
	struct iommu_domain *smmu_domain;
	int res;

	if (!VALID_IPA_SMMU_CB_TYPE(cb_type)) {
		IPAERR("invalid cb_type\n");
		return -EINVAL;
	}

	if (ipa3_ctx->s1_bypass_arr[cb_type]) {
		IPADBG("CB# %d is set to s1 bypass\n", cb_type);
		return 0;
	}

	smmu_domain = ipa3_get_smmu_domain_by_type(cb_type);
	if (!smmu_domain) {
		IPAERR("invalid smmu domain\n");
		return -EINVAL;
	}

	if (map) {
		res = ipa3_iommu_map(smmu_domain, phys_addr, phys_addr,
			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
	} else {
		res = iommu_unmap(smmu_domain, phys_addr, PAGE_SIZE);
		res = (res != PAGE_SIZE);
	}

	if (res) {
		IPAERR("Fail to %s reg 0x%pa\n", map ? "map" : "unmap",
			&phys_addr);
		return -EINVAL;
	}

	IPADBG("Peer reg 0x%pa %s\n", &phys_addr, map ? "map" : "unmap");
	return 0;
}

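/*
 * Note (added for clarity): the register mapping above is an identity
 * mapping (IOVA == physical address) of a single page, and it carries
 * IOMMU_MMIO because the target is a peer device's register space rather
 * than normal memory.
 */
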
int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
	enum ipa_smmu_cb_type cb_type)
{
	struct iommu_domain *smmu_domain;
	int res;
	phys_addr_t phys;
	unsigned long va;
	struct scatterlist *sg;
	int count = 0;
	size_t len;
	int i;
	struct page *page;

	if (!VALID_IPA_SMMU_CB_TYPE(cb_type)) {
		IPAERR("invalid cb_type\n");
		return -EINVAL;
	}

	if (ipa3_ctx->s1_bypass_arr[cb_type]) {
		IPADBG("CB# %d is set to s1 bypass\n", cb_type);
		return 0;
	}

	smmu_domain = ipa3_get_smmu_domain_by_type(cb_type);
	if (!smmu_domain) {
		IPAERR("invalid smmu domain\n");
		return -EINVAL;
	}

	/*
	 * The USB GSI driver updates sgt irrespective of whether USB S1
	 * is enabled or bypassed.
	 * If USB S1 is enabled using the IOMMU, iova != pa.
	 * If USB S1 is bypassed, iova == pa.
	 */
	if (map) {
		if (sgt != NULL) {
			va = rounddown(iova, PAGE_SIZE);
			for_each_sg(sgt->sgl, sg, sgt->nents, i) {
				page = sg_page(sg);
				phys = page_to_phys(page);
				len = PAGE_ALIGN(sg->offset + sg->length);
				res = ipa3_iommu_map(smmu_domain, va, phys,
					len, IOMMU_READ | IOMMU_WRITE);
				if (res) {
					IPAERR("Fail to map pa=%pa\n", &phys);
					return -EINVAL;
				}
				va += len;
				count++;
			}
		} else {
			res = ipa3_iommu_map(smmu_domain,
				rounddown(iova, PAGE_SIZE),
				rounddown(iova, PAGE_SIZE),
				roundup(size + iova -
					rounddown(iova, PAGE_SIZE),
					PAGE_SIZE),
				IOMMU_READ | IOMMU_WRITE);
			if (res) {
				IPAERR("Fail to map 0x%llx\n", iova);
				return -EINVAL;
			}
		}
	} else {
		res = iommu_unmap(smmu_domain,
			rounddown(iova, PAGE_SIZE),
			roundup(size + iova - rounddown(iova, PAGE_SIZE),
				PAGE_SIZE));
		if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE),
			PAGE_SIZE)) {
			IPAERR("Fail to unmap 0x%llx\n", iova);
			return -EINVAL;
		}
	}

	IPADBG("Peer buff %s 0x%llx\n", map ? "map" : "unmap", iova);
	return 0;
}

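/*
 * Illustrative sketch (not part of the driver; compiled out): the
 * page-alignment arithmetic used above, factored out. A buffer at IOVA
 * "iova" spanning "size" bytes must be mapped from the start of the page
 * containing its first byte through the end of the page containing its
 * last byte. E.g. with PAGE_SIZE = 4096, iova = 0x1010 and size = 0x20,
 * the mapping covers 0x1000..0x1FFF.
 */
#if 0
static void example_peer_buff_bounds(u64 iova, u32 size,
	u64 *map_start, size_t *map_len)
{
	*map_start = rounddown(iova, PAGE_SIZE);
	*map_len = roundup(size + iova - rounddown(iova, PAGE_SIZE),
		PAGE_SIZE);
}
#endif
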
static enum ipa_client_cb_type ipa_get_client_cb_type(
	enum ipa_client_type client_type)
{
	enum ipa_client_cb_type client_cb;

	if (client_type == IPA_CLIENT_USB_PROD ||
		client_type == IPA_CLIENT_USB_CONS) {
		IPADBG("USB Client registered\n");
		client_cb = IPA_USB_CLNT;
	} else if (client_type == IPA_CLIENT_MHI_PROD ||
		client_type == IPA_CLIENT_MHI_CONS) {
		IPADBG("MHI Client registered\n");
		client_cb = IPA_MHI_CLNT;
	} else {
		IPAERR("Invalid IPA client\n");
		client_cb = IPA_MAX_CLNT;
	}
	return client_cb;
}

void ipa3_register_client_callback(int (*client_cb)(bool is_lock),
	bool (*teth_port_state)(void),
	enum ipa_client_type client_type)
{
	enum ipa_client_cb_type client;

	IPADBG("entry\n");
	client = ipa_get_client_cb_type(client_type);
	if (client == IPA_MAX_CLNT)
		return;

	if (client_cb == NULL) {
		IPAERR("Bad Param\n");
		return;
	}

	if (!ipa3_ctx->client_lock_unlock[client])
		ipa3_ctx->client_lock_unlock[client] = client_cb;
	if (!ipa3_ctx->get_teth_port_state[client])
		ipa3_ctx->get_teth_port_state[client] = teth_port_state;
	IPADBG("exit\n");
}

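/*
 * Illustrative sketch (hypothetical client-side code, not part of the
 * driver; compiled out): a USB client registering its lock/unlock and
 * tethered-port-state callbacks. "example_usb_lock_unlock" and
 * "example_usb_port_up" are assumed client helpers.
 */
#if 0
static int example_usb_lock_unlock(bool is_lock)
{
	/* take or release the client's private lock here */
	return 0;
}

static bool example_usb_port_up(void)
{
	/* report whether the tethered port is currently up */
	return true;
}

static void example_register_usb_cbs(void)
{
	ipa3_register_client_callback(example_usb_lock_unlock,
		example_usb_port_up, IPA_CLIENT_USB_PROD);
}
#endif
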
void ipa3_deregister_client_callback(enum ipa_client_type client_type)
{
	enum ipa_client_cb_type client_cb;

	IPADBG("entry\n");
	client_cb = ipa_get_client_cb_type(client_type);
	if (client_cb == IPA_MAX_CLNT)
		return;

	if (ipa3_ctx->client_lock_unlock[client_cb] == NULL &&
		ipa3_ctx->get_teth_port_state[client_cb] == NULL) {
		IPAERR("client_lock_unlock is already NULL\n");
		return;
	}

	ipa3_ctx->client_lock_unlock[client_cb] = NULL;
	ipa3_ctx->get_teth_port_state[client_cb] = NULL;
	IPADBG("exit\n");
}

static void client_lock_unlock_cb(enum ipa_client_type client, bool is_lock)
{
	enum ipa_client_cb_type client_cb;

	IPADBG("entry\n");
	client_cb = ipa_get_client_cb_type(client);
	if (client_cb == IPA_MAX_CLNT)
		return;

	if (ipa3_ctx->client_lock_unlock[client_cb])
		ipa3_ctx->client_lock_unlock[client_cb](is_lock);
	IPADBG("exit\n");
}

int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
	struct ipa_req_chan_out_params *out_params)
{
	int ipa_ep_idx;
	int result = -EFAULT;
	struct ipa3_ep_context *ep;
	struct ipahal_reg_ep_cfg_status ep_status;
	unsigned long gsi_dev_hdl;
	enum gsi_status gsi_res;
	const struct ipa_gsi_ep_config *gsi_ep_cfg_ptr;

	IPADBG("entry\n");
	if (params == NULL || out_params == NULL ||
		!ipa3_is_legal_params(params)) {
		IPAERR("bad parameters\n");
		return -EINVAL;
	}

	ipa_ep_idx = ipa3_get_ep_mapping(params->client);
	if (ipa_ep_idx == -1) {
		IPAERR("fail to alloc EP.\n");
		goto fail;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid) {
		IPAERR("EP already allocated.\n");
		goto fail;
	}

	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	ep->skip_ep_cfg = params->skip_ep_cfg;
	ep->valid = 1;
	ep->client = params->client;
	ep->client_notify = params->notify;
	ep->priv = params->priv;
	ep->keep_ipa_awake = params->keep_ipa_awake;

	/* Config QMB for the USB_CONS ep */
	if (!IPA_CLIENT_IS_PROD(ep->client)) {
		IPADBG("Configuring QMB on USB CONS pipe\n");
		if (ipa_ep_idx >= ipa3_ctx->ipa_num_pipes ||
			ipa3_ctx->ep[ipa_ep_idx].valid == 0) {
			IPAERR("bad parm.\n");
			return -EINVAL;
		}
		result = ipa3_cfg_ep_cfg(ipa_ep_idx, &params->ipa_ep_cfg.cfg);
		if (result) {
			IPAERR("fail to configure QMB.\n");
			return result;
		}
	}

	if (!ep->skip_ep_cfg) {
		if (ipa3_cfg_ep(ipa_ep_idx, &params->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto ipa_cfg_ep_fail;
		}
		/* Set EP status to 0 */
		memset(&ep_status, 0, sizeof(ep_status));
		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) {
			IPAERR("fail to configure status of EP.\n");
			goto ipa_cfg_ep_fail;
		}
		IPADBG("ep configuration successful\n");
	} else {
		IPADBG("Skipping endpoint configuration.\n");
	}

	out_params->clnt_hdl = ipa_ep_idx;

	result = ipa3_enable_data_path(out_params->clnt_hdl);
	if (result) {
		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
			out_params->clnt_hdl);
		goto ipa_cfg_ep_fail;
	}

	gsi_dev_hdl = ipa3_ctx->gsi_dev_hdl;
	gsi_res = gsi_alloc_evt_ring(&params->evt_ring_params, gsi_dev_hdl,
		&ep->gsi_evt_ring_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error allocating event ring: %d\n", gsi_res);
		result = -EFAULT;
		goto ipa_cfg_ep_fail;
	}

	gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
		params->evt_scratch);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error writing event ring scratch: %d\n", gsi_res);
		result = -EFAULT;
		goto write_evt_scratch_fail;
	}

	gsi_ep_cfg_ptr = ipa3_get_gsi_ep_info(ep->client);
	if (gsi_ep_cfg_ptr == NULL) {
		IPAERR("Error ipa3_get_gsi_ep_info ret NULL\n");
		result = -EFAULT;
		goto write_evt_scratch_fail;
	}

	params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl;
	params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num;
	params->chan_params.prefetch_mode = gsi_ep_cfg_ptr->prefetch_mode;
	params->chan_params.empty_lvl_threshold =
		gsi_ep_cfg_ptr->prefetch_threshold;
	gsi_res = gsi_alloc_channel(&params->chan_params, gsi_dev_hdl,
		&ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error allocating channel: %d, chan_id: %d\n", gsi_res,
			params->chan_params.ch_id);
		result = -EFAULT;
		goto write_evt_scratch_fail;
	}

	memcpy(&ep->chan_scratch, &params->chan_scratch,
		sizeof(union __packed gsi_channel_scratch));

	/*
	 * Update scratch for MCS smart prefetch:
	 * starting with IPA 4.5, smart prefetch is implemented by H/W.
	 * On IPA 4.0/4.1/4.2 we do not use MCS smart prefetch,
	 * so keep the fields zero there.
	 */
	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
		ep->chan_scratch.xdci.max_outstanding_tre =
			params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv;
	}

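	/*
	 * Added note (illustrative arithmetic, assumed values): with 16 B
	 * ring elements (re_size == GSI_CHAN_RE_SIZE_16B == 16) and an
	 * endpoint TLV FIFO of 8 entries, the limit computed above works
	 * out to 16 * 8 = 128 outstanding TREs.
	 */
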
	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
		params->chan_scratch);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error writing channel scratch: %d\n", gsi_res);
		result = -EFAULT;
		goto write_chan_scratch_fail;
	}

	gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl,
		&out_params->db_reg_phs_addr_lsb,
		&out_params->db_reg_phs_addr_msb);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error querying channel DB registers addresses: %d\n",
			gsi_res);
		result = -EFAULT;
		goto write_chan_scratch_fail;
	}

	ep->gsi_mem_info.evt_ring_len = params->evt_ring_params.ring_len;
	ep->gsi_mem_info.evt_ring_base_addr =
		params->evt_ring_params.ring_base_addr;
	ep->gsi_mem_info.evt_ring_base_vaddr =
		params->evt_ring_params.ring_base_vaddr;
	ep->gsi_mem_info.chan_ring_len = params->chan_params.ring_len;
	ep->gsi_mem_info.chan_ring_base_addr =
		params->chan_params.ring_base_addr;
	ep->gsi_mem_info.chan_ring_base_vaddr =
		params->chan_params.ring_base_vaddr;

	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client))
		ipa3_install_dflt_flt_rules(ipa_ep_idx);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx);
	IPADBG("exit\n");
	return 0;

write_chan_scratch_fail:
	gsi_dealloc_channel(ep->gsi_chan_hdl);
write_evt_scratch_fail:
	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
ipa_cfg_ep_fail:
	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail:
	return result;
}

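/*
 * Illustrative sketch (hypothetical caller, not part of the driver;
 * compiled out): the minimal calling sequence for
 * ipa3_request_gsi_channel(). The event ring and channel ring fields
 * (base addresses, lengths, scratch words) are elided; a real client
 * such as the USB/xDCI driver fills them in from rings it allocated.
 */
#if 0
static int example_request_usb_prod_channel(void)
{
	struct ipa_request_gsi_channel_params params = { 0 };
	struct ipa_req_chan_out_params out = { 0 };
	int ret;

	params.client = IPA_CLIENT_USB_PROD;
	params.keep_ipa_awake = false;
	/* fill params.evt_ring_params, params.chan_params, scratch... */

	ret = ipa3_request_gsi_channel(&params, &out);
	if (ret)
		return ret;

	/*
	 * out.clnt_hdl now identifies the pipe; the doorbell register
	 * address in out.db_reg_phs_addr_lsb/msb is handed to the
	 * peripheral so it can ring the GSI channel directly.
	 */
	return 0;
}
#endif
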
int ipa3_set_usb_max_packet_size(
	enum ipa_usb_max_usb_packet_size usb_max_packet_size)
{
	struct gsi_device_scratch dev_scratch;
	enum gsi_status gsi_res;

	IPADBG("entry\n");

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch));
	dev_scratch.mhi_base_chan_idx_valid = false;
	dev_scratch.max_usb_pkt_size_valid = true;
	dev_scratch.max_usb_pkt_size = usb_max_packet_size;
	gsi_res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
		&dev_scratch);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error writing device scratch: %d\n", gsi_res);
		return -EFAULT;
	}
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG("exit\n");
	return 0;
}

/* This function is called as part of USB pipe resume */
int ipa3_xdci_connect(u32 clnt_hdl)
{
	int result;
	struct ipa3_ep_context *ep;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];
	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	result = ipa3_start_gsi_channel(clnt_hdl);
	if (result) {
		IPAERR("failed to start gsi channel clnt_hdl=%u\n", clnt_hdl);
		goto exit;
	}

	result = ipa3_enable_data_path(clnt_hdl);
	if (result) {
		IPAERR("enable data path failed res=%d clnt_hdl=%d.\n", result,
			clnt_hdl);
		goto stop_ch;
	}

	IPADBG("exit\n");
	goto exit;

stop_ch:
	(void)ipa3_stop_gsi_channel(clnt_hdl);
exit:
	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
	return result;
}

/* This function is called as part of USB pipe connect */
int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
{
	struct ipa3_ep_context *ep;
	int result = -EFAULT;
	enum gsi_status gsi_res;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
		xferrscidx > IPA_XFER_RSC_IDX_MAX) {
		IPAERR("Bad parameters.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];
	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	if (xferrscidx_valid) {
		ep->chan_scratch.xdci.xferrscidx = xferrscidx;
		gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
			ep->chan_scratch);
		if (gsi_res != GSI_STATUS_SUCCESS) {
			IPAERR("Error writing channel scratch: %d\n", gsi_res);
			goto write_chan_scratch_fail;
		}
	}

	if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_delay = true;
		ep->ep_delay_set = true;

		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
		if (result)
			IPAERR("client (ep: %d) failed result=%d\n",
				clnt_hdl, result);
		else
			IPADBG("client (ep: %d) success\n", clnt_hdl);
	} else {
		ep->ep_delay_set = false;
	}

	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error starting channel: %d\n", gsi_res);
		goto write_chan_scratch_fail;
	}
	ipa3_start_gsi_debug_monitor(clnt_hdl);

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("exit\n");
	return 0;

write_chan_scratch_fail:
	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
	return result;
}

int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
	unsigned long chan_hdl)
{
	enum gsi_status gsi_res;

	memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
	gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error querying channel info: %d\n", gsi_res);
		return -EFAULT;
	}
	if (!gsi_chan_info->evt_valid) {
		IPAERR("Event info invalid\n");
		return -EFAULT;
	}
	return 0;
}

static bool ipa3_is_xdci_channel_with_given_info_empty(
	struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info)
{
	bool is_empty = false;

	if (!IPA_CLIENT_IS_CONS(ep->client)) {
		/* For UL channel: chan.RP == chan.WP */
		is_empty = (chan_info->rp == chan_info->wp);
	} else {
		/* For DL channel: */
		if (chan_info->wp !=
			(ep->gsi_mem_info.chan_ring_base_addr +
			ep->gsi_mem_info.chan_ring_len -
			GSI_CHAN_RE_SIZE_16B)) {
			/* if chan.WP != LINK TRB: chan.WP == evt.RP */
			is_empty = (chan_info->wp == chan_info->evt_rp);
		} else {
			/*
			 * if chan.WP == LINK TRB: chan.base_xfer_ring_addr
			 * == evt.RP
			 */
			is_empty = (ep->gsi_mem_info.chan_ring_base_addr ==
				chan_info->evt_rp);
		}
	}
	return is_empty;
}

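/*
 * Illustrative worked example for the DL case above (assumed numbers):
 * for a ring at base 0x1000 with chan_ring_len 0x100 and 16 B elements,
 * the link TRB sits at 0x1000 + 0x100 - 16 = 0x10F0. If chan.WP == 0x10F0
 * the write pointer has wrapped, so the channel is empty only when evt.RP
 * has wrapped back to the ring base (0x1000); otherwise it is empty when
 * evt.RP has caught up with chan.WP.
 */
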
static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
	bool *is_empty)
{
	struct gsi_chan_info chan_info;
	int res;

	if (!ep || !is_empty || !ep->valid) {
		IPAERR("Input Error\n");
		return -EFAULT;
	}

	res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
	if (res) {
		IPAERR("Failed to get GSI channel info\n");
		return -EFAULT;
	}

	*is_empty = ipa3_is_xdci_channel_with_given_info_empty(ep, &chan_info);
	return 0;
}

int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
	u32 source_pipe_bitmask)
{
	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
	int result;

	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
		IPADBG("APQ platform - ignore force clear\n");
		return 0;
	}

	memset(&req, 0, sizeof(req));
	req.request_id = request_id;
	req.source_pipe_bitmask = source_pipe_bitmask;
	if (throttle_source) {
		req.throttle_source_valid = 1;
		req.throttle_source = 1;
	}
	result = ipa3_qmi_enable_force_clear_datapath_send(&req);
	if (result) {
		IPAERR("ipa3_qmi_enable_force_clear_datapath_send failed %d\n",
			result);
		return result;
	}
	return 0;
}

int ipa3_disable_force_clear(u32 request_id)
{
	struct ipa_disable_force_clear_datapath_req_msg_v01 req;
	int result;

	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
		IPADBG("APQ platform - ignore force clear\n");
		return 0;
	}

	memset(&req, 0, sizeof(req));
	req.request_id = request_id;
	result = ipa3_qmi_disable_force_clear_datapath_send(&req);
	if (result) {
		IPAERR("ipa3_qmi_disable_force_clear_datapath_send failed %d\n",
			result);
		return result;
	}
	return 0;
}

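/*
 * Note (added for clarity): force-clear requests are paired by
 * request_id. A caller that enabled force clear must disable it with the
 * same id once the pipe has drained, as
 * ipa3_stop_ul_chan_with_data_drain() below does.
 */
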
/* Clocks should be voted before invoking this function */
static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc)
{
	int res;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
		!stop_in_proc) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}

	res = ipa3_stop_gsi_channel(clnt_hdl);
	if (res != 0 && res != -GSI_STATUS_AGAIN &&
		res != -GSI_STATUS_TIMED_OUT) {
		IPAERR("xDCI stop channel failed res=%d\n", res);
		return -EFAULT;
	}

	*stop_in_proc = (res != 0);

	IPADBG("xDCI channel is %s (result=%d)\n",
		res ? "STOP_IN_PROC/TimeOut" : "STOP", res);
	IPADBG("exit\n");
	return 0;
}

/* Clocks should be voted before invoking this function */
static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
	bool *stop_in_proc)
{
	unsigned long jiffies_start;
	unsigned long jiffies_timeout =
		msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC);
	int res;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
		!stop_in_proc) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}

	jiffies_start = jiffies;
	while (1) {
		res = ipa3_xdci_stop_gsi_channel(clnt_hdl,
			stop_in_proc);
		if (res) {
			IPAERR("failed to stop xDCI channel hdl=%d\n",
				clnt_hdl);
			return res;
		}

		if (!*stop_in_proc) {
			IPADBG("xDCI channel STOP hdl=%d\n", clnt_hdl);
			return res;
		}

		/*
		 * Give the previous stop request a chance to complete
		 * before retrying.
		 */
		udelay(IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC);
		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
			IPADBG("timeout waiting for xDCI channel emptiness\n");
			return res;
		}
	}
}

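/*
 * Summary of the UL stop sequence implemented below (added for clarity):
 * try a brute-force stop; if the channel is still mid-stop, optionally
 * remove the endpoint delay, poll the ring for emptiness, retry the
 * stop, and as a last resort issue the QMI force-clear request before
 * one final stop attempt.
 */
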
/* Clocks should be voted for before invoking this function */
static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
	u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl,
	bool remove_delay)
{
	int result;
	bool is_empty = false;
	int i;
	bool stop_in_proc;
	struct ipa3_ep_context *ep;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	/* first try to stop the channel */
	result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
		&stop_in_proc);
	if (result) {
		IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
			clnt_hdl, ep->client);
		goto exit;
	}
	if (!stop_in_proc)
		goto exit;

	if (remove_delay && ep->ep_delay_set) {
		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_delay = false;
		result = ipa3_cfg_ep_ctrl(clnt_hdl,
			&ep_cfg_ctrl);
		if (result) {
			IPAERR
			("client (ep: %d) failed to remove delay result=%d\n",
				clnt_hdl, result);
		} else {
			IPADBG("client (ep: %d) delay removed\n",
				clnt_hdl);
			ep->ep_delay_set = false;
		}
	}

	/* if the stop is in process, wait for emptiness */
	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
		result = ipa3_is_xdci_channel_empty(ep, &is_empty);
		if (result)
			goto exit;
		if (is_empty)
			break;
		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
	}

	/* if the channel is now empty, try to stop it again */
	if (is_empty) {
		result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
			&stop_in_proc);
		if (result) {
			IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
				clnt_hdl, ep->client);
			goto exit;
		}
		if (!stop_in_proc)
			goto exit;
	}

	/* if still stop_in_proc or not empty, activate force clear */
	if (should_force_clear) {
		result = ipa3_enable_force_clear(qmi_req_id, false,
			source_pipe_bitmask);
		if (result) {
			struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };

			/*
			 * Assuming a modem SSR/shutdown here; the AP can
			 * remove the delay in this case.
			 */
			IPAERR(
				"failed to force clear %d, remove delay from SCND reg\n",
				result);
			ep_ctrl_scnd.endp_delay = false;
			ipahal_write_reg_n_fields(
				IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl,
				&ep_ctrl_scnd);
		}
	}

	/* with force clear, wait for emptiness */
	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
		result = ipa3_is_xdci_channel_empty(ep, &is_empty);
		if (result)
			goto disable_force_clear_and_exit;
		if (is_empty)
			break;
		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
	}

	/* try to stop for the last time */
	result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
		&stop_in_proc);
	if (result) {
		IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
			clnt_hdl, ep->client);
		goto disable_force_clear_and_exit;
	}
	result = stop_in_proc ? -EFAULT : 0;

disable_force_clear_and_exit:
	if (should_force_clear)
		ipa3_disable_force_clear(qmi_req_id);
exit:
	if (remove_delay && ep->ep_delay_set) {
		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_delay = false;
		result = ipa3_cfg_ep_ctrl(clnt_hdl,
			&ep_cfg_ctrl);
		if (result) {
			IPAERR
			("client (ep: %d) failed to remove delay result=%d\n",
				clnt_hdl, result);
		} else {
			IPADBG("client (ep: %d) delay removed\n",
				clnt_hdl);
			ep->ep_delay_set = false;
		}
	}
	IPADBG("exit\n");
	return result;
}

/*
 * Set or reset the ep delay for a CLIENT PROD pipe.
 * Clocks should be voted for and locks should be taken before calling
 * this API.
 */
int ipa3_set_reset_client_prod_pipe_delay(bool set_reset,
	enum ipa_client_type client)
{
	int result = 0;
	int pipe_idx;
	struct ipa3_ep_context *ep;
	struct ipa_ep_cfg_ctrl ep_ctrl;

	memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
	ep_ctrl.ipa_ep_delay = set_reset;

	if (IPA_CLIENT_IS_CONS(client)) {
		IPAERR("client (%d) not PROD\n", client);
		return -EINVAL;
	}

	pipe_idx = ipa3_get_ep_mapping(client);
	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
		IPAERR("client (%d) not valid\n", client);
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[pipe_idx];

	/* Setting delay on USB_PROD with skip_ep_cfg */
	client_lock_unlock_cb(client, true);
	if (ep->valid && ep->skip_ep_cfg) {
		ep->ep_delay_set = ep_ctrl.ipa_ep_delay;
		result = ipa3_cfg_ep_ctrl(pipe_idx, &ep_ctrl);
		if (result)
			IPAERR("client (ep: %d) failed result=%d\n",
				pipe_idx, result);
		else
			IPADBG("client (ep: %d) success\n", pipe_idx);
	}
	client_lock_unlock_cb(client, false);
	return result;
}

static bool ipa3_get_teth_port_status(enum ipa_client_type client)
{
	enum ipa_client_cb_type client_cb;

	client_cb = ipa_get_client_cb_type(client);
	if (client_cb == IPA_MAX_CLNT)
		return false;
	if (ipa3_ctx->get_teth_port_state[client_cb])
		return ipa3_ctx->get_teth_port_state[client_cb]();
	return false;
}

/*
 * Start/stop the CLIENT PROD pipes in SSR scenarios
 */
int ipa3_start_stop_client_prod_gsi_chnl(enum ipa_client_type client,
	bool start_chnl)
{
	int result = 0;
	int pipe_idx;
	struct ipa3_ep_context *ep;

	if (IPA_CLIENT_IS_CONS(client)) {
		IPAERR("client (%d) not PROD\n", client);
		return -EINVAL;
	}

	pipe_idx = ipa3_get_ep_mapping(client);
	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
		IPAERR("client (%d) not valid\n", client);
		return -EINVAL;
	}

	client_lock_unlock_cb(client, true);
	ep = &ipa3_ctx->ep[pipe_idx];
	if (ep->valid && ep->skip_ep_cfg && ipa3_get_teth_port_status(client)) {
		if (start_chnl)
			result = ipa3_start_gsi_channel(pipe_idx);
		else
			result = ipa3_stop_gsi_channel(pipe_idx);
	}
	client_lock_unlock_cb(client, false);
	return result;
}

int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset,
	enum ipa_client_type client)
{
	int pipe_idx;
	struct ipa3_ep_context *ep;
	struct ipa_ep_cfg_ctrl ep_suspend;
	struct ipa_ep_cfg_holb ep_holb;

	memset(&ep_suspend, 0, sizeof(ep_suspend));
	memset(&ep_holb, 0, sizeof(ep_holb));

	ep_suspend.ipa_ep_suspend = set_reset;
	ep_holb.tmr_val = 0;
	ep_holb.en = set_reset;

	if (IPA_CLIENT_IS_PROD(client)) {
		IPAERR("client (%d) not CONS\n", client);
		return -EINVAL;
	}

	pipe_idx = ipa3_get_ep_mapping(client);
	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
		IPAERR("client (%d) not valid\n", client);
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[pipe_idx];
	/* Setting sus/holb on MHI_CONS with skip_ep_cfg */
	client_lock_unlock_cb(client, true);
	if (ep->valid && ep->skip_ep_cfg) {
		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
			ipahal_write_reg_n_fields(
				IPA_ENDP_INIT_CTRL_n,
				pipe_idx, &ep_suspend);
		/*
		 * ipa3_cfg_ep_holb is not used here because we are
		 * setting HOLB on Q6 pipes, and from the APPS perspective
		 * they are not valid, therefore, that function would fail.
		 */
		ipahal_write_reg_n_fields(
			IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
			pipe_idx, &ep_holb);
		ipahal_write_reg_n_fields(
			IPA_ENDP_INIT_HOL_BLOCK_EN_n,
			pipe_idx, &ep_holb);

		/* An IPA 4.5 issue requires HOLB_EN to be written twice */
		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
			ipahal_write_reg_n_fields(
				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
				pipe_idx, &ep_holb);
	}
	client_lock_unlock_cb(client, false);
	return 0;
}

void ipa3_xdci_ep_delay_rm(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
	int result;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	if (ep->ep_delay_set) {
		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_delay = false;

		if (!ep->keep_ipa_awake)
			IPA_ACTIVE_CLIENTS_INC_EP
				(ipa3_get_client_mapping(clnt_hdl));

		result = ipa3_cfg_ep_ctrl(clnt_hdl,
			&ep_cfg_ctrl);

		if (!ep->keep_ipa_awake)
			IPA_ACTIVE_CLIENTS_DEC_EP
				(ipa3_get_client_mapping(clnt_hdl));

		if (result) {
			IPAERR
			("client (ep: %d) failed to remove delay result=%d\n",
				clnt_hdl, result);
		} else {
			IPADBG("client (ep: %d) delay removed\n",
				clnt_hdl);
			ep->ep_delay_set = false;
		}
	}
}

int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
{
	struct ipa3_ep_context *ep;
	int result;
	u32 source_pipe_bitmask = 0;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	ipa3_disable_data_path(clnt_hdl);

	if (!IPA_CLIENT_IS_CONS(ep->client)) {
		IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
			clnt_hdl, ep->client);
		source_pipe_bitmask = 1 <<
			ipa3_get_ep_mapping(ep->client);
		result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
			source_pipe_bitmask, should_force_clear, clnt_hdl,
			true);
		if (result) {
			IPAERR("Fail to stop UL channel with data drain\n");
			WARN_ON(1);
			goto stop_chan_fail;
		}
	} else {
		IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n",
			clnt_hdl, ep->client);
		result = ipa3_stop_gsi_channel(clnt_hdl);
		if (result) {
			IPAERR("Error stopping channel (CONS client): %d\n",
				result);
			goto stop_chan_fail;
		}
	}
	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("exit\n");
	return 0;

stop_chan_fail:
	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
	return result;
}

int ipa3_release_gsi_channel(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int result = -EFAULT;
	enum gsi_status gsi_res;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error deallocating channel: %d\n", gsi_res);
		goto dealloc_chan_fail;
	}

	gsi_res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error deallocating event: %d\n", gsi_res);
		goto dealloc_chan_fail;
	}

	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client))
		ipa3_delete_dflt_flt_rules(clnt_hdl);

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));

	IPADBG("exit\n");
	return 0;

dealloc_chan_fail:
	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
	return result;
}

int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
	bool should_force_clear, u32 qmi_req_id, bool is_dpl)
{
	struct ipa3_ep_context *ul_ep = NULL;
	struct ipa3_ep_context *dl_ep;
	int result = -EFAULT;
	u32 source_pipe_bitmask = 0;
	bool dl_data_pending = true;
	bool ul_data_pending = true;
	int i;
	bool is_empty = false;
	struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info;
	int aggr_active_bitmap = 0;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;

	/* In case of DPL, dl is the DPL channel/client */

	IPADBG("entry\n");
	if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
		(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}

	dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
	if (!is_dpl)
		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));

	result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info,
		dl_ep->gsi_chan_hdl);
	if (result)
		goto disable_clk_and_exit;

	if (!is_dpl) {
		result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
			ul_ep->gsi_chan_hdl);
		if (result)
			goto disable_clk_and_exit;
	}

	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
		if (!dl_data_pending && !ul_data_pending)
			break;
		result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
		if (result)
			goto disable_clk_and_exit;
		if (!is_empty) {
			dl_data_pending = true;
			break;
		}
		dl_data_pending = false;
		if (!is_dpl) {
			result = ipa3_is_xdci_channel_empty(ul_ep, &is_empty);
			if (result)
				goto disable_clk_and_exit;
			ul_data_pending = !is_empty;
		} else {
			ul_data_pending = false;
		}

		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
	}

	if (!dl_data_pending) {
		aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
		if (aggr_active_bitmap & (1 << dl_clnt_hdl)) {
			IPADBG("DL/DPL data pending due to open aggr. frame\n");
			dl_data_pending = true;
		}
	}
	if (dl_data_pending) {
		IPAERR("DL/DPL data pending, can't suspend\n");
		result = -EFAULT;
		goto disable_clk_and_exit;
	}

	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
		/* Suspend the DL/DPL EP */
		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_suspend = true;
		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
	}

	/*
	 * Check if the DL/DPL channel is empty again; data could have entered
	 * the channel before its IPA EP was suspended.
	 */
	result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
	if (result)
		goto unsuspend_dl_and_exit;
	if (!is_empty) {
		IPAERR("DL/DPL data pending, can't suspend\n");
		result = -EFAULT;
		goto unsuspend_dl_and_exit;
	}

	/* Stop the DL channel */
	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
	if (result) {
		IPAERR("Error stopping DL/DPL channel: %d\n", result);
		result = -EFAULT;
		goto unsuspend_dl_and_exit;
	}

	/* Stop the UL channel */
	if (!is_dpl) {
		source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
		result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
			source_pipe_bitmask, should_force_clear, ul_clnt_hdl,
			false);
		if (result) {
			IPAERR("Error stopping UL channel: result = %d\n",
				result);
			goto start_dl_and_exit;
		}
	}

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
	IPADBG("exit\n");
	return 0;

start_dl_and_exit:
	gsi_start_channel(dl_ep->gsi_chan_hdl);
	ipa3_start_gsi_debug_monitor(dl_clnt_hdl);
unsuspend_dl_and_exit:
	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
		/* Unsuspend the DL EP */
		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_suspend = false;
		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
	}
disable_clk_and_exit:
	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
	return result;
}

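/*
 * Illustrative sketch (hypothetical caller, not part of the driver;
 * compiled out): suspend/resume pairing for a DPL session, where only
 * the DL (DPL) channel exists, so is_dpl is true and the UL handle is
 * ignored.
 */
#if 0
static int example_dpl_suspend_then_resume(u32 dpl_hdl)
{
	int ret;

	ret = ipa3_xdci_suspend(0 /* UL hdl unused for DPL */, dpl_hdl,
		false /* should_force_clear */, 0 /* qmi_req_id */,
		true /* is_dpl */);
	if (ret)
		return ret;

	/* ... later, on resume ... */
	return ipa3_xdci_resume(0, dpl_hdl, true);
}
#endif
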
int ipa3_start_gsi_channel(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int result = -EFAULT;
	enum gsi_status gsi_res;
	enum ipa_client_type client_type;

	IPADBG("entry\n");
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad parameters.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];
	client_type = ipa3_get_client_mapping(clnt_hdl);
	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(client_type);

	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error starting channel: %d\n", gsi_res);
		goto start_chan_fail;
	}
	ipa3_start_gsi_debug_monitor(clnt_hdl);

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(client_type);

	IPADBG("exit\n");
	return 0;

start_chan_fail:
	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
	return result;
}

int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
{
	struct ipa3_ep_context *ul_ep = NULL;
	struct ipa3_ep_context *dl_ep = NULL;
	enum gsi_status gsi_res;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;

	/* In case of DPL, dl is the DPL channel/client */

	IPADBG("entry\n");
	if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
		(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
		IPAERR("Bad parameter.\n");
		return -EINVAL;
	}

	dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
	if (!is_dpl)
		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));

	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
		/* Unsuspend the DL/DPL EP */
		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_suspend = false;
		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
	}

	/* Start the DL channel */
	gsi_res = gsi_start_channel(dl_ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS)
		IPAERR("Error starting DL channel: %d\n", gsi_res);
	ipa3_start_gsi_debug_monitor(dl_clnt_hdl);

	/* Start the UL channel */
	if (!is_dpl) {
		gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
		if (gsi_res != GSI_STATUS_SUCCESS)
			IPAERR("Error starting UL channel: %d\n", gsi_res);
		ipa3_start_gsi_debug_monitor(ul_clnt_hdl);
	}

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));

	IPADBG("exit\n");
	return 0;
}

/**
 * ipa3_clear_endpoint_delay() - Remove the ep delay set on the IPA pipe
 * before a client disconnect.
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to remove
 * the ep delay on an IPA consumer pipe before disconnect in non-GPI mode.
 * This API expects the caller to take responsibility for freeing any needed
 * headers, routing and filtering tables and rules as needed.
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa3_clear_endpoint_delay(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	struct ipa_ep_cfg_ctrl ep_ctrl = { 0 };
	struct ipa_enable_force_clear_datapath_req_msg_v01 req = { 0 };
	int res;

	if (unlikely(!ipa3_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return -EINVAL;
	}

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	if (!ipa3_ctx->tethered_flow_control) {
		IPADBG("APPS flow control is not enabled\n");
		/* Send a message to the modem to disable flow control honoring. */
		req.request_id = clnt_hdl;
		req.source_pipe_bitmask = 1 << clnt_hdl;
		res = ipa3_qmi_enable_force_clear_datapath_send(&req);
		if (res) {
			IPADBG("enable_force_clear_datapath failed %d\n",
				res);
		}
		ep->qmi_request_sent = true;
	}

	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
	/*
	 * Set the disconnect-in-progress flag so further flow control
	 * events are not honored.
	 */
	spin_lock(&ipa3_ctx->disconnect_lock);
	ep->disconnect_in_progress = true;
	spin_unlock(&ipa3_ctx->disconnect_lock);

	/* If flow is disabled at this point, restore the ep state. */
	ep_ctrl.ipa_ep_delay = false;
	ep_ctrl.ipa_ep_suspend = false;
	ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
	return 0;
}