  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include "ipa_i.h"
  6. #define IPA_UC_NTN_DB_PA_TX 0x79620DC
  7. #define IPA_UC_NTN_DB_PA_RX 0x79620D8
/*
 * ipa3_uc_ntn_event_log_info_handler() - uC event-log info callback for NTN.
 * @uc_event_top_mmio: mapped uC event-log top structure
 *
 * Validates that the uC advertises the ETH (NTN) protocol and that the
 * stats structure size reported by firmware matches the driver's layout,
 * then computes the SRAM offset of the NTN stats area and ioremaps it
 * into ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio for later stat reads.
 */
static void ipa3_uc_ntn_event_log_info_handler(
struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
{
	struct Ipa3HwEventInfoData_t *statsPtr = &uc_event_top_mmio->statsInfo;

	/* NTN stats ride on the ETH protocol bit of the uC feature mask */
	if ((uc_event_top_mmio->protocolMask &
		(1 << IPA_HW_PROTOCOL_ETH)) == 0) {
		IPAERR("NTN protocol missing 0x%x\n",
			uc_event_top_mmio->protocolMask);
		return;
	}

	/* a size mismatch means FW and driver disagree on the struct layout */
	if (statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.size !=
		sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
		IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
			sizeof(struct Ipa3HwStatsNTNInfoData_t),
			statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.size);
		return;
	}

	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst =
		uc_event_top_mmio->statsInfo.baseAddrOffset +
		statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.offset;
	IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);

	/* reject an offset whose stats block would fall outside shared SRAM */
	if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
		sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
		ipa3_ctx->ctrl->ipa_reg_base_ofst +
		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) +
		ipa3_ctx->smem_sz) {
		IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
			ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
		return;
	}

	/* map the stats window; left mapped for the driver's lifetime */
	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
		ioremap(ipa3_ctx->ipa_wrapper_base +
		ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
		sizeof(struct Ipa3HwStatsNTNInfoData_t));
	if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
		IPAERR("fail to ioremap uc ntn stats\n");
		return;
	}
}
/**
 * ipa3_get_ntn_stats() - Query NTN statistics from uC
 * @stats: [inout] stats blob from client populated by driver
 *
 * Returns: 0 on success, negative on failure
 *
 * @note Cannot be called from atomic context
 *
 */
  56. int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
  57. {
  58. #define TX_STATS(y) stats->tx_ch_stats[0].y = \
  59. ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
  60. #define RX_STATS(y) stats->rx_ch_stats[0].y = \
  61. ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
  62. if (unlikely(!ipa3_ctx)) {
  63. IPAERR("IPA driver was not initialized\n");
  64. return -EINVAL;
  65. }
  66. if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
  67. IPAERR("bad parms stats=%pK ntn_stats=%pK\n",
  68. stats,
  69. ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
  70. return -EINVAL;
  71. }
  72. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  73. TX_STATS(num_pkts_processed);
  74. TX_STATS(ring_stats.ringFull);
  75. TX_STATS(ring_stats.ringEmpty);
  76. TX_STATS(ring_stats.ringUsageHigh);
  77. TX_STATS(ring_stats.ringUsageLow);
  78. TX_STATS(ring_stats.RingUtilCount);
  79. TX_STATS(gsi_stats.bamFifoFull);
  80. TX_STATS(gsi_stats.bamFifoEmpty);
  81. TX_STATS(gsi_stats.bamFifoUsageHigh);
  82. TX_STATS(gsi_stats.bamFifoUsageLow);
  83. TX_STATS(gsi_stats.bamUtilCount);
  84. TX_STATS(num_db);
  85. TX_STATS(num_qmb_int_handled);
  86. TX_STATS(ipa_pipe_number);
  87. RX_STATS(num_pkts_processed);
  88. RX_STATS(ring_stats.ringFull);
  89. RX_STATS(ring_stats.ringEmpty);
  90. RX_STATS(ring_stats.ringUsageHigh);
  91. RX_STATS(ring_stats.ringUsageLow);
  92. RX_STATS(ring_stats.RingUtilCount);
  93. RX_STATS(gsi_stats.bamFifoFull);
  94. RX_STATS(gsi_stats.bamFifoEmpty);
  95. RX_STATS(gsi_stats.bamFifoUsageHigh);
  96. RX_STATS(gsi_stats.bamFifoUsageLow);
  97. RX_STATS(gsi_stats.bamUtilCount);
  98. RX_STATS(num_db);
  99. RX_STATS(num_qmb_int_handled);
  100. RX_STATS(ipa_pipe_number);
  101. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  102. return 0;
  103. }
  104. int ipa3_ntn_uc_reg_rdyCB(void (*ipa_ready_cb)(void *), void *user_data)
  105. {
  106. int ret;
  107. if (!ipa3_ctx) {
  108. IPAERR("IPA ctx is null\n");
  109. return -ENXIO;
  110. }
  111. ret = ipa3_uc_state_check();
  112. if (ret) {
  113. ipa3_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
  114. ipa3_ctx->uc_ntn_ctx.priv = user_data;
  115. return 0;
  116. }
  117. return -EEXIST;
  118. }
  119. void ipa3_ntn_uc_dereg_rdyCB(void)
  120. {
  121. ipa3_ctx->uc_ntn_ctx.uc_ready_cb = NULL;
  122. ipa3_ctx->uc_ntn_ctx.priv = NULL;
  123. }
  124. static void ipa3_uc_ntn_loaded_handler(void)
  125. {
  126. if (!ipa3_ctx) {
  127. IPAERR("IPA ctx is null\n");
  128. return;
  129. }
  130. if (ipa3_ctx->uc_ntn_ctx.uc_ready_cb) {
  131. ipa3_ctx->uc_ntn_ctx.uc_ready_cb(
  132. ipa3_ctx->uc_ntn_ctx.priv);
  133. ipa3_ctx->uc_ntn_ctx.uc_ready_cb =
  134. NULL;
  135. ipa3_ctx->uc_ntn_ctx.priv = NULL;
  136. }
  137. }
  138. int ipa3_ntn_init(void)
  139. {
  140. struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
  141. uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
  142. ipa3_uc_ntn_event_log_info_handler;
  143. uc_ntn_cbs.ipa_uc_loaded_hdlr =
  144. ipa3_uc_ntn_loaded_handler;
  145. ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
  146. return 0;
  147. }
/*
 * ipa3_uc_send_ntn_setup_pipe_cmd() - send CHANNEL_SET_UP offload cmd to uC.
 * @ntn_info: ring/buffer layout of the peer (EMAC) channel
 * @dir: channel direction cookie passed through to the uC
 *
 * Builds the setup command in DMA-coherent memory — the command container
 * layout differs for IPA v4.0+ — and sends it to the uC, waiting up to
 * 10 seconds for a SUCCESS status.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int ipa3_uc_send_ntn_setup_pipe_cmd(
struct ipa_ntn_setup_info *ntn_info, u8 dir)
{
	int ipa_ep_idx;
	int result = 0;
	struct ipa_mem_buffer cmd;
	struct Ipa3HwNtnSetUpCmdData_t *Ntn_params;
	struct IpaHwOffloadSetUpCmdData_t *cmd_data;
	struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data_v4_0;

	if (ntn_info == NULL) {
		IPAERR("invalid input\n");
		return -EINVAL;
	}

	ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
	if (ipa_ep_idx == -1) {
		IPAERR("fail to get ep idx.\n");
		return -EFAULT;
	}

	IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);

	IPADBG("ring_base_pa = 0x%pa\n",
		&ntn_info->ring_base_pa);
	IPADBG("ring_base_iova = 0x%pa\n",
		&ntn_info->ring_base_iova);
	IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
	IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
	IPADBG("buff_pool_base_iova = 0x%pa\n", &ntn_info->buff_pool_base_iova);
	IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
	IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
	IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);

	/* v4.0+ firmware uses a different command container layout */
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
		cmd.size = sizeof(*cmd_data_v4_0);
	else
		cmd.size = sizeof(*cmd_data);
	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
		&cmd.phys_base, GFP_KERNEL);
	if (cmd.base == NULL) {
		IPAERR("fail to get DMA memory.\n");
		return -ENOMEM;
	}

	/* both layouts carry the same NTN per-channel parameter block */
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
		cmd_data_v4_0 = (struct IpaHwOffloadSetUpCmdData_t_v4_0 *)
			cmd.base;
		cmd_data_v4_0->protocol = IPA_HW_PROTOCOL_ETH;
		Ntn_params = &cmd_data_v4_0->SetupCh_params.NtnSetupCh_params;
	} else {
		cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
		cmd_data->protocol = IPA_HW_PROTOCOL_ETH;
		Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
	}

	/* with SMMU enabled the uC sees IOVAs; otherwise raw physical addrs */
	if (ntn_info->smmu_enabled) {
		Ntn_params->ring_base_pa = (u32)ntn_info->ring_base_iova;
		Ntn_params->buff_pool_base_pa =
			(u32)ntn_info->buff_pool_base_iova;
	} else {
		Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
		Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
	}

	Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
	Ntn_params->num_buffers = ntn_info->num_buffers;
	Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
	Ntn_params->data_buff_size = ntn_info->data_buff_size;
	Ntn_params->ipa_pipe_number = ipa_ep_idx;
	Ntn_params->dir = dir;

	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
		IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
		false, 10*HZ);
	if (result)
		result = -EFAULT;

	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
	return result;
}
  220. static int ipa3_smmu_map_uc_ntn_pipes(struct ipa_ntn_setup_info *params,
  221. bool map)
  222. {
  223. struct iommu_domain *smmu_domain;
  224. int result;
  225. int i;
  226. u64 iova;
  227. phys_addr_t pa;
  228. u64 iova_p;
  229. phys_addr_t pa_p;
  230. u32 size_p;
  231. if (params->data_buff_size > PAGE_SIZE) {
  232. IPAERR("invalid data buff size\n");
  233. return -EINVAL;
  234. }
  235. result = ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa,
  236. PAGE_SIZE), map, IPA_SMMU_CB_UC);
  237. if (result) {
  238. IPAERR("failed to %s uC regs %d\n",
  239. map ? "map" : "unmap", result);
  240. goto fail;
  241. }
  242. if (params->smmu_enabled) {
  243. IPADBG("smmu is enabled on EMAC\n");
  244. result = ipa3_smmu_map_peer_buff((u64)params->ring_base_iova,
  245. params->ntn_ring_size, map, params->ring_base_sgt,
  246. IPA_SMMU_CB_UC);
  247. if (result) {
  248. IPAERR("failed to %s ntn ring %d\n",
  249. map ? "map" : "unmap", result);
  250. goto fail_map_ring;
  251. }
  252. result = ipa3_smmu_map_peer_buff(
  253. (u64)params->buff_pool_base_iova,
  254. params->num_buffers * 4, map,
  255. params->buff_pool_base_sgt, IPA_SMMU_CB_UC);
  256. if (result) {
  257. IPAERR("failed to %s pool buffs %d\n",
  258. map ? "map" : "unmap", result);
  259. goto fail_map_buffer_smmu_enabled;
  260. }
  261. } else {
  262. IPADBG("smmu is disabled on EMAC\n");
  263. result = ipa3_smmu_map_peer_buff((u64)params->ring_base_pa,
  264. params->ntn_ring_size, map, NULL, IPA_SMMU_CB_UC);
  265. if (result) {
  266. IPAERR("failed to %s ntn ring %d\n",
  267. map ? "map" : "unmap", result);
  268. goto fail_map_ring;
  269. }
  270. result = ipa3_smmu_map_peer_buff(params->buff_pool_base_pa,
  271. params->num_buffers * 4, map, NULL, IPA_SMMU_CB_UC);
  272. if (result) {
  273. IPAERR("failed to %s pool buffs %d\n",
  274. map ? "map" : "unmap", result);
  275. goto fail_map_buffer_smmu_disabled;
  276. }
  277. }
  278. if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
  279. IPADBG("AP SMMU is set to s1 bypass\n");
  280. return 0;
  281. }
  282. smmu_domain = ipa3_get_smmu_domain();
  283. if (!smmu_domain) {
  284. IPAERR("invalid smmu domain\n");
  285. return -EINVAL;
  286. }
  287. for (i = 0; i < params->num_buffers; i++) {
  288. iova = (u64)params->data_buff_list[i].iova;
  289. pa = (phys_addr_t)params->data_buff_list[i].pa;
  290. IPA_SMMU_ROUND_TO_PAGE(iova, pa, params->data_buff_size, iova_p,
  291. pa_p, size_p);
  292. IPADBG("%s 0x%llx to 0x%pa size %d\n", map ? "mapping" :
  293. "unmapping", iova_p, &pa_p, size_p);
  294. if (map) {
  295. result = ipa3_iommu_map(smmu_domain, iova_p, pa_p,
  296. size_p, IOMMU_READ | IOMMU_WRITE);
  297. if (result)
  298. IPAERR("Fail to map 0x%llx\n", iova);
  299. } else {
  300. result = iommu_unmap(smmu_domain, iova_p, size_p);
  301. if (result != params->data_buff_size)
  302. IPAERR("Fail to unmap 0x%llx\n", iova);
  303. }
  304. if (result) {
  305. if (params->smmu_enabled)
  306. goto fail_map_data_buff_smmu_enabled;
  307. else
  308. goto fail_map_data_buff_smmu_disabled;
  309. }
  310. }
  311. return 0;
  312. fail_map_data_buff_smmu_enabled:
  313. ipa3_smmu_map_peer_buff((u64)params->buff_pool_base_iova,
  314. params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC);
  315. goto fail_map_buffer_smmu_enabled;
  316. fail_map_data_buff_smmu_disabled:
  317. ipa3_smmu_map_peer_buff(params->buff_pool_base_pa,
  318. params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC);
  319. goto fail_map_buffer_smmu_disabled;
  320. fail_map_buffer_smmu_enabled:
  321. ipa3_smmu_map_peer_buff((u64)params->ring_base_iova,
  322. params->ntn_ring_size, !map, params->ring_base_sgt,
  323. IPA_SMMU_CB_UC);
  324. goto fail_map_ring;
  325. fail_map_buffer_smmu_disabled:
  326. ipa3_smmu_map_peer_buff((u64)params->ring_base_pa,
  327. params->ntn_ring_size, !map, NULL, IPA_SMMU_CB_UC);
  328. fail_map_ring:
  329. ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa,
  330. PAGE_SIZE), !map, IPA_SMMU_CB_UC);
  331. fail:
  332. return result;
  333. }
/**
 * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes
 * @in: [in] UL/DL channel configuration from the offload client
 * @notify: [in] client notification callback for the UL pipe
 * @priv: [in] opaque cookie passed to @notify
 * @hdr_len: [in] header length to configure on both pipes
 * @outp: [out] doorbell physical addresses returned to the client
 *
 * Returns: 0 on success, negative on failure
 */
  337. int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
  338. ipa_notify_cb notify, void *priv, u8 hdr_len,
  339. struct ipa_ntn_conn_out_params *outp)
  340. {
  341. struct ipa3_ep_context *ep_ul;
  342. struct ipa3_ep_context *ep_dl;
  343. int ipa_ep_idx_ul;
  344. int ipa_ep_idx_dl;
  345. int result = 0;
  346. if (in == NULL) {
  347. IPAERR("invalid input\n");
  348. return -EINVAL;
  349. }
  350. ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
  351. if (ipa_ep_idx_ul == IPA_EP_NOT_ALLOCATED ||
  352. ipa_ep_idx_ul >= IPA3_MAX_NUM_PIPES) {
  353. IPAERR("fail to alloc UL EP ipa_ep_idx_ul=%d\n",
  354. ipa_ep_idx_ul);
  355. return -EFAULT;
  356. }
  357. ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
  358. if (ipa_ep_idx_dl == IPA_EP_NOT_ALLOCATED ||
  359. ipa_ep_idx_dl >= IPA3_MAX_NUM_PIPES) {
  360. IPAERR("fail to alloc DL EP ipa_ep_idx_dl=%d\n",
  361. ipa_ep_idx_dl);
  362. return -EFAULT;
  363. }
  364. ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
  365. ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
  366. if (ep_ul->valid || ep_dl->valid) {
  367. IPAERR("EP already allocated ul:%d dl:%d\n",
  368. ep_ul->valid, ep_dl->valid);
  369. return -EFAULT;
  370. }
  371. memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys));
  372. memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys));
  373. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  374. /* setup ul ep cfg */
  375. ep_ul->valid = 1;
  376. ep_ul->client = in->ul.client;
  377. ep_ul->client_notify = notify;
  378. ep_ul->priv = priv;
  379. memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
  380. ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
  381. ep_ul->cfg.hdr.hdr_len = hdr_len;
  382. ep_ul->cfg.mode.mode = IPA_BASIC;
  383. if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
  384. IPAERR("fail to setup ul pipe cfg\n");
  385. result = -EFAULT;
  386. goto fail;
  387. }
  388. result = ipa3_smmu_map_uc_ntn_pipes(&in->ul, true);
  389. if (result) {
  390. IPAERR("failed to map SMMU for UL %d\n", result);
  391. goto fail;
  392. }
  393. result = ipa3_enable_data_path(ipa_ep_idx_ul);
  394. if (result) {
  395. IPAERR("Enable data path failed res=%d pipe=%d.\n", result,
  396. ipa_ep_idx_ul);
  397. result = -EFAULT;
  398. goto fail_smmu_unmap_ul;
  399. }
  400. if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
  401. IPAERR("fail to send cmd to uc for ul pipe\n");
  402. result = -EFAULT;
  403. goto fail_disable_dp_ul;
  404. }
  405. ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
  406. outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
  407. ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
  408. IPADBG("client %d (ep: %d) connected\n", in->ul.client,
  409. ipa_ep_idx_ul);
  410. /* setup dl ep cfg */
  411. ep_dl->valid = 1;
  412. ep_dl->client = in->dl.client;
  413. memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg));
  414. ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
  415. ep_dl->cfg.hdr.hdr_len = hdr_len;
  416. ep_dl->cfg.mode.mode = IPA_BASIC;
  417. if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
  418. IPAERR("fail to setup dl pipe cfg\n");
  419. result = -EFAULT;
  420. goto fail_disable_dp_ul;
  421. }
  422. result = ipa3_smmu_map_uc_ntn_pipes(&in->dl, true);
  423. if (result) {
  424. IPAERR("failed to map SMMU for DL %d\n", result);
  425. goto fail_disable_dp_ul;
  426. }
  427. result = ipa3_enable_data_path(ipa_ep_idx_dl);
  428. if (result) {
  429. IPAERR("Enable data path failed res=%d pipe=%d.\n", result,
  430. ipa_ep_idx_dl);
  431. result = -EFAULT;
  432. goto fail_smmu_unmap_dl;
  433. }
  434. if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
  435. IPAERR("fail to send cmd to uc for dl pipe\n");
  436. result = -EFAULT;
  437. goto fail_disable_dp_dl;
  438. }
  439. outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
  440. ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
  441. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  442. IPADBG("client %d (ep: %d) connected\n", in->dl.client,
  443. ipa_ep_idx_dl);
  444. return 0;
  445. fail_disable_dp_dl:
  446. ipa3_disable_data_path(ipa_ep_idx_dl);
  447. fail_smmu_unmap_dl:
  448. ipa3_smmu_map_uc_ntn_pipes(&in->dl, false);
  449. fail_disable_dp_ul:
  450. ipa3_disable_data_path(ipa_ep_idx_ul);
  451. fail_smmu_unmap_ul:
  452. ipa3_smmu_map_uc_ntn_pipes(&in->ul, false);
  453. fail:
  454. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  455. return result;
  456. }
/**
 * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes
 * @ipa_ep_idx_ul: [in] UL pipe index returned at setup time
 * @ipa_ep_idx_dl: [in] DL pipe index returned at setup time
 * @params: [in] the channel configuration used at setup (for SMMU unmap)
 *
 * Returns: 0 on success, negative on failure
 */
  460. int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
  461. int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params)
  462. {
  463. struct ipa_mem_buffer cmd;
  464. struct ipa3_ep_context *ep_ul, *ep_dl;
  465. struct IpaHwOffloadCommonChCmdData_t *cmd_data;
  466. struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data_v4_0;
  467. union Ipa3HwNtnCommonChCmdData_t *tear;
  468. int result = 0;
  469. IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
  470. IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
  471. ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
  472. ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
  473. if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
  474. ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
  475. IPAERR("channel bad state: ul %d dl %d\n",
  476. ep_ul->uc_offload_state, ep_dl->uc_offload_state);
  477. return -EFAULT;
  478. }
  479. atomic_set(&ep_ul->disconnect_in_progress, 1);
  480. atomic_set(&ep_dl->disconnect_in_progress, 1);
  481. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
  482. cmd.size = sizeof(*cmd_data_v4_0);
  483. else
  484. cmd.size = sizeof(*cmd_data);
  485. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  486. &cmd.phys_base, GFP_KERNEL);
  487. if (cmd.base == NULL) {
  488. IPAERR("fail to get DMA memory.\n");
  489. return -ENOMEM;
  490. }
  491. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  492. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
  493. cmd_data_v4_0 = (struct IpaHwOffloadCommonChCmdData_t_v4_0 *)
  494. cmd.base;
  495. cmd_data_v4_0->protocol = IPA_HW_PROTOCOL_ETH;
  496. tear = &cmd_data_v4_0->CommonCh_params.NtnCommonCh_params;
  497. } else {
  498. cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
  499. cmd_data->protocol = IPA_HW_PROTOCOL_ETH;
  500. tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
  501. }
  502. /* teardown the DL pipe */
  503. ipa3_disable_data_path(ipa_ep_idx_dl);
  504. /*
  505. * Reset ep before sending cmd otherwise disconnect
  506. * during data transfer will result into
  507. * enormous suspend interrupts
  508. */
  509. memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
  510. IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
  511. tear->params.ipa_pipe_number = ipa_ep_idx_dl;
  512. result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
  513. IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
  514. IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
  515. false, 10*HZ);
  516. if (result) {
  517. IPAERR("fail to tear down dl pipe\n");
  518. result = -EFAULT;
  519. goto fail;
  520. }
  521. /* unmap the DL pipe */
  522. result = ipa3_smmu_map_uc_ntn_pipes(&params->dl, false);
  523. if (result) {
  524. IPAERR("failed to unmap SMMU for DL %d\n", result);
  525. goto fail;
  526. }
  527. /* teardown the UL pipe */
  528. ipa3_disable_data_path(ipa_ep_idx_ul);
  529. tear->params.ipa_pipe_number = ipa_ep_idx_ul;
  530. result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
  531. IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
  532. IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
  533. false, 10*HZ);
  534. if (result) {
  535. IPAERR("fail to tear down ul pipe\n");
  536. result = -EFAULT;
  537. goto fail;
  538. }
  539. /* unmap the UL pipe */
  540. result = ipa3_smmu_map_uc_ntn_pipes(&params->ul, false);
  541. if (result) {
  542. IPAERR("failed to unmap SMMU for UL %d\n", result);
  543. goto fail;
  544. }
  545. ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
  546. memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
  547. IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
  548. fail:
  549. dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
  550. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  551. return result;
  552. }