dp_ipa.c

/*
 * Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifdef IPA_OFFLOAD

#include <qdf_ipa_wdi3.h>
#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_htt.h"
#include "dp_tx.h"
#include "dp_ipa.h"

/* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
#define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048)
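
/*
 * Note on buffer sizing (added for clarity; values are the defaults above,
 * not derived from platform configuration): each uC TX buffer is allocated
 * as uc_tx_buf_sz + ring_base_align - 1 bytes, e.g. 2048 + 8 - 1 = 2055
 * bytes, so that an 8-byte-aligned window of the full 2048 bytes always
 * fits inside the unaligned DMA allocation:
 *
 *	vaddr = qdf_align((unsigned long)vaddr_unaligned, 8);
 *	paddr = paddr_unaligned + (vaddr - vaddr_unaligned);
 */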
/**
 * dp_tx_ipa_uc_detach - Free autonomy TX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Free the TX buffers that were allocated for and posted to the WBM SRNG.
 *
 * Return: none
 */
static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int idx;
	uint32_t ring_base_align = 8;
	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
		if (soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx]) {
			qdf_mem_free_consistent(
				soc->osdev, soc->osdev->dev,
				alloc_size,
				soc->ipa_uc_tx_rsc.
					tx_buf_pool_vaddr_unaligned[idx],
				soc->ipa_uc_tx_rsc.
					tx_buf_pool_paddr_unaligned[idx],
				0);
			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
							(void *)NULL;
			soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned[idx] =
							(qdf_dma_addr_t)NULL;
		}
	}

	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned);
	soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned = NULL;
}

/**
 * dp_rx_ipa_uc_detach - Free autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function detaches DP RX from the main device context and
 * frees the DP RX resources.
 *
 * Return: none
 */
static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	/* TX resource detach */
	dp_tx_ipa_uc_detach(soc, pdev);

	/* RX resource detach */
	dp_rx_ipa_uc_detach(soc, pdev);

	return QDF_STATUS_SUCCESS;	/* success */
}

/**
 * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
 * @soc: data path instance
 * @pdev: physical device handle
 *
 * Allocate TX buffers from non-cacheable memory and
 * attach the allocated TX buffers to the WBM SRNG.
 *
 * Return: 0 on success, error code on failure
 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	void *buffer_vaddr_unaligned;
	void *buffer_vaddr;
	qdf_dma_addr_t buffer_paddr_unaligned;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng =
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	void *ring_entry;
	int num_entries;
	int retval = QDF_STATUS_SUCCESS;
	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc, (void *)wbm_srng, &srng_params);
	num_entries = srng_params.num_entries;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: requested %d buffers to be posted to wbm ring",
		  __func__, num_entries);

	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: IPA WBM Ring Tx buf pool vaddr alloc fail",
			  __func__);
		return -ENOMEM;
	}

	soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: IPA WBM Ring Tx buf pool paddr alloc fail",
			  __func__);
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		return -ENOMEM;
	}
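
	/*
	 * Explanatory note (added; describes the loop that follows): each DMA
	 * buffer is aligned to ring_base_align (8) and its physical address
	 * is programmed into the WBM descriptor as a low/high pair.  The high
	 * word is masked with 0x1f, i.e. only address bits [36:32] are kept,
	 * consistent with a 37-bit addressable range:
	 *
	 *	paddr_lo = (u64)paddr & 0x00000000ffffffff;
	 *	paddr_hi = ((u64)paddr & 0x0000001f00000000) >> 32;
	 */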
	hal_srng_access_start(soc->hal_soc, (void *)wbm_srng);

	/*
	 * Allocate Tx buffers as many as possible.
	 * Populate Tx buffers into WBM2IPA ring.
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP.
	 */
	for (tx_buffer_count = 0;
	     tx_buffer_count < num_entries - 1; tx_buffer_count++) {
		buffer_vaddr_unaligned = qdf_mem_alloc_consistent(soc->osdev,
			soc->osdev->dev, alloc_size, &buffer_paddr_unaligned);
		if (!buffer_vaddr_unaligned)
			break;

		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
						      (void *)wbm_srng);
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry\n",
				  __func__);
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				alloc_size, buffer_vaddr_unaligned,
				buffer_paddr_unaligned, 0);
			break;
		}

		buffer_vaddr = (void *)qdf_align((unsigned long)
			buffer_vaddr_unaligned, ring_base_align);
		buffer_paddr = buffer_paddr_unaligned +
			((unsigned long)(buffer_vaddr) -
			 (unsigned long)buffer_vaddr_unaligned);

		paddr_lo = ((u64)buffer_paddr & 0x00000000ffffffff);
		paddr_hi = ((u64)buffer_paddr & 0x0000001f00000000) >> 32;
		HAL_WBM_PADDR_LO_SET(ring_entry, paddr_lo);
		HAL_WBM_PADDR_HI_SET(ring_entry, paddr_hi);

		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
			= buffer_vaddr_unaligned;
		soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned[tx_buffer_count]
			= buffer_paddr_unaligned;
	}

	hal_srng_access_end(soc->hal_soc, wbm_srng);

	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: IPA WDI TX buffer: %d allocated\n",
			  __func__, tx_buffer_count);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: No IPA WDI TX buffer allocated\n",
			  __func__);
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}

/**
 * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int error;

	/* TX resource attach */
	error = dp_tx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC TX attach fail code %d\n",
			  __func__, error);
		return error;
	}

	/* RX resource attach */
	error = dp_rx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC RX attach fail code %d\n",
			  __func__, error);
		dp_tx_ipa_uc_detach(soc, pdev);
		return error;
	}

	return QDF_STATUS_SUCCESS;	/* success */
}

/*
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: core txrx pdev context
 *
 * Return: 0 on success
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;
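
	/*
	 * Explanatory note (added; not part of the original flow): the SRNG
	 * entry_size reported by hal_get_srng_params() is in 4-byte words,
	 * so every ring size stored below is converted to bytes with
	 * "(num_entries * entry_size) << 2".  For head/tail pointers that
	 * are shadow-register backed, the physical address handed to IPA is
	 * the register's offset from the HAL virtual base plus the device's
	 * physical base (mem_pa):
	 *
	 *	paddr = (hp_or_tp_addr - hal_soc->dev_base_addr) +
	 *		((struct hif_softc *)hal_soc->hif_handle)->mem_pa;
	 */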
	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: addr_offset=%x, dev_base_paddr=%x, ipa_tcl_hp_paddr=%x",
		  __func__, (unsigned int)addr_offset,
		  (unsigned int)dev_base_paddr,
		  (unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr));

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x",
		  __func__, (unsigned int)addr_offset,
		  (unsigned int)dev_base_paddr,
		  (unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr));

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: addr_offset=%x, dev_base_paddr=%x, ipa_reo_tp_paddr=%x",
		  __func__, (unsigned int)addr_offset,
		  (unsigned int)dev_base_paddr,
		  (unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr));

	hal_srng = pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc, (void *)hal_srng);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr = hp_addr;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: ipa_rx_refill_buf_hp_paddr=%x", __func__,
		  (unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr));

	return 0;
}

/**
 * dp_ipa_get_resource() - IPA client requests resource information
 * @ppdev: handle to the device instance
 *
 * The IPA client will request IPA UC related resource information.
 * Resource information will be distributed to the IPA module.
 * All of the required resources should be pre-allocated.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	ipa_res->tx_ring_base_paddr =
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr;
	ipa_res->tx_ring_size =
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size;
	ipa_res->tx_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	ipa_res->tx_comp_ring_base_paddr =
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr;
	ipa_res->tx_comp_ring_size =
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size;

	ipa_res->rx_rdy_ring_base_paddr =
		soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr;
	ipa_res->rx_rdy_ring_size =
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size;

	ipa_res->rx_refill_ring_base_paddr =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr;
	ipa_res->rx_refill_ring_size =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size;

	if ((0 == ipa_res->tx_comp_ring_base_paddr) ||
	    (0 == ipa_res->rx_rdy_ring_base_paddr))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
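
/*
 * Note (added): the addresses and sizes copied above are the ones captured
 * by dp_ipa_ring_resource_setup(); if that setup has not run, the TX
 * completion or RX ready ring base is still zero and this function reports
 * QDF_STATUS_E_FAILURE instead of handing IPA stale resources.
 */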

/**
 * dp_ipa_set_doorbell_paddr() - Set doorbell register physical address to SRNG
 * @ppdev: handle to the device instance
 *
 * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
 * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct hal_srng *wbm_srng =
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng *reo_srng =
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);
	hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr);
	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_op_response() - Handle OP command response from firmware
 * @ppdev: handle to the device instance
 * @op_msg: op response message from firmware
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	if (pdev->ipa_uc_op_cb) {
		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: IPA callback function is not registered",
			  __func__);
		qdf_mem_free(op_msg);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_register_op_cb() - Register OP handler function
 * @ppdev: handle to the device instance
 * @op_cb: handler function pointer
 * @usr_ctxt: user context passed back to the handler
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_register_op_cb(struct cdp_pdev *ppdev,
				 ipa_uc_op_cb_type op_cb,
				 void *usr_ctxt)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	pdev->ipa_uc_op_cb = op_cb;
	pdev->usr_ctxt = usr_ctxt;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_get_stat() - Get firmware WDI status
 * @ppdev: handle to the device instance
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *ppdev)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev handle
 * @skb: skb to transmit
 *
 * Return: NULL on success, the skb on failure (so the caller can free it)
 */
qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
{
	qdf_nbuf_t ret;

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = dp_tx_send((struct dp_vdev_t *)vdev, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
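
/*
 * Illustrative caller pattern (added; the actual caller lives outside this
 * file): because dp_tx_send_ipa_data_frame() hands the nbuf back on failure,
 * a hypothetical exception-path sender would do something like:
 *
 *	qdf_nbuf_t failed = dp_tx_send_ipa_data_frame(vdev, skb);
 *
 *	if (failed)
 *		qdf_nbuf_free(failed);	// DP did not consume the frame
 */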

/**
 * dp_ipa_enable_autonomy() - Enable autonomy RX path
 * @ppdev: handle to the device instance
 *
 * Route all RX packets to the IPA REO ring by programming the
 * Destination_Ring_Ctrl_IX_0 REO register to point at the IPA REO ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	uint32_t remap_val;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
	hal_reo_remap_IX0(soc->hal_soc, remap_val);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_disable_autonomy() - Disable autonomy RX path
 * @ppdev: handle to the device instance
 *
 * Disable RX packet routing to the IPA REO ring by programming the
 * Destination_Ring_Ctrl_IX_0 REO register back to the default mapping.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	uint32_t remap_val;

	/* Call HAL API to restore the default REO ring mapping */
	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW1) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW2) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW3) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW2) |
		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
	hal_reo_remap_IX0(soc->hal_soc, remap_val);

	return QDF_STATUS_SUCCESS;
}
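
/*
 * Note (added): HAL_REO_REMAP_VAL() builds one field of the REO
 * Destination_Ring_Ctrl_IX_0 register.  With autonomy RX enabled, the
 * SW1..SW4 destination indications are all remapped onto SW4, i.e. the
 * REO2SW4 ring that IPA consumes, so the host CPU is kept out of the RX
 * data path; disabling autonomy restores per-ring delivery (with SW4
 * traffic steered back to SW2 rather than to the IPA ring).
 */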

/* This should be configurable per H/W configuration enable status */
#define L3_HEADER_PADDING 2
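
/*
 * Note (added): with a 14-byte Ethernet header in front of the payload, a
 * 2-byte pad keeps the IP header 4-byte aligned.  The RX pipe setup below
 * therefore programs its packet offset as RX_PKT_TLVS_LEN (the DP RX TLV
 * area) plus L3_HEADER_PADDING.
 */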

/**
 * dp_ipa_setup() - Setup and connect IPA pipes
 * @ppdev: handle to the device instance
 * @ipa_i2w_cb: IPA to WLAN callback
 * @ipa_w2i_cb: WLAN to IPA callback
 * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
 * @ipa_desc_size: IPA descriptor size
 * @ipa_priv: handle to the HTT instance
 * @is_rm_enabled: Is IPA RM enabled or not
 * @tx_pipe_handle: pointer to Tx pipe handle
 * @rx_pipe_handle: pointer to Rx pipe handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	qdf_ipa_wdi3_setup_info_t *tx;
	qdf_ipa_wdi3_setup_info_t *rx;
	qdf_ipa_wdi3_conn_in_params_t pipe_in;
	qdf_ipa_wdi3_conn_out_params_t pipe_out;
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;
	int ret;

	/*
	 * tx and rx point into pipe_in once assigned below, so zeroing
	 * pipe_in/pipe_out is sufficient and avoids the earlier overrun of
	 * the pointer variables via qdf_mem_zero(&tx, sizeof(setup_info)).
	 */
	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
	qdf_mem_zero(&pipe_out, sizeof(pipe_out));

	/* TX PIPE */
	/*
	 * Transfer Ring: WBM Ring
	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
	 * Event Ring: TCL ring
	 * Event Ring Doorbell PA: TCL Head Pointer Address
	 */
	tx = &QDF_IPA_WDI3_CONN_IN_PARAMS_TX(&pipe_in);
	QDF_IPA_WDI3_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
	QDF_IPA_WDI3_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
	QDF_IPA_WDI3_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
	QDF_IPA_WDI3_SETUP_INFO_MODE(tx) = IPA_BASIC;
	QDF_IPA_WDI3_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
	QDF_IPA_WDI3_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		ipa_res->tx_comp_ring_base_paddr;
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		ipa_res->tx_comp_ring_size;
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		ipa_res->tx_ring_base_paddr;
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
	/* TCL Head Pointer Address */
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI3_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI3_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI3_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
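
	/*
	 * Explanatory note (added): the descriptor format template filled in
	 * above is the TCL data command that IPA replays for every TX frame.
	 * It appears to be laid out as a TLV header word followed by the
	 * tcl_data_cmd body, which is why tcl_desc_ptr is taken as
	 * "template pointer + 1", i.e. one element past the TLV header
	 * written by HAL_TX_DESC_SET_TLV_HDR().
	 */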

	/* RX PIPE */
	/*
	 * Transfer Ring: REO Ring
	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
	 * Event Ring: FW ring
	 * Event Ring Doorbell PA: FW Head Pointer Address
	 */
	rx = &QDF_IPA_WDI3_CONN_IN_PARAMS_RX(&pipe_in);
	QDF_IPA_WDI3_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
	QDF_IPA_WDI3_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
	QDF_IPA_WDI3_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
	QDF_IPA_WDI3_SETUP_INFO_MODE(rx) = IPA_BASIC;
	QDF_IPA_WDI3_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		ipa_res->rx_rdy_ring_base_paddr;
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		ipa_res->rx_rdy_ring_size;
	/* REO Tail Pointer Address */
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		ipa_res->rx_refill_ring_base_paddr;
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(rx) =
		ipa_res->rx_refill_ring_size;
	/* FW Head Pointer Address */
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI3_SETUP_INFO_PKT_OFFSET(rx) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;

	QDF_IPA_WDI3_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
	QDF_IPA_WDI3_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;

	/* Connect WDI IPA PIPE */
	ret = qdf_ipa_wdi3_conn_pipes(&pipe_in, &pipe_out);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_conn_pipes: IPA pipe setup failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	/* IPA uC Doorbell registers */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
		  __func__,
		  (unsigned int)QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
		  (unsigned int)QDF_IPA_WDI3_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));

	ipa_res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
	ipa_res->tx_comp_doorbell_vaddr =
		QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
	ipa_res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI3_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
		  __func__,
		  "transfer_ring_base_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
		  "transfer_ring_size",
		  QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(tx),
		  "transfer_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
		  "event_ring_base_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(tx),
		  "event_ring_size",
		  QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(tx),
		  "event_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
		  "num_pkt_buffers",
		  QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(tx),
		  "tx_comp_doorbell_paddr",
		  (void *)ipa_res->tx_comp_doorbell_paddr);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
		  __func__,
		  "transfer_ring_base_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
		  "transfer_ring_size",
		  QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(rx),
		  "transfer_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
		  "event_ring_base_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(rx),
		  "event_ring_size",
		  QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(rx),
		  "event_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
		  "num_pkt_buffers",
		  QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(rx),
		  "rx_ready_doorbell_paddr",
		  (void *)ipa_res->rx_ready_doorbell_paddr);

	return QDF_STATUS_SUCCESS;
}
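
/*
 * Illustrative call flow (added; the exact sequencing is owned by the
 * caller, e.g. the HDD/IPA glue layer, and is not defined in this file):
 *
 *	dp_ipa_get_resource(pdev);          // snapshot ring/buffer info
 *	dp_ipa_setup(pdev, ...);            // ipa_wdi3_conn_pipes()
 *	dp_ipa_set_doorbell_paddr(pdev);    // point SRNG HP/TP at IPA DBs
 *	dp_ipa_setup_iface(ifname, ...);    // register per-session header
 *	dp_ipa_enable_pipes(pdev);          // start WDI traffic
 *	dp_ipa_enable_autonomy(pdev);       // route RX to the IPA REO ring
 */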

/**
 * dp_ipa_cleanup() - Disconnect IPA pipes
 * @tx_pipe_handle: Tx pipe handle
 * @rx_pipe_handle: Rx pipe handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
{
	int ret;

	ret = qdf_ipa_wdi3_disconn_pipes();
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_disconn_pipes: IPA pipe cleanup failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_setup_iface() - Setup IPA header and register interface
 * @ifname: Interface name
 * @mac_addr: Interface MAC address
 * @prod_client: IPA prod client type
 * @cons_client: IPA cons client type
 * @session_id: Session ID
 * @is_ipv6_enabled: Is IPV6 enabled or not
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
			      qdf_ipa_client_type_t prod_client,
			      qdf_ipa_client_type_t cons_client,
			      uint8_t session_id, bool is_ipv6_enabled)
{
	qdf_ipa_wdi3_reg_intf_in_params_t in;
	qdf_ipa_wdi3_hdr_info_t hdr_info;
	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
	int ret = -EINVAL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Add Partial hdr: %s, %pM",
		  __func__, ifname, mac_addr);

	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi3_hdr_info_t));
	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);

	/* IPV4 header */
	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
	QDF_IPA_WDI3_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
	QDF_IPA_WDI3_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_WDI3_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
	QDF_IPA_WDI3_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;

	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
	memcpy(&(QDF_IPA_WDI3_REG_INTF_IN_PARAMS_HDR_INFO(&in)[0]), &hdr_info,
	       sizeof(qdf_ipa_wdi3_hdr_info_t));
	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA(&in) =
		htonl(session_id << 16);
	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
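
	/*
	 * Explanatory note (added): the interface metadata carries the WLAN
	 * session (vdev) id in bits 23:16, e.g. session_id 3 is programmed as
	 * htonl(3 << 16) with mask htonl(0x00FF0000), so IPA can associate
	 * traffic on this registered interface with the right session.
	 */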

	/* IPV6 header */
	if (is_ipv6_enabled) {
		uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IPV6);
		memcpy(&(QDF_IPA_WDI3_REG_INTF_IN_PARAMS_HDR_INFO(&in)[1]),
		       &hdr_info, sizeof(qdf_ipa_wdi3_hdr_info_t));
	}

	ret = qdf_ipa_wdi3_reg_intf(&in);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_reg_intf: register IPA interface failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
 * @ifname: Interface name
 * @is_ipv6_enabled: Is IPV6 enabled or not
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
{
	int ret;

	ret = qdf_ipa_wdi3_dereg_intf(ifname);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_dereg_intf: IPA pipe deregistration failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
 * @ppdev: handle to the device instance
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *ppdev)
{
	QDF_STATUS result;

	result = qdf_ipa_wdi3_enable_pipes();
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Enable WDI PIPE fail, code %d",
			  __func__, result);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_disable_pipes() - Suspend traffic and disable Tx/Rx pipes
 * @ppdev: handle to the device instance
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *ppdev)
{
	QDF_STATUS result;

	result = qdf_ipa_wdi3_disable_pipes();
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Disable WDI PIPE fail, code %d",
			  __func__, result);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
 * @client: Client type
 * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
{
	qdf_ipa_wdi3_perf_profile_t profile;
	QDF_STATUS result;

	profile.client = client;
	profile.max_supported_bw_mbps = max_supported_bw_mbps;

	result = qdf_ipa_wdi3_set_perf_profile(&profile);
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_set_perf_profile fail, code %d",
			  __func__, result);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
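
/*
 * Illustrative usage (added; the caller and the bandwidth figure are
 * assumptions, not taken from this file): a client that has estimated its
 * throughput need can request a matching IPA clock vote with, e.g.:
 *
 *	dp_ipa_set_perf_level(IPA_CLIENT_WLAN1_CONS, 800);
 */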
#endif