/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifdef IPA_OFFLOAD
#include <qdf_ipa_wdi3.h>
#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_htt.h"
#include "dp_tx.h"
#include "dp_ipa.h"

/* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
#define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048)

/**
 * dp_tx_ipa_uc_detach() - Free autonomy TX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Free allocated TX buffers with WBM SRNG
 *
 * Return: none
 */
static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int idx;
	uint32_t ring_base_align = 8;
	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
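	/*
	 * Note: alloc_size must mirror the size used when the buffers were
	 * allocated in dp_tx_ipa_uc_attach(), otherwise the consistent
	 * memory would be freed with the wrong length.
	 */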
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
		if (soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx]) {
			qdf_mem_free_consistent(
				soc->osdev, soc->osdev->dev,
				alloc_size,
				soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx],
				soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned[idx],
				0);
			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
								(void *)NULL;
			soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned[idx] =
							(qdf_dma_addr_t)NULL;
		}
	}

	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned);
	soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned = NULL;
}

/**
 * dp_rx_ipa_uc_detach() - Free autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function detaches DP RX from the main device context and
 * frees the DP RX resources. Currently a no-op.
 *
 * Return: none
 */
static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	/* TX resource detach */
	dp_tx_ipa_uc_detach(soc, pdev);

	/* RX resource detach */
	dp_rx_ipa_uc_detach(soc, pdev);

	return QDF_STATUS_SUCCESS;	/* success */
}

/**
 * dp_tx_ipa_uc_attach() - Allocate autonomy TX resources
 * @soc: data path instance
 * @pdev: physical device handle
 *
 * Allocate TX buffers from non-cacheable memory and
 * attach the allocated TX buffers to the WBM SRNG.
 *
 * Return: 0 on success, negative errno on failure
 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	void *buffer_vaddr_unaligned;
	void *buffer_vaddr;
	qdf_dma_addr_t buffer_paddr_unaligned;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng =
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	void *ring_entry;
	int num_entries;
	int retval = QDF_STATUS_SUCCESS;
	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc, (void *)wbm_srng, &srng_params);
	num_entries = srng_params.num_entries;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: requested %d buffers to be posted to wbm ring",
		  __func__, num_entries);

	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: IPA WBM Ring Tx buf pool vaddr alloc fail",
			  __func__);
		return -ENOMEM;
	}

	soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: IPA WBM Ring Tx buf pool paddr alloc fail",
			  __func__);
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		return -ENOMEM;
	}

	hal_srng_access_start(soc->hal_soc, (void *)wbm_srng);

	/*
	 * Allocate Tx buffers as many as possible.
	 * Populate Tx buffers into WBM2IPA ring.
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP.
	 */
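	/*
	 * One ring entry is intentionally left unused (num_entries - 1),
	 * so the head pointer never wraps onto the tail pointer; otherwise
	 * a completely full ring would be indistinguishable from an empty
	 * one.
	 */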
	for (tx_buffer_count = 0;
	     tx_buffer_count < num_entries - 1; tx_buffer_count++) {
		buffer_vaddr_unaligned = qdf_mem_alloc_consistent(soc->osdev,
			soc->osdev->dev, alloc_size, &buffer_paddr_unaligned);
		if (!buffer_vaddr_unaligned)
			break;

		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
						      (void *)wbm_srng);
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				alloc_size, buffer_vaddr_unaligned,
				buffer_paddr_unaligned, 0);
			break;
		}

		buffer_vaddr = (void *)qdf_align((unsigned long)
			buffer_vaddr_unaligned, ring_base_align);
		buffer_paddr = buffer_paddr_unaligned +
			((unsigned long)(buffer_vaddr) -
			 (unsigned long)buffer_vaddr_unaligned);
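		/*
		 * The WBM descriptor carries the buffer physical address as
		 * a LO/HI pair; the 0x1f mask below keeps bits [36:32], so
		 * effectively a 37-bit DMA address is programmed.
		 */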
		paddr_lo = ((u64)buffer_paddr & 0x00000000ffffffff);
		paddr_hi = ((u64)buffer_paddr & 0x0000001f00000000) >> 32;
		HAL_WBM_PADDR_LO_SET(ring_entry, paddr_lo);
		HAL_WBM_PADDR_HI_SET(ring_entry, paddr_hi);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
			= buffer_vaddr_unaligned;
		soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned[tx_buffer_count]
			= buffer_paddr_unaligned;
	}

	hal_srng_access_end(soc->hal_soc, wbm_srng);

	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: IPA WDI TX buffer: %d allocated",
			  __func__, tx_buffer_count);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: No IPA WDI TX buffer allocated",
			  __func__);
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}

/**
 * dp_rx_ipa_uc_attach() - Allocate autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int error;

	/* TX resource attach */
	error = dp_tx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC TX attach fail code %d",
			  __func__, error);
		return error;
	}

	/* RX resource attach */
	error = dp_rx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC RX attach fail code %d",
			  __func__, error);
		dp_tx_ipa_uc_detach(soc, pdev);
		return error;
	}

	return QDF_STATUS_SUCCESS;	/* success */
}

/*
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: physical device handle
 *
 * Return: 0 on success
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
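	/*
	 * srng_params.entry_size is reported in 4-byte words, so
	 * (num_entries * entry_size) << 2 below yields the ring size
	 * in bytes, which is what IPA expects.
	 */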
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: addr_offset=%x, dev_base_paddr=%x, ipa_tcl_hp_paddr=%x",
		  __func__, (unsigned int)addr_offset,
		  (unsigned int)dev_base_paddr,
		  (unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr));

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x",
		  __func__, (unsigned int)addr_offset,
		  (unsigned int)dev_base_paddr,
		  (unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr));

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: addr_offset=%x, dev_base_paddr=%x, ipa_reo_tp_paddr=%x",
		  __func__, (unsigned int)addr_offset,
		  (unsigned int)dev_base_paddr,
		  (unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr));

	/* IPA RX REFILL BUF Ring - pdev->rx_refill_buf_ring2 */
	hal_srng = pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc, (void *)hal_srng);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr = hp_addr;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: ipa_rx_refill_buf_hp_paddr=%x", __func__,
		  (unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr));

	return 0;
}

/**
 * dp_ipa_get_resource() - Client request resource information
 * @ppdev: handle to the device instance
 *
 * IPA client will request IPA UC related resource information.
 * Resource information will be distributed to the IPA module.
 * All of the required resources should be pre-allocated.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	ipa_res->tx_ring_base_paddr =
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr;
	ipa_res->tx_ring_size =
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size;
	ipa_res->tx_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	ipa_res->tx_comp_ring_base_paddr =
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr;
	ipa_res->tx_comp_ring_size =
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size;

	ipa_res->rx_rdy_ring_base_paddr =
		soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr;
	ipa_res->rx_rdy_ring_size =
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size;

	ipa_res->rx_refill_ring_base_paddr =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr;
	ipa_res->rx_refill_ring_size =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size;
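	/*
	 * The ring base addresses above are captured in
	 * dp_ipa_ring_resource_setup(); a zero value here means the rings
	 * were never set up for IPA offload.
	 */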
	if ((0 == ipa_res->tx_comp_ring_base_paddr) ||
	    (0 == ipa_res->rx_rdy_ring_base_paddr))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_set_doorbell_paddr() - Set doorbell register physical address to SRNG
 * @ppdev: handle to the device instance
 *
 * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
 * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct hal_srng *wbm_srng =
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng *reo_srng =
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);
	hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr);
	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_op_response() - Handle OP command response from firmware
 * @ppdev: handle to the device instance
 * @op_msg: op response message from firmware
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	if (pdev->ipa_uc_op_cb) {
		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: IPA callback function is not registered",
			  __func__);
		qdf_mem_free(op_msg);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_register_op_cb() - Register OP handler function
 * @ppdev: handle to the device instance
 * @op_cb: handler function pointer
 * @usr_ctxt: user context passed back to the handler
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_register_op_cb(struct cdp_pdev *ppdev,
				 ipa_uc_op_cb_type op_cb,
				 void *usr_ctxt)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	pdev->ipa_uc_op_cb = op_cb;
	pdev->usr_ctxt = usr_ctxt;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_get_stat() - Get firmware WDI status
 * @ppdev: handle to the device instance
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *ppdev)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev handle
 * @skb: skb to transmit
 *
 * Return: NULL on success, the skb back on failure
 */
qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
{
	qdf_nbuf_t ret;

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
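	/*
	 * dp_tx_send() hands the nbuf back when it could not be queued;
	 * propagate it to the caller so the buffer is not leaked.
	 */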
	ret = dp_tx_send((struct dp_vdev_t *)vdev, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}

/**
 * dp_ipa_enable_autonomy() - Enable autonomy RX path
 * @ppdev: handle to the device instance
 *
 * Route all RX packets to the IPA REO ring by programming the
 * Destination_Ring_Ctrl_IX_0 REO register to point at the IPA REO ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	uint32_t remap_val;

	/* Call HAL API to remap REO rings to REO2IPA ring */
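	/*
	 * SW1-SW3 destinations are all redirected to SW4 (the REO2SW4 ring
	 * handed to IPA), so every REO destination indirection lands on
	 * the IPA REO ring while autonomy mode is enabled.
	 */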
	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
	hal_reo_remap_IX0(soc->hal_soc, remap_val);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_disable_autonomy() - Disable autonomy RX path
 * @ppdev: handle to the device instance
 *
 * Disable RX packet routing to the IPA REO ring by programming the
 * Destination_Ring_Ctrl_IX_0 REO register back to the default mapping.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	uint32_t remap_val;

	/* Call HAL API to restore the default REO destination mapping */
	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW1) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW2) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW3) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW2) |
		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
	hal_reo_remap_IX0(soc->hal_soc, remap_val);

	return QDF_STATUS_SUCCESS;
}

/* This should be configurable per H/W configuration enable status */
#define L3_HEADER_PADDING	2

/**
 * dp_ipa_setup() - Setup and connect IPA pipes
 * @ppdev: handle to the device instance
 * @ipa_i2w_cb: IPA to WLAN callback
 * @ipa_w2i_cb: WLAN to IPA callback
 * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
 * @ipa_desc_size: IPA descriptor size
 * @ipa_priv: handle to the HTT instance
 * @is_rm_enabled: Is IPA RM enabled or not
 * @tx_pipe_handle: pointer to Tx pipe handle
 * @rx_pipe_handle: pointer to Rx pipe handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	qdf_ipa_wdi3_setup_info_t *tx;
	qdf_ipa_wdi3_setup_info_t *rx;
	qdf_ipa_wdi3_conn_in_params_t pipe_in;
	qdf_ipa_wdi3_conn_out_params_t pipe_out;
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;
	int ret;

	/*
	 * tx and rx point into pipe_in, so zeroing pipe_in/pipe_out is
	 * sufficient; zeroing &tx/&rx with the setup-info struct size
	 * would overrun the pointer variables themselves.
	 */
	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
	qdf_mem_zero(&pipe_out, sizeof(pipe_out));

	/* TX PIPE */
	/*
	 * Transfer Ring: WBM Ring
	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
	 * Event Ring: TCL ring
	 * Event Ring Doorbell PA: TCL Head Pointer Address
	 */
	tx = &QDF_IPA_WDI3_CONN_IN_PARAMS_TX(&pipe_in);
	QDF_IPA_WDI3_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
	QDF_IPA_WDI3_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
	QDF_IPA_WDI3_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
	QDF_IPA_WDI3_SETUP_INFO_MODE(tx) = IPA_BASIC;
	QDF_IPA_WDI3_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
	QDF_IPA_WDI3_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		ipa_res->tx_comp_ring_base_paddr;
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		ipa_res->tx_comp_ring_size;
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		ipa_res->tx_ring_base_paddr;
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
	/* TCL Head Pointer Address */
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI3_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI3_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI3_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */

	/* RX PIPE */
	/*
	 * Transfer Ring: REO Ring
	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
	 * Event Ring: FW ring
	 * Event Ring Doorbell PA: FW Head Pointer Address
	 */
	rx = &QDF_IPA_WDI3_CONN_IN_PARAMS_RX(&pipe_in);
	QDF_IPA_WDI3_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
	QDF_IPA_WDI3_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
	QDF_IPA_WDI3_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
	QDF_IPA_WDI3_SETUP_INFO_MODE(rx) = IPA_BASIC;
	QDF_IPA_WDI3_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;

	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		ipa_res->rx_rdy_ring_base_paddr;
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		ipa_res->rx_rdy_ring_size;
	/* REO Tail Pointer Address */
	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		ipa_res->rx_refill_ring_base_paddr;
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(rx) =
		ipa_res->rx_refill_ring_size;
	/* FW Head Pointer Address */
	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI3_SETUP_INFO_PKT_OFFSET(rx) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;

	QDF_IPA_WDI3_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
	QDF_IPA_WDI3_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;

	/* Connect WDI IPA PIPE */
	ret = qdf_ipa_wdi3_conn_pipes(&pipe_in, &pipe_out);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_conn_pipes: IPA pipe setup failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	/* IPA uC Doorbell registers */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
		  __func__,
		  (unsigned int)QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
		  (unsigned int)QDF_IPA_WDI3_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));

	ipa_res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
	ipa_res->tx_comp_doorbell_vaddr =
		QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
	ipa_res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI3_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
		  __func__,
		  "transfer_ring_base_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
		  "transfer_ring_size",
		  QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(tx),
		  "transfer_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
		  "event_ring_base_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(tx),
		  "event_ring_size",
		  QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(tx),
		  "event_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
		  "num_pkt_buffers",
		  QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(tx),
		  "tx_comp_doorbell_paddr",
		  (void *)ipa_res->tx_comp_doorbell_paddr);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
		  __func__,
		  "transfer_ring_base_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
		  "transfer_ring_size",
		  QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(rx),
		  "transfer_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
		  "event_ring_base_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(rx),
		  "event_ring_size",
		  QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(rx),
		  "event_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
		  "num_pkt_buffers",
		  QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(rx),
		  "rx_ready_doorbell_paddr",
		  (void *)ipa_res->rx_ready_doorbell_paddr);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_cleanup() - Disconnect IPA pipes
 * @tx_pipe_handle: Tx pipe handle
 * @rx_pipe_handle: Rx pipe handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
{
	int ret;

	ret = qdf_ipa_wdi3_disconn_pipes();
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_disconn_pipes: IPA pipe cleanup failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_setup_iface() - Setup IPA header and register interface
 * @ifname: Interface name
 * @mac_addr: Interface MAC address
 * @prod_client: IPA prod client type
 * @cons_client: IPA cons client type
 * @session_id: Session ID
 * @is_ipv6_enabled: Is IPV6 enabled or not
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
			      qdf_ipa_client_type_t prod_client,
			      qdf_ipa_client_type_t cons_client,
			      uint8_t session_id, bool is_ipv6_enabled)
{
	qdf_ipa_wdi3_reg_intf_in_params_t in;
	qdf_ipa_wdi3_hdr_info_t hdr_info;
	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
	int ret = -EINVAL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Add Partial hdr: %s, %pM",
		  __func__, ifname, mac_addr);

	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi3_hdr_info_t));
	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);

	/* IPV4 header */
	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
	QDF_IPA_WDI3_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
	QDF_IPA_WDI3_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_WDI3_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
	QDF_IPA_WDI3_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;

	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
	memcpy(&(QDF_IPA_WDI3_REG_INTF_IN_PARAMS_HDR_INFO(&in)[0]), &hdr_info,
	       sizeof(qdf_ipa_wdi3_hdr_info_t));
	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
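	/*
	 * The session (vdev) id is carried in one byte of the TX metadata
	 * word; the 0x00FF0000 mask below tells IPA which byte of the
	 * metadata to apply for this interface.
	 */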
	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA(&in) =
		htonl(session_id << 16);
	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);

	/* IPV6 header */
	if (is_ipv6_enabled) {
		uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IPV6);
		memcpy(&(QDF_IPA_WDI3_REG_INTF_IN_PARAMS_HDR_INFO(&in)[1]),
		       &hdr_info, sizeof(qdf_ipa_wdi3_hdr_info_t));
	}

	ret = qdf_ipa_wdi3_reg_intf(&in);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_reg_intf: register IPA interface failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
 * @ifname: Interface name
 * @is_ipv6_enabled: Is IPV6 enabled or not
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
{
	int ret;

	ret = qdf_ipa_wdi3_dereg_intf(ifname);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_dereg_intf: IPA pipe deregistration failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
 * @ppdev: handle to the device instance
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *ppdev)
{
	QDF_STATUS result;

	result = qdf_ipa_wdi3_enable_pipes();
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Enable WDI PIPE fail, code %d",
			  __func__, result);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_disable_pipes() - Suspend traffic and disable Tx/Rx pipes
 * @ppdev: handle to the device instance
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *ppdev)
{
	QDF_STATUS result;

	result = qdf_ipa_wdi3_disable_pipes();
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Disable WDI PIPE fail, code %d",
			  __func__, result);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
 * @client: Client type
 * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
{
	qdf_ipa_wdi3_perf_profile_t profile;
	QDF_STATUS result;

	profile.client = client;
	profile.max_supported_bw_mbps = max_supported_bw_mbps;

	result = qdf_ipa_wdi3_set_perf_profile(&profile);
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi3_set_perf_profile fail, code %d",
			  __func__, result);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
#endif /* IPA_OFFLOAD */