dp_ipa.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873
  1. /*
  2. * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #ifdef IPA_OFFLOAD
  17. #include <linux/ipa_wdi3.h>
  18. #include <qdf_types.h>
  19. #include <qdf_lock.h>
  20. #include <hal_api.h>
  21. #include <hif.h>
  22. #include <htt.h>
  23. #include <wdi_event.h>
  24. #include <queue.h>
  25. #include "dp_types.h"
  26. #include "dp_htt.h"
  27. #include "dp_tx.h"
  28. #include "dp_ipa.h"
/**
 * dp_tx_ipa_uc_detach - Free autonomy TX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Free allocated TX buffers with WBM SRNG
 *
 * Return: none
 */
static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int idx;

	/* Release every TX buffer that dp_tx_ipa_uc_attach() posted to the
	 * WBM ring; slots past alloc_tx_buf_cnt were never populated.
	 *
	 * NOTE(review): these buffers were obtained with
	 * qdf_mem_alloc_consistent() in dp_tx_ipa_uc_attach() but are
	 * released here with qdf_mem_free() — confirm this allocator
	 * pairing is intended, otherwise DMA-coherent memory may leak.
	 */
	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
		if (soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr[idx]) {
			qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr[idx]);
			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr[idx] = NULL;
		}
	}
	/* Drop the pointer array itself and clear it so a repeated detach
	 * is harmless.
	 */
	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr);
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr = NULL;
}
/**
 * dp_rx_ipa_uc_detach - free autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX into main device context
 * will free DP Rx resources.
 *
 * Currently a no-op: dp_rx_ipa_uc_attach() allocates nothing, so there
 * is nothing to release here yet.
 *
 * Return: none
 */
static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
  63. int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
  64. {
  65. /* TX resource detach */
  66. dp_tx_ipa_uc_detach(soc, pdev);
  67. /* RX resource detach */
  68. dp_rx_ipa_uc_detach(soc, pdev);
  69. return QDF_STATUS_SUCCESS; /* success */
  70. }
  71. /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
  72. #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048)
  73. /**
  74. * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
  75. * @soc: data path instance
  76. * @pdev: Physical device handle
  77. *
  78. * Allocate TX buffer from non-cacheable memory
  79. * Attache allocated TX buffers with WBM SRNG
  80. *
  81. * Return: int
  82. */
  83. static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
  84. {
  85. uint32_t tx_buffer_count;
  86. uint32_t ring_base_align = 8;
  87. void *buffer_vaddr_unaligned;
  88. void *buffer_vaddr;
  89. qdf_dma_addr_t buffer_paddr_unaligned;
  90. qdf_dma_addr_t buffer_paddr;
  91. struct hal_srng *wbm_srng =
  92. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  93. struct hal_srng_params srng_params;
  94. uint32_t paddr_lo;
  95. uint32_t paddr_hi;
  96. void *ring_entry;
  97. int num_entries;
  98. int retval = QDF_STATUS_SUCCESS;
  99. /*
  100. * Uncomment when dp_ops_cfg.cfg_attach is implemented
  101. * unsigned int uc_tx_buf_sz =
  102. * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
  103. */
  104. unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
  105. unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;
  106. hal_get_srng_params(soc->hal_soc, (void *)wbm_srng, &srng_params);
  107. num_entries = srng_params.num_entries;
  108. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  109. "requested %d buffers to be posted to wbm ring",
  110. num_entries);
  111. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr = qdf_mem_malloc(num_entries *
  112. sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr));
  113. if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr) {
  114. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  115. "%s: IPA WBM Ring mem_info alloc fail", __func__);
  116. return -ENOMEM;
  117. }
  118. hal_srng_access_start(soc->hal_soc, (void *)wbm_srng);
  119. /*
  120. * Allocate Tx buffers as many as possible
  121. * Populate Tx buffers into WBM2IPA ring
  122. * This initial buffer population will simulate H/W as source ring,
  123. * and update HP
  124. */
  125. for (tx_buffer_count = 0;
  126. tx_buffer_count < num_entries - 1; tx_buffer_count++) {
  127. buffer_vaddr_unaligned = qdf_mem_alloc_consistent(soc->osdev,
  128. soc->osdev->dev, alloc_size, &buffer_paddr_unaligned);
  129. if (!buffer_vaddr_unaligned)
  130. break;
  131. ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
  132. (void *)wbm_srng);
  133. if (!ring_entry) {
  134. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  135. "Failed to get WBM ring entry\n");
  136. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  137. alloc_size, buffer_vaddr_unaligned,
  138. buffer_paddr_unaligned, 0);
  139. goto fail;
  140. }
  141. buffer_vaddr = (void *)qdf_align((unsigned long)
  142. buffer_vaddr_unaligned, ring_base_align);
  143. buffer_paddr = buffer_paddr_unaligned +
  144. ((unsigned long)(buffer_vaddr) -
  145. (unsigned long)buffer_vaddr_unaligned);
  146. paddr_lo = ((u64)buffer_paddr & 0x00000000ffffffff);
  147. paddr_hi = ((u64)buffer_paddr & 0x0000001f00000000) >> 32;
  148. HAL_WBM_PADDR_LO_SET(ring_entry, paddr_lo);
  149. HAL_WBM_PADDR_HI_SET(ring_entry, paddr_hi);
  150. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr[tx_buffer_count] =
  151. buffer_vaddr;
  152. }
  153. hal_srng_access_end(soc->hal_soc, wbm_srng);
  154. soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
  155. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  156. "IPA WDI TX buffer: %d allocated\n",
  157. tx_buffer_count);
  158. return retval;
  159. fail:
  160. hal_srng_access_end(soc->hal_soc, wbm_srng);
  161. qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr);
  162. return retval;
  163. }
  164. /**
  165. * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
  166. * @soc: data path instance
  167. * @pdev: core txrx pdev context
  168. *
  169. * This function will attach a DP RX instance into the main
  170. * device (SOC) context.
  171. *
  172. * Return: QDF_STATUS_SUCCESS: success
  173. * QDF_STATUS_E_RESOURCES: Error return
  174. */
  175. static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
  176. {
  177. return QDF_STATUS_SUCCESS;
  178. }
  179. int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
  180. {
  181. int error;
  182. /* TX resource attach */
  183. error = dp_tx_ipa_uc_attach(soc, pdev);
  184. if (error) {
  185. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  186. "DP IPA UC TX attach fail code %d\n", error);
  187. return error;
  188. }
  189. /* RX resource attach */
  190. error = dp_rx_ipa_uc_attach(soc, pdev);
  191. if (error) {
  192. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  193. "DP IPA UC RX attach fail code %d\n", error);
  194. dp_tx_ipa_uc_detach(soc, pdev);
  195. return error;
  196. }
  197. return QDF_STATUS_SUCCESS; /* success */
  198. }
/*
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: physical device handle
 *
 * Caches the base address, size and head/tail pointer physical addresses
 * of the SRNGs shared with IPA (TCL data, TX completion/WBM, REO
 * destination, RX refill) into soc->ipa_uc_tx_rsc / ipa_uc_rx_rsc so
 * dp_ipa_get_resource() can hand them to the IPA driver later.
 *
 * Return: 0 always
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* NOTE(review): "<< 2" suggests entry_size is in 4-byte words and
	 * the product converts entries to bytes — confirm against the HAL
	 * srng_params definition.
	 */
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	/* Offset of the HP register from the device's CPU base, re-based
	 * onto the physical BAR so IPA can ring it directly.
	 */
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: addr_offset=%x, dev_base_paddr=%x, ipa_tcl_hp_paddr=%x",
		  __func__, (unsigned int)addr_offset,
		  (unsigned int)dev_base_paddr,
		  (unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr));

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/* WBM is a destination ring from the host's view, so IPA gets the
	 * tail-pointer register address.
	 */
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x",
		  __func__, (unsigned int)addr_offset,
		  (unsigned int)dev_base_paddr,
		  (unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr));

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: addr_offset=%x, dev_base_paddr=%x, ipa_reo_tp_paddr=%x",
		  __func__, (unsigned int)addr_offset,
		  (unsigned int)dev_base_paddr,
		  (unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr));

	/* IPA RX refill buffer ring (second refill ring of the pdev) */
	hal_srng = pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc, (void *)hal_srng);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr = hp_addr;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: ipa_rx_refill_buf_hp_paddr=%x", __func__,
		  (unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr));

	return 0;
}
  289. /**
  290. * dp_ipa_uc_get_resource() - Client request resource information
  291. * @ppdev - handle to the device instance
  292. *
  293. * IPA client will request IPA UC related resource information
  294. * Resource information will be distributed to IPA module
  295. * All of the required resources should be pre-allocated
  296. *
  297. * Return: QDF_STATUS
  298. */
  299. QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *ppdev)
  300. {
  301. struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
  302. struct dp_soc *soc = pdev->soc;
  303. struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
  304. ipa_res->tx_ring_base_paddr =
  305. soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr;
  306. ipa_res->tx_ring_size =
  307. soc->ipa_uc_tx_rsc.ipa_tcl_ring_size;
  308. ipa_res->tx_num_alloc_buffer =
  309. (uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
  310. ipa_res->tx_comp_ring_base_paddr =
  311. soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr;
  312. ipa_res->tx_comp_ring_size =
  313. soc->ipa_uc_tx_rsc.ipa_wbm_ring_size;
  314. ipa_res->rx_rdy_ring_base_paddr =
  315. soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr;
  316. ipa_res->rx_rdy_ring_size =
  317. soc->ipa_uc_rx_rsc.ipa_reo_ring_size;
  318. ipa_res->rx_refill_ring_base_paddr =
  319. soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr;
  320. ipa_res->rx_refill_ring_size =
  321. soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size;
  322. if ((0 == ipa_res->tx_comp_ring_base_paddr) ||
  323. (0 == ipa_res->rx_rdy_ring_base_paddr))
  324. return QDF_STATUS_E_FAILURE;
  325. return QDF_STATUS_SUCCESS;
  326. }
/**
 * dp_ipa_set_doorbell_paddr () - Set doorbell register physical address to SRNG
 * @ppdev - handle to the device instance
 *
 * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
 * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
 *
 * After this, hardware head-pointer updates on the WBM and REO rings land
 * directly on the IPA doorbell registers obtained from dp_ipa_setup().
 *
 * Return: QDF_STATUS_SUCCESS always
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct hal_srng *wbm_srng =
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng *reo_srng =
		soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);
	/* Presumably seeds the initial HP value through the doorbell's CPU
	 * mapping — confirm against the HAL implementation.
	 */
	hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr);
	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);

	return QDF_STATUS_SUCCESS;
}
  350. /**
  351. * dp_ipa_op_response() - Handle OP command response from firmware
  352. * @ppdev - handle to the device instance
  353. * @op_msg: op response message from firmware
  354. *
  355. * Return: none
  356. */
  357. QDF_STATUS dp_ipa_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg)
  358. {
  359. struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
  360. if (pdev->ipa_uc_op_cb) {
  361. pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
  362. } else {
  363. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  364. "%s: IPA callback function is not registered", __func__);
  365. qdf_mem_free(op_msg);
  366. return QDF_STATUS_E_FAILURE;
  367. }
  368. return QDF_STATUS_SUCCESS;
  369. }
  370. /**
  371. * dp_ipa_register_op_cb() - Register OP handler function
  372. * @ppdev - handle to the device instance
  373. * @op_cb: handler function pointer
  374. *
  375. * Return: none
  376. */
  377. QDF_STATUS dp_ipa_register_op_cb(struct cdp_pdev *ppdev,
  378. ipa_uc_op_cb_type op_cb,
  379. void *usr_ctxt)
  380. {
  381. struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
  382. pdev->ipa_uc_op_cb = op_cb;
  383. pdev->usr_ctxt = usr_ctxt;
  384. return QDF_STATUS_SUCCESS;
  385. }
  386. /**
  387. * dp_ipa_get_stat() - Get firmware wdi status
  388. * @ppdev - handle to the device instance
  389. *
  390. * Return: none
  391. */
  392. QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *ppdev)
  393. {
  394. /* TBD */
  395. return QDF_STATUS_SUCCESS;
  396. }
  397. /**
  398. * dp_tx_send_ipa_data_frame() - send IPA data frame
  399. * @vdev: vdev
  400. * @skb: skb
  401. *
  402. * Return: skb/ NULL is for success
  403. */
  404. qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
  405. {
  406. qdf_nbuf_t ret;
  407. /* Terminate the (single-element) list of tx frames */
  408. qdf_nbuf_set_next(skb, NULL);
  409. ret = dp_tx_send((struct dp_vdev_t *)vdev, skb);
  410. if (ret) {
  411. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  412. "Failed to tx");
  413. return ret;
  414. }
  415. return NULL;
  416. }
/**
 * dp_ipa_enable_autonomy() - Enable autonomy RX path
 * @ppdev - handle to the device instance
 *
 * Set all RX packet route to IPA REO ring
 * Program Destination_Ring_Ctrl_IX_0 REO register to point IPA REO ring
 *
 * Return: QDF_STATUS_SUCCESS always
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	uint32_t remap_val;

	/* Call HAL API to remap REO rings to REO2IPA ring: all SW1-SW4
	 * destinations collapse onto SW4 (the IPA REO ring) while TCL,
	 * RELEASE and FW keep their identity mappings (UNUSED goes to FW).
	 */
	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW4) |
		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
	hal_reo_remap_IX0(soc->hal_soc, remap_val);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_ipa_disable_autonomy() - Disable autonomy RX path
 * @ppdev - handle to the device instance
 *
 * Disable RX packet routing to IPA REO
 * Program Destination_Ring_Ctrl_IX_0 REO register to disable
 *
 * Return: QDF_STATUS_SUCCESS always
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	uint32_t remap_val;

	/* Call HAL API to restore the default REO routing: SW1-SW3 map to
	 * themselves and SW4 (the IPA ring) is redirected to SW2 so the
	 * host datapath receives traffic again.
	 */
	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW1) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW2) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW3) |
		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW2) |
		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
	hal_reo_remap_IX0(soc->hal_soc, remap_val);

	return QDF_STATUS_SUCCESS;
}
/* This should be configurable per H/W configuration enable status */
#define L3_HEADER_PADDING 2

/**
 * dp_ipa_setup() - Setup and connect IPA pipes
 * @ppdev - handle to the device instance
 * @ipa_i2w_cb: IPA to WLAN callback (currently unused in this body)
 * @ipa_w2i_cb: WLAN to IPA callback
 * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback (currently unused)
 * @ipa_desc_size: IPA descriptor size (currently unused)
 * @ipa_priv: handle to the HTT instance
 * @is_rm_enabled: Is IPA RM enabled or not (currently unused)
 * @tx_pipe_handle: pointer to Tx pipe handle (currently unused; WDI3
 *		    connects both pipes in one call)
 * @rx_pipe_handle: pointer to Rx pipe handle (currently unused)
 *
 * Builds the TX (WBM/TCL) and RX (REO/refill) pipe descriptions from the
 * resources cached in dp_ipa_get_resource(), preprograms the TCL
 * descriptor template, and connects both pipes via ipa_wdi3_conn_pipes().
 * On success the IPA uC doorbell addresses are stored in pdev->ipa_resource
 * for dp_ipa_set_doorbell_paddr().
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct ipa_wdi3_setup_info tx;
	struct ipa_wdi3_setup_info rx;
	struct ipa_wdi3_conn_in_params pipe_in;
	struct ipa_wdi3_conn_out_params pipe_out;
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;
	int ret;

	qdf_mem_zero(&tx, sizeof(struct ipa_wdi3_setup_info));
	qdf_mem_zero(&rx, sizeof(struct ipa_wdi3_setup_info));
	qdf_mem_zero(&pipe_in, sizeof(struct ipa_wdi3_conn_in_params));
	qdf_mem_zero(&pipe_out, sizeof(struct ipa_wdi3_conn_out_params));

	/* TX PIPE */
	/**
	 * Transfer Ring: WBM Ring
	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
	 * Event Ring: TCL ring
	 * Event Ring Doorbell PA: TCL Head Pointer Address
	 */
	tx.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	tx.ipa_ep_cfg.hdr.hdr_len = DP_IPA_UC_WLAN_TX_HDR_LEN;
	tx.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
	tx.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
	tx.ipa_ep_cfg.hdr.hdr_additional_const_len = 0;
	tx.ipa_ep_cfg.mode.mode = IPA_BASIC;
	tx.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
	tx.client = IPA_CLIENT_WLAN1_CONS;
	tx.transfer_ring_base_pa = ipa_res->tx_comp_ring_base_paddr;
	tx.transfer_ring_size = ipa_res->tx_comp_ring_size;
	tx.transfer_ring_doorbell_pa = /* WBM Tail Pointer Address */
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	tx.event_ring_base_pa = ipa_res->tx_ring_base_paddr;
	tx.event_ring_size = ipa_res->tx_ring_size;
	tx.event_ring_doorbell_pa = /* TCL Head Pointer Address */
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	tx.num_pkt_buffers = ipa_res->tx_num_alloc_buffer;
	tx.pkt_offset = 0;

	/* Preprogram TCL descriptor: TLV header in the first word, the TCL
	 * data command in the words after it.
	 * NOTE(review): "+1" assumes desc_format_template is an array of
	 * 32-bit words — confirm against the ipa_wdi3 header.
	 */
	desc_addr = (uint8_t *)(tx.desc_format_template);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)(tx.desc_format_template+1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2; /* padding for alignment */

	/* RX PIPE */
	/**
	 * Transfer Ring: REO Ring
	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
	 * Event Ring: FW ring
	 * Event Ring Doorbell PA: FW Head Pointer Address
	 */
	rx.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	rx.ipa_ep_cfg.hdr.hdr_len = DP_IPA_UC_WLAN_RX_HDR_LEN;
	rx.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
	rx.ipa_ep_cfg.hdr.hdr_metadata_reg_valid = 1;
	rx.ipa_ep_cfg.mode.mode = IPA_BASIC;
	rx.client = IPA_CLIENT_WLAN1_PROD;
	rx.transfer_ring_base_pa = ipa_res->rx_rdy_ring_base_paddr;
	rx.transfer_ring_size = ipa_res->rx_rdy_ring_size;
	rx.transfer_ring_doorbell_pa = /* REO Tail Pointer Address */
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	rx.event_ring_base_pa = ipa_res->rx_refill_ring_base_paddr;
	rx.event_ring_size = ipa_res->rx_refill_ring_size;
	rx.event_ring_doorbell_pa = /* FW Head Pointer Address */
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	/* Payload starts after RX TLVs plus 2 bytes of L3 alignment pad */
	rx.pkt_offset = RX_PKT_TLVS_LEN + L3_HEADER_PADDING;

	pipe_in.notify = ipa_w2i_cb;
	pipe_in.priv = ipa_priv;
	memcpy(&pipe_in.tx, &tx, sizeof(struct ipa_wdi3_setup_info));
	memcpy(&pipe_in.rx, &rx, sizeof(struct ipa_wdi3_setup_info));

	/* Connect WDI IPA PIPE */
	ret = ipa_wdi3_conn_pipes(&pipe_in, &pipe_out);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "ipa_wdi3_conn_pipes: IPA pipe setup failed: ret=%d",
			  ret);
		return QDF_STATUS_E_FAILURE;
	}

	/* IPA uC Doorbell registers */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "Tx DB PA=0x%x, Rx DB PA=0x%x",
		  (unsigned int)pipe_out.tx_uc_db_pa,
		  (unsigned int)pipe_out.rx_uc_db_pa);

	/* Saved for dp_ipa_set_doorbell_paddr() to program into the SRNGs */
	ipa_res->tx_comp_doorbell_paddr = pipe_out.tx_uc_db_pa;
	ipa_res->tx_comp_doorbell_vaddr = pipe_out.tx_uc_db_va;
	ipa_res->rx_ready_doorbell_paddr = pipe_out.rx_uc_db_pa;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
		  "transfer_ring_base_pa",
		  (void *)pipe_in.tx.transfer_ring_base_pa,
		  "transfer_ring_size",
		  pipe_in.tx.transfer_ring_size,
		  "transfer_ring_doorbell_pa",
		  (void *)pipe_in.tx.transfer_ring_doorbell_pa,
		  "event_ring_base_pa",
		  (void *)pipe_in.tx.event_ring_base_pa,
		  "event_ring_size",
		  pipe_in.tx.event_ring_size,
		  "event_ring_doorbell_pa",
		  (void *)pipe_in.tx.event_ring_doorbell_pa,
		  "num_pkt_buffers",
		  pipe_in.tx.num_pkt_buffers,
		  "tx_comp_doorbell_paddr",
		  (void *)ipa_res->tx_comp_doorbell_paddr);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
		  "transfer_ring_base_pa",
		  (void *)pipe_in.rx.transfer_ring_base_pa,
		  "transfer_ring_size",
		  pipe_in.rx.transfer_ring_size,
		  "transfer_ring_doorbell_pa",
		  (void *)pipe_in.rx.transfer_ring_doorbell_pa,
		  "event_ring_base_pa",
		  (void *)pipe_in.rx.event_ring_base_pa,
		  "event_ring_size",
		  pipe_in.rx.event_ring_size,
		  "event_ring_doorbell_pa",
		  (void *)pipe_in.rx.event_ring_doorbell_pa,
		  "num_pkt_buffers",
		  pipe_in.rx.num_pkt_buffers,
		  "tx_comp_doorbell_paddr",
		  (void *)ipa_res->rx_ready_doorbell_paddr);

	return QDF_STATUS_SUCCESS;
}
  619. /**
  620. * dp_ipa_cleanup() - Disconnect IPA pipes
  621. * @tx_pipe_handle: Tx pipe handle
  622. * @rx_pipe_handle: Rx pipe handle
  623. *
  624. * Return: QDF_STATUS
  625. */
  626. QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
  627. {
  628. int ret;
  629. ret = ipa_wdi3_disconn_pipes();
  630. if (ret) {
  631. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  632. "ipa_wdi3_disconn_pipes: IPA pipe cleanup failed: ret=%d",
  633. ret);
  634. return QDF_STATUS_E_FAILURE;
  635. }
  636. return QDF_STATUS_SUCCESS;
  637. }
  638. /**
  639. * dp_ipa_setup_iface() - Setup IPA header and register interface
  640. * @ifname: Interface name
  641. * @mac_addr: Interface MAC address
  642. * @prod_client: IPA prod client type
  643. * @cons_client: IPA cons client type
  644. * @session_id: Session ID
  645. * @is_ipv6_enabled: Is IPV6 enabled or not
  646. *
  647. * Return: QDF_STATUS
  648. */
  649. QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
  650. enum ipa_client_type prod_client,
  651. enum ipa_client_type cons_client,
  652. uint8_t session_id, bool is_ipv6_enabled)
  653. {
  654. struct ipa_wdi3_reg_intf_in_params in;
  655. struct ipa_wdi3_hdr_info hdr_info;
  656. struct dp_ipa_uc_tx_hdr uc_tx_hdr;
  657. int ret = -EINVAL;
  658. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  659. "Add Partial hdr: %s, %pM",
  660. ifname, mac_addr);
  661. qdf_mem_zero(&hdr_info, sizeof(struct ipa_wdi3_hdr_info));
  662. qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
  663. /* IPV4 header */
  664. uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
  665. hdr_info.hdr = (uint8_t *)&uc_tx_hdr;
  666. hdr_info.hdr_len = DP_IPA_UC_WLAN_TX_HDR_LEN;
  667. hdr_info.hdr_type = IPA_HDR_L2_ETHERNET_II;
  668. hdr_info.dst_mac_addr_offset = DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
  669. in.netdev_name = ifname;
  670. memcpy(&(in.hdr_info[0]), &hdr_info, sizeof(struct ipa_wdi3_hdr_info));
  671. in.is_meta_data_valid = 1;
  672. in.meta_data = htonl(session_id << 16);
  673. in.meta_data_mask = htonl(0x00FF0000);
  674. /* IPV6 header */
  675. if (is_ipv6_enabled) {
  676. uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IPV6);
  677. memcpy(&(in.hdr_info[1]), &hdr_info,
  678. sizeof(struct ipa_wdi3_hdr_info));
  679. }
  680. ret = ipa_wdi3_reg_intf(&in);
  681. if (ret) {
  682. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  683. "ipa_wdi3_reg_intf: register IPA interface falied: ret=%d",
  684. ret);
  685. return QDF_STATUS_E_FAILURE;
  686. }
  687. return QDF_STATUS_SUCCESS;
  688. }
  689. /**
  690. * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
  691. * @ifname: Interface name
  692. * @is_ipv6_enabled: Is IPV6 enabled or not
  693. *
  694. * Return: QDF_STATUS
  695. */
  696. QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
  697. {
  698. int ret;
  699. ret = ipa_wdi3_dereg_intf(ifname);
  700. if (ret) {
  701. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  702. "ipa_wdi3_dereg_intf: IPA pipe deregistration failed: ret=%d",
  703. ret);
  704. return QDF_STATUS_E_FAILURE;
  705. }
  706. return QDF_STATUS_SUCCESS;
  707. }
  708. /**
  709. * dp_ipa_uc_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
  710. * @ppdev - handle to the device instance
  711. *
  712. * Return: QDF_STATUS
  713. */
  714. QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *ppdev)
  715. {
  716. QDF_STATUS result;
  717. result = ipa_wdi3_enable_pipes();
  718. if (result) {
  719. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  720. "%s: Enable WDI PIPE fail, code %d",
  721. __func__, result);
  722. return QDF_STATUS_E_FAILURE;
  723. }
  724. return QDF_STATUS_SUCCESS;
  725. }
  726. /**
  727. * dp_ipa_uc_disable_pipes() – Suspend traffic and disable Tx/Rx pipes
  728. * @ppdev - handle to the device instance
  729. *
  730. * Return: QDF_STATUS
  731. */
  732. QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *ppdev)
  733. {
  734. QDF_STATUS result;
  735. result = ipa_wdi3_disable_pipes();
  736. if (result) {
  737. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  738. "%s: Disable WDI PIPE fail, code %d",
  739. __func__, result);
  740. return QDF_STATUS_E_FAILURE;
  741. }
  742. return QDF_STATUS_SUCCESS;
  743. }
  744. /**
  745. * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
  746. * @client: Client type
  747. * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
  748. *
  749. * Return: QDF_STATUS
  750. */
  751. QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
  752. {
  753. struct ipa_wdi3_perf_profile profile;
  754. QDF_STATUS result;
  755. profile.client = client;
  756. profile.max_supported_bw_mbps = max_supported_bw_mbps;
  757. result = ipa_wdi3_set_perf_profile(&profile);
  758. if (result) {
  759. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  760. "%s: ipa_wdi3_set_perf_profile fail, code %d",
  761. __func__, result);
  762. return QDF_STATUS_E_FAILURE;
  763. }
  764. return QDF_STATUS_SUCCESS;
  765. }
  766. #endif