dp_txrx_me.c

/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "qdf_nbuf.h"
#include "qdf_atomic.h"
#include "qdf_types.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_internal.h"
#include "dp_txrx_me.h"

#define MAX_ME_BUF_CHUNK 1424
#define ME_US_TO_SEC(_x) ((_x) / (1000 * 1000))
#define ME_CLEAN_WAIT_TIMEOUT (200000) /* 200ms */
#define ME_CLEAN_WAIT_COUNT 400
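
/*
 * Multicast enhancement (ME) converts a single multicast frame into one
 * unicast frame per associated client. The pdev keeps a contiguous pool of
 * MAX_ME_BUF_CHUNK small buffers; each element holds a freelist pointer plus
 * the replacement destination MAC that is prepended as the first fragment of
 * every converted frame.
 */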
/**
 * dp_tx_me_init() - Initialize the ME buffer pool
 * @pdev: DP PDEV handle
 *
 * Return: 0 on success, 1 on failure
 */
static inline uint16_t
dp_tx_me_init(struct dp_pdev *pdev)
{
        uint16_t i, mc_uc_buf_len, num_pool_elems;
        uint32_t pool_size;
        struct dp_tx_me_buf_t *p;

        mc_uc_buf_len = sizeof(struct dp_tx_me_buf_t);
        num_pool_elems = MAX_ME_BUF_CHUNK;
        /* Add flow control buffer count */
        pool_size = (mc_uc_buf_len) * num_pool_elems;
        pdev->me_buf.size = mc_uc_buf_len;

        if (!(pdev->me_buf.vaddr)) {
                qdf_spin_lock_bh(&pdev->tx_mutex);
                pdev->me_buf.vaddr = qdf_mem_malloc(pool_size);
                if (!(pdev->me_buf.vaddr)) {
                        qdf_spin_unlock_bh(&pdev->tx_mutex);
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                                  "Error allocating memory pool");
                        return 1;
                }
                pdev->me_buf.buf_in_use = 0;
                pdev->me_buf.freelist =
                        (struct dp_tx_me_buf_t *)pdev->me_buf.vaddr;
                /*
                 * me_buf looks like this
                 * |=======+==========================|
                 * |  ptr  |         Dst MAC          |
                 * |=======+==========================|
                 */
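                /*
                 * Chain the fixed-size elements of the contiguous
                 * allocation into a singly linked freelist; the last
                 * element terminates the list.
                 */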
                p = pdev->me_buf.freelist;
                for (i = 0; i < num_pool_elems - 1; i++) {
                        p->next = (struct dp_tx_me_buf_t *)
                                ((char *)p + pdev->me_buf.size);
                        p = p->next;
                }
                p->next = NULL;
                qdf_spin_unlock_bh(&pdev->tx_mutex);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "ME Pool successfully initialized vaddr - %x",
                          pdev->me_buf.vaddr);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "paddr - %x\n", (unsigned int)pdev->me_buf.paddr);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "num_elems = %d", (unsigned int)num_pool_elems);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "buf_size - %d", (unsigned int)pdev->me_buf.size);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "pool_size = %d", (unsigned int)pool_size);
        } else {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "ME Already Enabled!!");
        }
        return 0;
}

/**
 * dp_tx_me_alloc_descriptor() - Allocate ME descriptor
 * @pdev_handle: DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev_handle)
{
        struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
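
        /*
         * The ME buffer pool is set up only when the first multicast
         * enhancement capable vdev attaches to this pdev; later attaches
         * just bump the reference count.
         */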
        if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
                dp_tx_me_init(pdev);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          FL("Enable MCAST_TO_UCAST "));
        }
        qdf_atomic_inc(&pdev->mc_num_vap_attached);
}

/**
 * dp_tx_me_exit() - Free memory and other cleanup required for
 *                   multicast unicast conversion
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_exit(struct dp_pdev *pdev)
{
        /* Add flow control buffer count */
        uint32_t wait_time = ME_US_TO_SEC(ME_CLEAN_WAIT_TIMEOUT *
                                          ME_CLEAN_WAIT_COUNT);

        if (pdev->me_buf.vaddr) {
                uint16_t wait_cnt = 0;

                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "Disabling Mcastenhance, this may take some time");
                qdf_spin_lock_bh(&pdev->tx_mutex);
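                /*
                 * Poll until tx completion has returned every outstanding
                 * ME buffer, dropping tx_mutex across each sleep so that
                 * completions can make progress.
                 */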
                while ((pdev->me_buf.buf_in_use > 0) &&
                       (wait_cnt < ME_CLEAN_WAIT_COUNT)) {
                        qdf_spin_unlock_bh(&pdev->tx_mutex);
                        OS_SLEEP(ME_CLEAN_WAIT_TIMEOUT);
                        wait_cnt++;
                        qdf_spin_lock_bh(&pdev->tx_mutex);
                }
                if (pdev->me_buf.buf_in_use > 0) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                                  "Tx-comp pending for %d",
                                  pdev->me_buf.buf_in_use);
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                                  "ME frames after waiting %ds!!",
                                  wait_time);
                        qdf_assert_always(0);
                }
                qdf_mem_free(pdev->me_buf.vaddr);
                pdev->me_buf.vaddr = NULL;
                pdev->me_buf.freelist = NULL;
                qdf_spin_unlock_bh(&pdev->tx_mutex);
        } else {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "ME Already Disabled !!!");
        }
}

/**
 * dp_tx_me_desc_flush() - release ME resources associated with tx_desc
 * @pdev: DP PDEV handle
 *
 * This function frees all outstanding ME buffers for which the buffer was
 * not released at completion time or for which no completion was received.
 */
void dp_tx_me_desc_flush(struct dp_pdev *pdev)
{
        uint8_t i, num_pool;
        uint32_t j;
        uint32_t num_desc, page_id, offset;
        uint16_t num_desc_per_page;
        struct dp_soc *soc = pdev->soc;
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
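
        /*
         * Walk every descriptor in every tx descriptor pool and release the
         * ones that belong to this pdev, are still allocated and are marked
         * as ME descriptors.
         */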
        for (i = 0; i < num_pool; i++) {
                tx_desc_pool = &soc->tx_desc[i];
                if (!tx_desc_pool || !tx_desc_pool->desc_pages.cacheable_pages)
                        continue;
                num_desc_per_page =
                        tx_desc_pool->desc_pages.num_element_per_page;
                for (j = 0; j < num_desc; j++) {
                        page_id = j / num_desc_per_page;
                        offset = j % num_desc_per_page;
                        tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
                        if (tx_desc && (tx_desc->pdev == pdev) &&
                            (tx_desc->flags & DP_TX_DESC_FLAG_ME) &&
                            (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
                                dp_tx_comp_free_buf(soc, tx_desc);
                                dp_tx_desc_release(tx_desc, i);
                        }
                }
        }
}

/**
 * dp_tx_me_free_descriptor() - free ME descriptor
 * @pdev_handle: DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_free_descriptor(struct cdp_pdev *pdev_handle)
{
        struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

        qdf_atomic_dec(&pdev->mc_num_vap_attached);
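        /*
         * When the last multicast enhancement vdev detaches, flush any ME
         * descriptors still in flight and tear down the buffer pool.
         */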
        if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
                dp_tx_me_desc_flush(pdev);
                dp_tx_me_exit(pdev);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "Disable MCAST_TO_UCAST");
        }
}

/**
 * dp_tx_prepare_send_me() - Call to the umac to get the list of clients
 * @vdev: DP VDEV handle
 * @nbuf: Multicast buffer
 *
 * Return: QDF_STATUS_SUCCESS if the frame was consumed by the ME
 *         conversion handler, QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
        if (vdev->me_convert) {
                if (vdev->me_convert(vdev->osif_vdev, nbuf) > 0)
                        return QDF_STATUS_SUCCESS;
        }
        return QDF_STATUS_E_FAILURE;
}

/**
 * dp_tx_me_mem_free() - Free the memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of the segment list
 *
 * Return: void
 */
static void dp_tx_me_mem_free(struct dp_pdev *pdev,
                              struct dp_tx_seg_info_s *seg_info_head)
{
        struct dp_tx_me_buf_t *mc_uc_buf;
        struct dp_tx_seg_info_s *seg_info_new = NULL;
        qdf_nbuf_t nbuf = NULL;
        uint64_t phy_addr;
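
        /*
         * For every queued segment: rebuild the 64-bit DMA address of the
         * ME header fragment from its hi/lo halves, unmap and return the ME
         * buffer to the pool, free the (cloned) nbuf and release the
         * segment descriptor itself.
         */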
        while (seg_info_head) {
                nbuf = seg_info_head->nbuf;
                mc_uc_buf = (struct dp_tx_me_buf_t *)
                        seg_info_head->frags[0].vaddr;
                phy_addr = seg_info_head->frags[0].paddr_hi;
                phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
                qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
                                            phy_addr,
                                            QDF_DMA_TO_DEVICE,
                                            QDF_MAC_ADDR_SIZE);
                dp_tx_me_free_buf(pdev, mc_uc_buf);
                qdf_nbuf_free(nbuf);
                seg_info_new = seg_info_head;
                seg_info_head = seg_info_head->next;
                qdf_mem_free(seg_info_new);
        }
}

/**
 * dp_tx_me_send_convert_ucast() - function to convert multicast to unicast
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No of clients
 *
 * Return: no of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
                            uint8_t newmac[][QDF_MAC_ADDR_SIZE],
                            uint8_t new_mac_cnt)
{
        struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
        struct dp_pdev *pdev = vdev->pdev;
        qdf_ether_header_t *eh;
        uint8_t *data;
        uint16_t len;
        /* reference to frame dst addr */
        uint8_t *dstmac;
        /* copy of original frame src addr */
        uint8_t srcmac[QDF_MAC_ADDR_SIZE];
        /* local index into newmac */
        uint8_t new_mac_idx = 0;
        struct dp_tx_me_buf_t *mc_uc_buf;
        qdf_nbuf_t nbuf_clone;
        struct dp_tx_msdu_info_s msdu_info;
        struct dp_tx_seg_info_s *seg_info_head = NULL;
        struct dp_tx_seg_info_s *seg_info_tail = NULL;
        struct dp_tx_seg_info_s *seg_info_new;
        qdf_dma_addr_t paddr_data;
        qdf_dma_addr_t paddr_mcbuf = 0;
        uint8_t empty_entry_mac[QDF_MAC_ADDR_SIZE] = {0};
        QDF_STATUS status;
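
        /*
         * For each destination MAC build a two-fragment segment: frag[0] is
         * a small ME buffer carrying the new unicast destination address,
         * frag[1] points at the original payload after the destination MAC.
         * The whole segment list is then handed to
         * dp_tx_send_msdu_multiple().
         */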
        qdf_mem_zero(&msdu_info, sizeof(msdu_info));
        dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

        eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
        qdf_mem_copy(srcmac, eh->ether_shost, QDF_MAC_ADDR_SIZE);

        len = qdf_nbuf_len(nbuf);
        data = qdf_nbuf_data(nbuf);

        status = qdf_nbuf_map(vdev->osdev, nbuf,
                              QDF_DMA_TO_DEVICE);
        if (status) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "Mapping failure Error:%d", status);
                DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
                qdf_nbuf_free(nbuf);
                return 1;
        }

        paddr_data = qdf_nbuf_mapped_paddr_get(nbuf) + QDF_MAC_ADDR_SIZE;

        for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
                dstmac = newmac[new_mac_idx];
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "added mac addr (%pM)", dstmac);

                /* Check for NULL Mac Address */
                if (!qdf_mem_cmp(dstmac, empty_entry_mac, QDF_MAC_ADDR_SIZE))
                        continue;

                /* frame to self mac. skip */
                if (!qdf_mem_cmp(dstmac, srcmac, QDF_MAC_ADDR_SIZE))
                        continue;

                /*
                 * optimize to avoid malloc in per-packet path
                 * For eg. seg_pool can be made part of vdev structure
                 */
                seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
                if (!seg_info_new) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "alloc failed");
                        DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
                        goto fail_seg_alloc;
                }

                mc_uc_buf = dp_tx_me_alloc_buf(pdev);
                if (!mc_uc_buf)
                        goto fail_buf_alloc;

                /*
                 * Check if we need to clone the nbuf
                 * Or can we just use the reference for all cases
                 */
                if (new_mac_idx < (new_mac_cnt - 1)) {
                        nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
                        if (!nbuf_clone) {
                                DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
                                goto fail_clone;
                        }
                } else {
                        /*
                         * Update the ref
                         * to account for frame sent without cloning
                         */
                        qdf_nbuf_ref(nbuf);
                        nbuf_clone = nbuf;
                }

                qdf_mem_copy(mc_uc_buf->data, dstmac, QDF_MAC_ADDR_SIZE);

                status = qdf_mem_map_nbytes_single(vdev->osdev,
                                                   mc_uc_buf->data,
                                                   QDF_DMA_TO_DEVICE,
                                                   QDF_MAC_ADDR_SIZE,
                                                   &paddr_mcbuf);
                if (status) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "Mapping failure Error:%d", status);
                        DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
                        goto fail_map;
                }
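
                /*
                 * frag[0]: freshly mapped ME buffer holding the unicast
                 * destination MAC; frag[1]: the already mapped original
                 * frame payload starting right after the destination MAC.
                 */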
                seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
                seg_info_new->frags[0].paddr_lo = (uint32_t)paddr_mcbuf;
                seg_info_new->frags[0].paddr_hi =
                        (uint16_t)((uint64_t)paddr_mcbuf >> 32);
                seg_info_new->frags[0].len = QDF_MAC_ADDR_SIZE;

                /* preparing data fragment */
                seg_info_new->frags[1].vaddr =
                        qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE;
                seg_info_new->frags[1].paddr_lo = (uint32_t)paddr_data;
                seg_info_new->frags[1].paddr_hi =
                        (uint16_t)(((uint64_t)paddr_data) >> 32);
                seg_info_new->frags[1].len = len - QDF_MAC_ADDR_SIZE;

                seg_info_new->nbuf = nbuf_clone;
                seg_info_new->frag_cnt = 2;
                seg_info_new->total_len = len;

                seg_info_new->next = NULL;

                if (!seg_info_head)
                        seg_info_head = seg_info_new;
                else
                        seg_info_tail->next = seg_info_new;

                seg_info_tail = seg_info_new;
        }

        if (!seg_info_head) {
                goto free_return;
        }

        msdu_info.u.sg_info.curr_seg = seg_info_head;
        msdu_info.num_seg = new_mac_cnt;
        msdu_info.frm_type = dp_tx_frm_me;
        msdu_info.tid = HTT_INVALID_TID;
        if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
            qdf_unlikely(pdev->hmmc_tid_override_en))
                msdu_info.tid = pdev->hmmc_tid;

        DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
        dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

        while (seg_info_head->next) {
                seg_info_new = seg_info_head;
                seg_info_head = seg_info_head->next;
                qdf_mem_free(seg_info_new);
        }
        qdf_mem_free(seg_info_head);

        qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
        qdf_nbuf_free(nbuf);
        return new_mac_cnt;
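
        /*
         * Error unwind: undo the allocations made for the current client in
         * reverse order, then release everything queued for earlier clients
         * along with the original nbuf and its DMA mapping.
         */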
fail_map:
        qdf_nbuf_free(nbuf_clone);
fail_clone:
        dp_tx_me_free_buf(pdev, mc_uc_buf);
fail_buf_alloc:
        qdf_mem_free(seg_info_new);
fail_seg_alloc:
        dp_tx_me_mem_free(pdev, seg_info_head);
free_return:
        qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
        qdf_nbuf_free(nbuf);
        return 1;
}