dp_txrx_me.c

/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "qdf_nbuf.h"
#include "qdf_atomic.h"
#include "qdf_types.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_internal.h"
#include "dp_txrx_me.h"

#define MAX_ME_BUF_CHUNK 1424
#define ME_US_TO_SEC(_x) ((_x) / (1000 * 1000))
#define ME_CLEAN_WAIT_TIMEOUT (200000) /* 200 ms */
#define ME_CLEAN_WAIT_COUNT 400
/**
 * dp_tx_me_init() - Initialize the ME buffer pool
 * @pdev: DP PDEV handle
 *
 * Return: 0 on success, 1 on failure
 */
static inline uint16_t
dp_tx_me_init(struct dp_pdev *pdev)
{
	uint16_t i, mc_uc_buf_len, num_pool_elems;
	uint32_t pool_size;
	struct dp_tx_me_buf_t *p;

	mc_uc_buf_len = sizeof(struct dp_tx_me_buf_t);
	num_pool_elems = MAX_ME_BUF_CHUNK;
	/* Add flow control buffer count */
	pool_size = (mc_uc_buf_len) * num_pool_elems;
	pdev->me_buf.size = mc_uc_buf_len;
	if (!(pdev->me_buf.vaddr)) {
		qdf_spin_lock_bh(&pdev->tx_mutex);
		pdev->me_buf.vaddr = qdf_mem_malloc(pool_size);
		if (!(pdev->me_buf.vaddr)) {
			qdf_spin_unlock_bh(&pdev->tx_mutex);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "Error allocating memory pool");
			return 1;
		}
		pdev->me_buf.buf_in_use = 0;
		pdev->me_buf.freelist =
			(struct dp_tx_me_buf_t *)pdev->me_buf.vaddr;
		/*
		 * Each me_buf element looks like this:
		 * |=======+==========================|
		 * | ptr   |         Dst MAC          |
		 * |=======+==========================|
		 * Chain the elements into a singly linked freelist.
		 */
		p = pdev->me_buf.freelist;
		for (i = 0; i < num_pool_elems - 1; i++) {
			p->next = (struct dp_tx_me_buf_t *)
					((char *)p + pdev->me_buf.size);
			p = p->next;
		}
		p->next = NULL;
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Pool successfully initialized vaddr - %pK",
			  pdev->me_buf.vaddr);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "paddr - %x", (unsigned int)pdev->me_buf.paddr);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "num_elems = %d", (unsigned int)num_pool_elems);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "buf_size - %d", (unsigned int)pdev->me_buf.size);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "pool_size = %d", (unsigned int)pool_size);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Already Enabled!!");
	}
	return 0;
}
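/*
 * For reference: dp_tx_me_alloc_buf()/dp_tx_me_free_buf() used later in
 * this file are defined outside it (see dp_txrx_me.h). A minimal sketch
 * of how they are expected to operate on the freelist built above,
 * assuming a simple pop/push under tx_mutex (illustrative only, not the
 * actual helper bodies):
 *
 *	qdf_spin_lock_bh(&pdev->tx_mutex);
 *	buf = pdev->me_buf.freelist;            // alloc: pop the head
 *	if (buf) {
 *		pdev->me_buf.freelist = buf->next;
 *		pdev->me_buf.buf_in_use++;
 *	}
 *	qdf_spin_unlock_bh(&pdev->tx_mutex);
 *
 *	qdf_spin_lock_bh(&pdev->tx_mutex);
 *	buf->next = pdev->me_buf.freelist;      // free: push back the head
 *	pdev->me_buf.freelist = buf;
 *	pdev->me_buf.buf_in_use--;
 *	qdf_spin_unlock_bh(&pdev->tx_mutex);
 */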
/**
 * dp_tx_me_alloc_descriptor() - Allocate ME descriptor
 * @pdev_handle: DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Initialize the pool only when the first mcast VAP attaches */
	if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
		dp_tx_me_init(pdev);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Enable MCAST_TO_UCAST"));
	}
	qdf_atomic_inc(&pdev->mc_num_vap_attached);
}
/**
 * dp_tx_me_exit() - Free memory and other cleanup required for
 * multicast-to-unicast conversion
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_exit(struct dp_pdev *pdev)
{
	/* Add flow control buffer count */
	uint32_t wait_time = ME_US_TO_SEC(ME_CLEAN_WAIT_TIMEOUT *
					  ME_CLEAN_WAIT_COUNT);

	if (pdev->me_buf.vaddr) {
		uint16_t wait_cnt = 0;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "Disabling Mcast enhancement, this may take some time");
		qdf_spin_lock_bh(&pdev->tx_mutex);
		/*
		 * Poll until all outstanding ME buffers are returned by
		 * tx completion, dropping the lock while sleeping so the
		 * completion path can make progress.
		 */
		while ((pdev->me_buf.buf_in_use > 0) &&
		       (wait_cnt < ME_CLEAN_WAIT_COUNT)) {
			qdf_spin_unlock_bh(&pdev->tx_mutex);
			OS_SLEEP(ME_CLEAN_WAIT_TIMEOUT);
			wait_cnt++;
			qdf_spin_lock_bh(&pdev->tx_mutex);
		}
		if (pdev->me_buf.buf_in_use > 0) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "Tx-comp pending for %d",
				  pdev->me_buf.buf_in_use);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "ME frames after waiting %ds!!",
				  wait_time);
			qdf_assert_always(0);
		}
		qdf_mem_free(pdev->me_buf.vaddr);
		pdev->me_buf.vaddr = NULL;
		pdev->me_buf.freelist = NULL;
		qdf_spin_unlock_bh(&pdev->tx_mutex);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Already Disabled !!!");
	}
}
/**
 * dp_tx_me_free_descriptor() - Free ME descriptor
 * @pdev_handle: DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_free_descriptor(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	qdf_atomic_dec(&pdev->mc_num_vap_attached);
	if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
		dp_tx_me_exit(pdev);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "Disable MCAST_TO_UCAST");
	}
}
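/*
 * Lifecycle note: dp_tx_me_alloc_descriptor()/dp_tx_me_free_descriptor()
 * form an attach/detach pair keyed off the mc_num_vap_attached counter:
 * the buffer pool is created when the first mcast-enhanced VAP attaches
 * and torn down by dp_tx_me_exit() when the last one detaches.
 */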
/**
 * dp_tx_prepare_send_me() - Call to the umac to get the list of clients
 * @vdev: DP VDEV handle
 * @nbuf: Multicast buffer
 *
 * Return: QDF_STATUS_SUCCESS if the frame was consumed by the ME
 * conversion path, QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	if (vdev->me_convert) {
		if (vdev->me_convert(vdev->osif_vdev, nbuf) > 0)
			return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_FAILURE;
}
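/*
 * Expected flow (a sketch, not the registered implementation): the OS
 * interface layer supplies vdev->me_convert, which resolves the
 * multicast group into per-client MAC addresses and typically feeds
 * them to dp_tx_me_send_convert_ucast() below. Hypothetical shape,
 * where MAX_CLIENTS and osif_get_mcast_members() are illustrative
 * names only:
 *
 *	int osif_me_convert(void *osif_vdev, qdf_nbuf_t nbuf)
 *	{
 *		uint8_t newmac[MAX_CLIENTS][QDF_MAC_ADDR_SIZE];
 *		uint8_t cnt = osif_get_mcast_members(osif_vdev, nbuf,
 *						     newmac);
 *		if (!cnt)
 *			return 0;
 *		return dp_tx_me_send_convert_ucast(vdev_handle, nbuf,
 *						   newmac, cnt);
 *	}
 */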
/**
 * dp_tx_me_mem_free() - Free allocated memory in mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of the segment list
 *
 * Return: void
 */
static void dp_tx_me_mem_free(struct dp_pdev *pdev,
			      struct dp_tx_seg_info_s *seg_info_head)
{
	struct dp_tx_me_buf_t *mc_uc_buf;
	struct dp_tx_seg_info_s *seg_info_new = NULL;
	qdf_nbuf_t nbuf = NULL;
	uint64_t phy_addr;

	while (seg_info_head) {
		nbuf = seg_info_head->nbuf;
		mc_uc_buf = (struct dp_tx_me_buf_t *)
				seg_info_head->frags[0].vaddr;
		/* Rebuild the 64-bit DMA address from its hi/lo halves */
		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
					    phy_addr,
					    QDF_DMA_TO_DEVICE,
					    QDF_MAC_ADDR_SIZE);
		dp_tx_me_free_buf(pdev, mc_uc_buf);
		qdf_nbuf_free(nbuf);
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
}
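/*
 * dp_tx_me_mem_free() is the error-path unwind of the segment list
 * built in dp_tx_me_send_convert_ucast() below: for each pending
 * segment it unmaps the 6-byte MAC fragment, returns the ME buffer to
 * the pool and drops the clone's nbuf reference before freeing the
 * seg_info node itself.
 */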
/**
 * dp_tx_me_send_convert_ucast() - Convert multicast to unicast
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No. of clients
 *
 * Return: no. of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
			    uint8_t newmac[][QDF_MAC_ADDR_SIZE],
			    uint8_t new_mac_cnt)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	qdf_ether_header_t *eh;
	uint8_t *data;
	uint16_t len;
	/* reference to frame dst addr */
	uint8_t *dstmac;
	/* copy of original frame src addr */
	uint8_t srcmac[QDF_MAC_ADDR_SIZE];
	/* local index into newmac */
	uint8_t new_mac_idx = 0;
	struct dp_tx_me_buf_t *mc_uc_buf;
	qdf_nbuf_t nbuf_clone;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s *seg_info_head = NULL;
	struct dp_tx_seg_info_s *seg_info_tail = NULL;
	struct dp_tx_seg_info_s *seg_info_new;
	qdf_dma_addr_t paddr_data;
	qdf_dma_addr_t paddr_mcbuf = 0;
	uint8_t empty_entry_mac[QDF_MAC_ADDR_SIZE] = {0};
	QDF_STATUS status;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
	/* The Ethernet header sits at the start of the frame data */
	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	qdf_mem_copy(srcmac, eh->ether_shost, QDF_MAC_ADDR_SIZE);
	len = qdf_nbuf_len(nbuf);
	data = qdf_nbuf_data(nbuf);
	status = qdf_nbuf_map(vdev->osdev, nbuf,
			      QDF_DMA_TO_DEVICE);
	if (status) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Mapping failure Error:%d", status);
		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
		qdf_nbuf_free(nbuf);
		return 1;
	}
	paddr_data = qdf_nbuf_mapped_paddr_get(nbuf) + QDF_MAC_ADDR_SIZE;
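	/*
	 * The original frame is DMA-mapped once; every per-client segment
	 * built below reuses this mapping for its payload fragment, so
	 * only the 6-byte destination MAC buffer needs a fresh mapping
	 * per clone.
	 */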
	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
		dstmac = newmac[new_mac_idx];
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "added mac addr (%pM)", dstmac);

		/* Check for NULL Mac Address */
		if (!qdf_mem_cmp(dstmac, empty_entry_mac, QDF_MAC_ADDR_SIZE))
			continue;

		/* frame to self mac. skip */
		if (!qdf_mem_cmp(dstmac, srcmac, QDF_MAC_ADDR_SIZE))
			continue;

		/*
		 * optimize to avoid malloc in per-packet path
		 * For eg. seg_pool can be made part of vdev structure
		 */
		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
		if (!seg_info_new) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "alloc failed");
			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
			goto fail_seg_alloc;
		}
		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
		if (!mc_uc_buf)
			goto fail_buf_alloc;

		/*
		 * Check if we need to clone the nbuf
		 * Or can we just use the reference for all cases
		 */
		if (new_mac_idx < (new_mac_cnt - 1)) {
			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
			if (!nbuf_clone) {
				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
				goto fail_clone;
			}
		} else {
			/*
			 * Update the ref
			 * to account for frame sent without cloning
			 */
			qdf_nbuf_ref(nbuf);
			nbuf_clone = nbuf;
		}

		qdf_mem_copy(mc_uc_buf->data, dstmac, QDF_MAC_ADDR_SIZE);
		status = qdf_mem_map_nbytes_single(vdev->osdev,
						   mc_uc_buf->data,
						   QDF_DMA_TO_DEVICE,
						   QDF_MAC_ADDR_SIZE,
						   &paddr_mcbuf);
		if (status) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Mapping failure Error:%d", status);
			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
			goto fail_map;
		}
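		/*
		 * Two-fragment layout handed to the SG send path:
		 *   frag[0]: 6-byte unicast destination MAC (ME buffer)
		 *   frag[1]: original frame contents after its dst MAC
		 * so each copy goes out as |new DstMAC|SrcMAC|payload...|
		 */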
		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
		seg_info_new->frags[0].paddr_lo = (uint32_t)paddr_mcbuf;
		seg_info_new->frags[0].paddr_hi =
			(uint16_t)((uint64_t)paddr_mcbuf >> 32);
		seg_info_new->frags[0].len = QDF_MAC_ADDR_SIZE;

		/* preparing data fragment */
		seg_info_new->frags[1].vaddr =
			qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE;
		seg_info_new->frags[1].paddr_lo = (uint32_t)paddr_data;
		seg_info_new->frags[1].paddr_hi =
			(uint16_t)(((uint64_t)paddr_data) >> 32);
		seg_info_new->frags[1].len = len - QDF_MAC_ADDR_SIZE;

		seg_info_new->nbuf = nbuf_clone;
		seg_info_new->frag_cnt = 2;
		seg_info_new->total_len = len;
		seg_info_new->next = NULL;

		if (!seg_info_head)
			seg_info_head = seg_info_new;
		else
			seg_info_tail->next = seg_info_new;
		seg_info_tail = seg_info_new;
	}
	if (!seg_info_head)
		goto free_return;

	msdu_info.u.sg_info.curr_seg = seg_info_head;
	msdu_info.num_seg = new_mac_cnt;
	msdu_info.frm_type = dp_tx_frm_me;
	msdu_info.tid = HTT_INVALID_TID;
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
	    qdf_unlikely(pdev->hmmc_tid_override_en))
		msdu_info.tid = pdev->hmmc_tid;

	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	while (seg_info_head->next) {
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
	qdf_mem_free(seg_info_head);

	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
	return new_mac_cnt;
fail_map:
	qdf_nbuf_free(nbuf_clone);

fail_clone:
	dp_tx_me_free_buf(pdev, mc_uc_buf);

fail_buf_alloc:
	qdf_mem_free(seg_info_new);

fail_seg_alloc:
	dp_tx_me_mem_free(pdev, seg_info_head);

free_return:
	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
	return 1;
}