dp_txrx_me.c

/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_peer.h"
#include "qdf_nbuf.h"
#include "qdf_atomic.h"
#include "qdf_types.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_internal.h"
#include "dp_txrx_me.h"

#define MAX_ME_BUF_CHUNK 1424
#define ME_US_TO_SEC(_x) ((_x) / (1000 * 1000))
#define ME_CLEAN_WAIT_TIMEOUT (200000) /* 200ms */
#define ME_CLEAN_WAIT_COUNT 400
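
/*
 * MAX_ME_BUF_CHUNK is the number of dp_tx_me_buf_t elements carved out of a
 * single contiguous allocation in dp_tx_me_init(). ME_CLEAN_WAIT_TIMEOUT is
 * the per-iteration sleep (200 ms, in microseconds) used by dp_tx_me_exit()
 * while waiting for in-flight ME buffers to complete; with
 * ME_CLEAN_WAIT_COUNT iterations the teardown waits at most
 * 200 ms * 400 = 80 seconds before asserting.
 */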

/**
 * dp_tx_me_init() - Initialize the ME buffer pool
 * @pdev: DP PDEV handle
 *
 * Return: 0 on success, 1 on failure
 */
static inline uint16_t
dp_tx_me_init(struct dp_pdev *pdev)
{
	uint16_t i, mc_uc_buf_len, num_pool_elems;
	uint32_t pool_size;
	struct dp_tx_me_buf_t *p;

	mc_uc_buf_len = sizeof(struct dp_tx_me_buf_t);
	num_pool_elems = MAX_ME_BUF_CHUNK;
	/* Add flow control buffer count */
	pool_size = (mc_uc_buf_len) * num_pool_elems;
	pdev->me_buf.size = mc_uc_buf_len;
	if (!(pdev->me_buf.vaddr)) {
		qdf_spin_lock_bh(&pdev->tx_mutex);
		pdev->me_buf.vaddr = qdf_mem_malloc(pool_size);
		if (!(pdev->me_buf.vaddr)) {
			qdf_spin_unlock_bh(&pdev->tx_mutex);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "Error allocating memory pool");
			return 1;
		}
		pdev->me_buf.buf_in_use = 0;
		pdev->me_buf.freelist =
			(struct dp_tx_me_buf_t *)pdev->me_buf.vaddr;
		/*
		 * me_buf looks like this
		 * |=======+==========================|
		 * | ptr   | Dst MAC                  |
		 * |=======+==========================|
		 */
		p = pdev->me_buf.freelist;
		for (i = 0; i < num_pool_elems - 1; i++) {
			p->next = (struct dp_tx_me_buf_t *)
				((char *)p + pdev->me_buf.size);
			p = p->next;
		}
		p->next = NULL;
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Pool successfully initialized vaddr - %x",
			  pdev->me_buf.vaddr);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "paddr - %x\n", (unsigned int)pdev->me_buf.paddr);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "num_elems = %d", (unsigned int)num_pool_elems);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "buf_size - %d", (unsigned int)pdev->me_buf.size);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "pool_size = %d", (unsigned int)pool_size);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Already Enabled!!");
	}
	return 0;
}
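
/*
 * A minimal sketch (an assumption for illustration, not code from this file)
 * of how the freelist built above is expected to be consumed and replenished
 * by dp_tx_me_alloc_buf()/dp_tx_me_free_buf(), which are defined elsewhere in
 * the ME code:
 *
 *	buf = pdev->me_buf.freelist;		// pop head under tx_mutex
 *	pdev->me_buf.freelist = buf->next;
 *	pdev->me_buf.buf_in_use++;
 *	...
 *	buf->next = pdev->me_buf.freelist;	// push back on free
 *	pdev->me_buf.freelist = buf;
 *	pdev->me_buf.buf_in_use--;
 */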

/**
 * dp_tx_me_alloc_descriptor() - Allocate ME descriptor
 * @soc: DP SOC handle
 * @pdev_id: id of DP PDEV handle
 *
 * Return: void
 */
void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return;

	if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
		dp_tx_me_init(pdev);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Enable MCAST_TO_UCAST "));
	}
	qdf_atomic_inc(&pdev->mc_num_vap_attached);
}

/**
 * dp_tx_me_exit() - Free memory and other cleanup required for
 *		     multicast-to-unicast conversion
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_exit(struct dp_pdev *pdev)
{
	/* Add flow control buffer count */
	uint32_t wait_time = ME_US_TO_SEC(ME_CLEAN_WAIT_TIMEOUT *
					  ME_CLEAN_WAIT_COUNT);

	if (pdev->me_buf.vaddr) {
		uint16_t wait_cnt = 0;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "Disabling Mcast enhance, this may take some time");
		qdf_spin_lock_bh(&pdev->tx_mutex);
		while ((pdev->me_buf.buf_in_use > 0) &&
		       (wait_cnt < ME_CLEAN_WAIT_COUNT)) {
			qdf_spin_unlock_bh(&pdev->tx_mutex);
			OS_SLEEP(ME_CLEAN_WAIT_TIMEOUT);
			wait_cnt++;
			qdf_spin_lock_bh(&pdev->tx_mutex);
		}
		if (pdev->me_buf.buf_in_use > 0) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "Tx-comp pending for %d",
				  pdev->me_buf.buf_in_use);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "ME frames after waiting %ds!!",
				  wait_time);
			qdf_assert_always(0);
		}
		qdf_mem_free(pdev->me_buf.vaddr);
		pdev->me_buf.vaddr = NULL;
		pdev->me_buf.freelist = NULL;
		qdf_spin_unlock_bh(&pdev->tx_mutex);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Already Disabled !!!");
	}
}

/**
 * dp_tx_me_free_descriptor() - Free ME descriptor
 * @soc: DP SOC handle
 * @pdev_id: id of DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return;

	if (qdf_atomic_read(&pdev->mc_num_vap_attached)) {
		if (qdf_atomic_dec_and_test(&pdev->mc_num_vap_attached)) {
			dp_tx_me_exit(pdev);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "Disable MCAST_TO_UCAST");
		}
	}
}
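
/*
 * Illustrative pairing (a sketch under the assumption that the control path
 * toggles multicast enhancement per vdev; the exact call sites live outside
 * this file): the first mcast-enhance vdev attached to a pdev initializes the
 * ME pool, and detaching the last one tears it down.
 *
 *	dp_tx_me_alloc_descriptor(cdp_soc, pdev_id);	// vdev enables ME
 *	...
 *	dp_tx_me_free_descriptor(cdp_soc, pdev_id);	// vdev disables ME
 */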

/**
 * dp_tx_prepare_send_me() - Call to the umac to get the list of clients
 * @vdev: DP VDEV handle
 * @nbuf: Multicast buffer
 *
 * Return: QDF_STATUS_SUCCESS if the frame was handed off for conversion,
 *	   QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	if (dp_me_mcast_convert((struct cdp_soc_t *)(vdev->pdev->soc),
				vdev->vdev_id, vdev->pdev->pdev_id,
				nbuf) > 0)
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}
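
/*
 * Note (an assumption based on the surrounding code, not verified against the
 * umac layer): dp_me_mcast_convert() is expected to resolve the multicast
 * group into a table of client MAC addresses and call back into
 * dp_tx_me_send_convert_ucast() below with that table and its count.
 */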

/**
 * dp_tx_me_mem_free() - Free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of the segment list
 *
 * Return: void
 */
static void dp_tx_me_mem_free(struct dp_pdev *pdev,
			      struct dp_tx_seg_info_s *seg_info_head)
{
	struct dp_tx_me_buf_t *mc_uc_buf;
	struct dp_tx_seg_info_s *seg_info_new = NULL;
	qdf_nbuf_t nbuf = NULL;
	uint64_t phy_addr;

	while (seg_info_head) {
		nbuf = seg_info_head->nbuf;
		mc_uc_buf = (struct dp_tx_me_buf_t *)
			seg_info_head->frags[0].vaddr;
		/* Rebuild the 64-bit DMA address of the ME buffer from the
		 * hi/lo halves stored in fragment 0 before unmapping it.
		 */
		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
					    phy_addr,
					    QDF_DMA_TO_DEVICE,
					    QDF_MAC_ADDR_SIZE);
		dp_tx_me_free_buf(pdev, mc_uc_buf);
		qdf_nbuf_free(nbuf);
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
}

/**
 * dp_tx_me_send_convert_ucast() - function to convert multicast to unicast
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev id
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: Number of clients
 *
 * Return: number of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf,
			    uint8_t newmac[][QDF_MAC_ADDR_SIZE],
			    uint8_t new_mac_cnt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	qdf_ether_header_t *eh;
	uint8_t *data;
	uint16_t len;
	/* reference to frame dst addr */
	uint8_t *dstmac;
	/* copy of original frame src addr */
	uint8_t srcmac[QDF_MAC_ADDR_SIZE];
	/* local index into newmac */
	uint8_t new_mac_idx = 0;
	struct dp_tx_me_buf_t *mc_uc_buf;
	qdf_nbuf_t nbuf_clone;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s *seg_info_head = NULL;
	struct dp_tx_seg_info_s *seg_info_tail = NULL;
	struct dp_tx_seg_info_s *seg_info_new;
	qdf_dma_addr_t paddr_data;
	qdf_dma_addr_t paddr_mcbuf = 0;
	uint8_t empty_entry_mac[QDF_MAC_ADDR_SIZE] = {0};
	QDF_STATUS status;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id);

	if (!vdev)
		goto free_return;

	pdev = vdev->pdev;
	if (!pdev)
		goto free_return;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	qdf_mem_copy(srcmac, eh->ether_shost, QDF_MAC_ADDR_SIZE);

	len = qdf_nbuf_len(nbuf);
	data = qdf_nbuf_data(nbuf);

	status = qdf_nbuf_map(vdev->osdev, nbuf,
			      QDF_DMA_TO_DEVICE);
	if (status) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Mapping failure Error:%d", status);
		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
		qdf_nbuf_free(nbuf);
		return 1;
	}

	paddr_data = qdf_nbuf_mapped_paddr_get(nbuf) + QDF_MAC_ADDR_SIZE;
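
	/*
	 * For each client in newmac: allocate a segment descriptor and an ME
	 * buffer, stamp the client's unicast DA into the ME buffer, DMA-map
	 * that 6-byte header fragment, and clone the original nbuf for every
	 * destination except the last one, which reuses the original frame
	 * with an extra reference.
	 */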
	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
		dstmac = newmac[new_mac_idx];
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "added mac addr (%pM)", dstmac);

		/* Check for NULL Mac Address */
		if (!qdf_mem_cmp(dstmac, empty_entry_mac, QDF_MAC_ADDR_SIZE))
			continue;

		/* frame to self mac. skip */
		if (!qdf_mem_cmp(dstmac, srcmac, QDF_MAC_ADDR_SIZE))
			continue;

		/*
		 * optimize to avoid malloc in per-packet path
		 * For eg. seg_pool can be made part of vdev structure
		 */
		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
		if (!seg_info_new) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "alloc failed");
			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
			goto fail_seg_alloc;
		}

		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
		if (!mc_uc_buf)
			goto fail_buf_alloc;

		/*
		 * Check if we need to clone the nbuf
		 * Or can we just use the reference for all cases
		 */
		if (new_mac_idx < (new_mac_cnt - 1)) {
			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
			if (!nbuf_clone) {
				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
				goto fail_clone;
			}
		} else {
			/*
			 * Update the ref
			 * to account for frame sent without cloning
			 */
			qdf_nbuf_ref(nbuf);
			nbuf_clone = nbuf;
		}

		qdf_mem_copy(mc_uc_buf->data, dstmac, QDF_MAC_ADDR_SIZE);

		status = qdf_mem_map_nbytes_single(vdev->osdev,
						   mc_uc_buf->data,
						   QDF_DMA_TO_DEVICE,
						   QDF_MAC_ADDR_SIZE,
						   &paddr_mcbuf);
		if (status) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Mapping failure Error:%d", status);
			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
			goto fail_map;
		}
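
		/*
		 * Each converted frame is described as two fragments:
		 * frag[0] is the 6-byte unicast destination MAC held in the
		 * ME buffer, and frag[1] is the original frame starting just
		 * past the multicast DA (paddr_data = original frame paddr +
		 * QDF_MAC_ADDR_SIZE).
		 */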
		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
		seg_info_new->frags[0].paddr_lo = (uint32_t)paddr_mcbuf;
		seg_info_new->frags[0].paddr_hi =
			(uint16_t)((uint64_t)paddr_mcbuf >> 32);
		seg_info_new->frags[0].len = QDF_MAC_ADDR_SIZE;

		/* preparing data fragment */
		seg_info_new->frags[1].vaddr =
			qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE;
		seg_info_new->frags[1].paddr_lo = (uint32_t)paddr_data;
		seg_info_new->frags[1].paddr_hi =
			(uint16_t)(((uint64_t)paddr_data) >> 32);
		seg_info_new->frags[1].len = len - QDF_MAC_ADDR_SIZE;

		seg_info_new->nbuf = nbuf_clone;
		seg_info_new->frag_cnt = 2;
		seg_info_new->total_len = len;

		seg_info_new->next = NULL;

		if (!seg_info_head)
			seg_info_head = seg_info_new;
		else
			seg_info_tail->next = seg_info_new;

		seg_info_tail = seg_info_new;
	}
	if (!seg_info_head)
		goto unmap_free_return;

	msdu_info.u.sg_info.curr_seg = seg_info_head;
	msdu_info.num_seg = new_mac_cnt;
	msdu_info.frm_type = dp_tx_frm_me;

	msdu_info.tid = HTT_INVALID_TID;
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
	    qdf_unlikely(pdev->hmmc_tid_override_en))
		msdu_info.tid = pdev->hmmc_tid;

	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	while (seg_info_head->next) {
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
	qdf_mem_free(seg_info_head);

	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
	dp_vdev_unref_delete(soc, vdev);
	return new_mac_cnt;

fail_map:
	qdf_nbuf_free(nbuf_clone);
fail_clone:
	dp_tx_me_free_buf(pdev, mc_uc_buf);
fail_buf_alloc:
	qdf_mem_free(seg_info_new);
fail_seg_alloc:
	dp_tx_me_mem_free(pdev, seg_info_head);
unmap_free_return:
	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
free_return:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev);
	qdf_nbuf_free(nbuf);
	return 1;
}