dp_txrx_me.c

/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_peer.h"
#include "qdf_nbuf.h"
#include "qdf_atomic.h"
#include "qdf_types.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_internal.h"
#include "dp_txrx_me.h"

#define MAX_ME_BUF_CHUNK 1424
#define ME_US_TO_SEC(_x) ((_x) / (1000 * 1000))
#define ME_CLEAN_WAIT_TIMEOUT (200000) /* 200ms */
#define ME_CLEAN_WAIT_COUNT 400
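
/*
 * Worked example for the wait macros above: dp_tx_me_exit() polls up to
 * ME_CLEAN_WAIT_COUNT times, sleeping ME_CLEAN_WAIT_TIMEOUT us per
 * iteration, so the worst-case wait is 400 * 200000 us = 80,000,000 us,
 * which ME_US_TO_SEC() reports as 80 seconds.
 */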

/**
 * dp_tx_me_init(): Initialize the ME buffer pool
 * @pdev: DP PDEV handle
 *
 * Return: 0 on success, 1 on failure
 */
static inline uint16_t
dp_tx_me_init(struct dp_pdev *pdev)
{
	uint16_t i, mc_uc_buf_len, num_pool_elems;
	uint32_t pool_size;
	struct dp_tx_me_buf_t *p;

	mc_uc_buf_len = sizeof(struct dp_tx_me_buf_t);
	num_pool_elems = MAX_ME_BUF_CHUNK;
	/* Add flow control buffer count */
	pool_size = (mc_uc_buf_len) * num_pool_elems;
	pdev->me_buf.size = mc_uc_buf_len;

	if (!(pdev->me_buf.vaddr)) {
		qdf_spin_lock_bh(&pdev->tx_mutex);
		pdev->me_buf.vaddr = qdf_mem_malloc(pool_size);
		if (!(pdev->me_buf.vaddr)) {
			qdf_spin_unlock_bh(&pdev->tx_mutex);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "Error allocating memory pool");
			return 1;
		}
		pdev->me_buf.buf_in_use = 0;
		pdev->me_buf.freelist =
			(struct dp_tx_me_buf_t *)pdev->me_buf.vaddr;
		/*
		 * me_buf looks like this
		 * |=======+==========================|
		 * |  ptr  |         Dst MAC          |
		 * |=======+==========================|
		 */
		p = pdev->me_buf.freelist;
		for (i = 0; i < num_pool_elems - 1; i++) {
			p->next = (struct dp_tx_me_buf_t *)
				((char *)p + pdev->me_buf.size);
			p = p->next;
		}
		p->next = NULL;
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Pool successfully initialized vaddr - %pK",
			  pdev->me_buf.vaddr);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "paddr - %x", (unsigned int)pdev->me_buf.paddr);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "num_elems = %d", (unsigned int)num_pool_elems);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "buf_size - %d", (unsigned int)pdev->me_buf.size);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "pool_size = %d", (unsigned int)pool_size);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Already Enabled!!");
	}
	return 0;
}
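
/*
 * The pool built above is consumed through dp_tx_me_alloc_buf() and
 * returned through dp_tx_me_free_buf(), which are defined elsewhere in
 * this component.  A minimal sketch of the expected freelist pop/push,
 * assuming only the fields initialized above, would be:
 *
 *	buf = pdev->me_buf.freelist;		// pop head (under tx_mutex)
 *	pdev->me_buf.freelist = buf->next;
 *	pdev->me_buf.buf_in_use++;
 *	...
 *	buf->next = pdev->me_buf.freelist;	// push back on completion
 *	pdev->me_buf.freelist = buf;
 *	pdev->me_buf.buf_in_use--;
 */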

/**
 * dp_tx_me_alloc_descriptor(): Allocate ME descriptor
 * @soc: DP SOC handle
 * @pdev_id: id of DP PDEV handle
 *
 * Return: void
 */
void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return;

	if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
		dp_tx_me_init(pdev);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Enable MCAST_TO_UCAST"));
	}
	qdf_atomic_inc(&pdev->mc_num_vap_attached);
}

/**
 * dp_tx_me_exit(): Free memory and do other cleanup required for
 * multicast-to-unicast conversion
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_exit(struct dp_pdev *pdev)
{
	/* worst-case wait for pending ME tx completions, in seconds */
	uint32_t wait_time = ME_US_TO_SEC(ME_CLEAN_WAIT_TIMEOUT *
					  ME_CLEAN_WAIT_COUNT);

	if (pdev->me_buf.vaddr) {
		uint16_t wait_cnt = 0;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "Disabling Mcast enhancement; this may take some time");
		qdf_spin_lock_bh(&pdev->tx_mutex);
		while ((pdev->me_buf.buf_in_use > 0) &&
		       (wait_cnt < ME_CLEAN_WAIT_COUNT)) {
			qdf_spin_unlock_bh(&pdev->tx_mutex);
			OS_SLEEP(ME_CLEAN_WAIT_TIMEOUT);
			wait_cnt++;
			qdf_spin_lock_bh(&pdev->tx_mutex);
		}
		if (pdev->me_buf.buf_in_use > 0) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "Tx-comp pending for %d",
				  pdev->me_buf.buf_in_use);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "ME frames after waiting %ds!!",
				  wait_time);
			qdf_assert_always(0);
		}
		qdf_mem_free(pdev->me_buf.vaddr);
		pdev->me_buf.vaddr = NULL;
		pdev->me_buf.freelist = NULL;
		qdf_spin_unlock_bh(&pdev->tx_mutex);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Already Disabled!!");
	}
}

/**
 * dp_tx_me_free_descriptor(): Free ME descriptor
 * @soc: DP SOC handle
 * @pdev_id: id of DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return;

	if (qdf_atomic_read(&pdev->mc_num_vap_attached)) {
		if (qdf_atomic_dec_and_test(&pdev->mc_num_vap_attached)) {
			dp_tx_me_exit(pdev);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "Disable MCAST_TO_UCAST");
		}
	}
}
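
/*
 * dp_tx_me_alloc_descriptor()/dp_tx_me_free_descriptor() are reference
 * counted per pdev: the pool is created when the first multicast-enhanced
 * vdev attaches and torn down when the last one detaches.  Illustrative
 * pairing (the attach/detach call sites are assumed, not shown here):
 *
 *	dp_tx_me_alloc_descriptor(cdp_soc, pdev_id);	// on vdev attach
 *	...
 *	dp_tx_me_free_descriptor(cdp_soc, pdev_id);	// on vdev detach
 */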

/**
 * dp_tx_prepare_send_me(): Call into the umac to get the list of clients
 * and send the converted frames
 * @vdev: DP VDEV handle
 * @nbuf: Multicast buffer
 *
 * Return: QDF_STATUS_SUCCESS if at least one packet was transmitted,
 *	   QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	if (dp_me_mcast_convert((struct cdp_soc_t *)(vdev->pdev->soc),
				vdev->vdev_id, vdev->pdev->pdev_id,
				nbuf) > 0)
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}
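
/*
 * A sketch of the expected call site in the transmit path (the exact
 * conditions are assumptions; only dp_tx_prepare_send_me() is defined
 * here):
 *
 *	if (DP_FRAME_IS_MULTICAST(eh->ether_dhost) &&
 *	    vdev->mcast_enhancement_en &&
 *	    dp_tx_prepare_send_me(vdev, nbuf) == QDF_STATUS_SUCCESS)
 *		return NULL;	// frame was consumed by ME conversion
 */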

/**
 * dp_tx_me_mem_free(): Free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of the segment list
 *
 * Return: void
 */
static void dp_tx_me_mem_free(struct dp_pdev *pdev,
			      struct dp_tx_seg_info_s *seg_info_head)
{
	struct dp_tx_me_buf_t *mc_uc_buf;
	struct dp_tx_seg_info_s *seg_info_new = NULL;
	qdf_nbuf_t nbuf = NULL;
	uint64_t phy_addr;

	while (seg_info_head) {
		nbuf = seg_info_head->nbuf;
		mc_uc_buf = (struct dp_tx_me_buf_t *)
			seg_info_head->frags[0].vaddr;
		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
					    phy_addr,
					    QDF_DMA_TO_DEVICE,
					    QDF_MAC_ADDR_SIZE);
		dp_tx_me_free_buf(pdev, mc_uc_buf);
		qdf_nbuf_free(nbuf);
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
}
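
/*
 * Each ME segment freed above carries two fragments, laid out by
 * dp_tx_me_send_convert_ucast() below:
 *
 *	frags[0]: the 6-byte unicast DA, copied into a dp_tx_me_buf_t
 *		  and DMA-mapped separately
 *	frags[1]: the remainder of the original frame, starting right
 *		  after the original DA
 *
 * The 64-bit DMA address is split across paddr_lo/paddr_hi, so it is
 * reassembled as:
 *
 *	paddr = ((uint64_t)paddr_hi << 32) | paddr_lo;
 */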

/**
 * dp_tx_me_send_convert_ucast(): Convert multicast to unicast
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No. of clients
 *
 * Return: no. of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id,
			    qdf_nbuf_t nbuf,
			    uint8_t newmac[][QDF_MAC_ADDR_SIZE],
			    uint8_t new_mac_cnt)
{
	struct dp_pdev *pdev;
	qdf_ether_header_t *eh;
	uint8_t *data;
	uint16_t len;
	/* reference to frame dst addr */
	uint8_t *dstmac;
	/* copy of original frame src addr */
	uint8_t srcmac[QDF_MAC_ADDR_SIZE];
	/* local index into newmac */
	uint8_t new_mac_idx = 0;
	/* no. of segments actually queued (NULL/self entries are skipped) */
	uint8_t seg_cnt = 0;
	struct dp_tx_me_buf_t *mc_uc_buf;
	qdf_nbuf_t nbuf_clone;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s *seg_info_head = NULL;
	struct dp_tx_seg_info_s *seg_info_tail = NULL;
	struct dp_tx_seg_info_s *seg_info_new;
	qdf_dma_addr_t paddr_data;
	qdf_dma_addr_t paddr_mcbuf = 0;
	uint8_t empty_entry_mac[QDF_MAC_ADDR_SIZE] = {0};
	QDF_STATUS status;
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);

	if (!vdev) {
		qdf_nbuf_free(nbuf);
		return 1;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		qdf_nbuf_free(nbuf);
		return 1;
	}
	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	qdf_mem_copy(srcmac, eh->ether_shost, QDF_MAC_ADDR_SIZE);

	len = qdf_nbuf_len(nbuf);
	data = qdf_nbuf_data(nbuf);

	status = qdf_nbuf_map(vdev->osdev, nbuf,
			      QDF_DMA_TO_DEVICE);
	if (status) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Mapping failure Error:%d", status);
		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
		qdf_nbuf_free(nbuf);
		return 1;
	}

	paddr_data = qdf_nbuf_mapped_paddr_get(nbuf) + QDF_MAC_ADDR_SIZE;
	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
		dstmac = newmac[new_mac_idx];
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "added mac addr (%pM)", dstmac);

		/* Check for NULL Mac Address */
		if (!qdf_mem_cmp(dstmac, empty_entry_mac, QDF_MAC_ADDR_SIZE))
			continue;

		/* frame to self mac. skip */
		if (!qdf_mem_cmp(dstmac, srcmac, QDF_MAC_ADDR_SIZE))
			continue;

		/*
		 * optimize to avoid malloc in per-packet path
		 * For eg. seg_pool can be made part of vdev structure
		 */
		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
		if (!seg_info_new) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "alloc failed");
			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
			goto fail_seg_alloc;
		}

		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
		if (!mc_uc_buf)
			goto fail_buf_alloc;

		/*
		 * Check if we need to clone the nbuf
		 * Or can we just use the reference for all cases
		 */
		if (new_mac_idx < (new_mac_cnt - 1)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (!nbuf_clone) {
				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
				goto fail_clone;
			}
		} else {
			/*
			 * Update the ref
			 * to account for frame sent without cloning
			 */
			qdf_nbuf_ref(nbuf);
			nbuf_clone = nbuf;
		}

		qdf_mem_copy(mc_uc_buf->data, dstmac, QDF_MAC_ADDR_SIZE);

		status = qdf_mem_map_nbytes_single(vdev->osdev,
						   mc_uc_buf->data,
						   QDF_DMA_TO_DEVICE,
						   QDF_MAC_ADDR_SIZE,
						   &paddr_mcbuf);
		if (status) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Mapping failure Error:%d", status);
			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
			goto fail_map;
		}

		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
		seg_info_new->frags[0].paddr_lo = (uint32_t)paddr_mcbuf;
		seg_info_new->frags[0].paddr_hi =
			(uint16_t)((uint64_t)paddr_mcbuf >> 32);
		seg_info_new->frags[0].len = QDF_MAC_ADDR_SIZE;

		/* preparing data fragment */
		seg_info_new->frags[1].vaddr =
			qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE;
		seg_info_new->frags[1].paddr_lo = (uint32_t)paddr_data;
		seg_info_new->frags[1].paddr_hi =
			(uint16_t)(((uint64_t)paddr_data) >> 32);
		seg_info_new->frags[1].len = len - QDF_MAC_ADDR_SIZE;

		seg_info_new->nbuf = nbuf_clone;
		seg_info_new->frag_cnt = 2;
		seg_info_new->total_len = len;
		seg_info_new->next = NULL;

		if (!seg_info_head)
			seg_info_head = seg_info_new;
		else
			seg_info_tail->next = seg_info_new;
		seg_info_tail = seg_info_new;
		seg_cnt++;
	}
	if (!seg_info_head)
		goto free_return;

	msdu_info.u.sg_info.curr_seg = seg_info_head;
	msdu_info.num_seg = seg_cnt;
	msdu_info.frm_type = dp_tx_frm_me;
	msdu_info.tid = HTT_INVALID_TID;
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
	    qdf_unlikely(pdev->hmmc_tid_override_en))
		msdu_info.tid = pdev->hmmc_tid;

	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, seg_cnt);
	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	while (seg_info_head->next) {
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
	qdf_mem_free(seg_info_head);

	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
	return seg_cnt;
fail_map:
	qdf_nbuf_free(nbuf_clone);
fail_clone:
	dp_tx_me_free_buf(pdev, mc_uc_buf);
fail_buf_alloc:
	qdf_mem_free(seg_info_new);
fail_seg_alloc:
	dp_tx_me_mem_free(pdev, seg_info_head);
free_return:
	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
	return 1;
}
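
/*
 * Illustrative use of dp_tx_me_send_convert_ucast() (the client table
 * and its contents are hypothetical):
 *
 *	uint8_t client_tbl[2][QDF_MAC_ADDR_SIZE] = {
 *		{0x00, 0x03, 0x7f, 0x01, 0x02, 0x03},
 *		{0x00, 0x03, 0x7f, 0x04, 0x05, 0x06},
 *	};
 *	uint16_t sent;
 *
 *	// one unicast copy per valid entry; the original nbuf is consumed
 *	sent = dp_tx_me_send_convert_ucast(cdp_soc, vdev_id, mcast_nbuf,
 *					   client_tbl, 2);
 */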