dp_txrx_me.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_peer.h"
#include "qdf_nbuf.h"
#include "qdf_atomic.h"
#include "qdf_types.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_internal.h"
#include "dp_txrx_me.h"

#define MAX_ME_BUF_CHUNK 1424
#define ME_US_TO_SEC(_x) ((_x) / (1000 * 1000))
#define ME_CLEAN_WAIT_TIMEOUT (200000) /* 200ms */
#define ME_CLEAN_WAIT_COUNT 400
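
/*
 * Teardown wait budget: dp_tx_me_exit() polls up to ME_CLEAN_WAIT_COUNT
 * times, sleeping ME_CLEAN_WAIT_TIMEOUT us per iteration, i.e.
 * 400 * 200ms = 80 seconds for pending tx completions to drain.
 */
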
/**
 * dp_tx_me_init() - Initialize the ME buffer pool
 * @pdev: DP PDEV handle
 *
 * Return: 0 on success, 1 on failure
 */
static inline uint16_t
dp_tx_me_init(struct dp_pdev *pdev)
{
	uint16_t i, mc_uc_buf_len, num_pool_elems;
	uint32_t pool_size;
	struct dp_tx_me_buf_t *p;

	mc_uc_buf_len = sizeof(struct dp_tx_me_buf_t);
	num_pool_elems = MAX_ME_BUF_CHUNK;
	/* Add flow control buffer count */
	pool_size = (mc_uc_buf_len) * num_pool_elems;
	pdev->me_buf.size = mc_uc_buf_len;

	if (!(pdev->me_buf.vaddr)) {
		qdf_spin_lock_bh(&pdev->tx_mutex);
		pdev->me_buf.vaddr = qdf_mem_malloc(pool_size);
		if (!(pdev->me_buf.vaddr)) {
			qdf_spin_unlock_bh(&pdev->tx_mutex);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "Error allocating memory pool");
			return 1;
		}
		pdev->me_buf.buf_in_use = 0;
		pdev->me_buf.freelist =
			(struct dp_tx_me_buf_t *)pdev->me_buf.vaddr;
		/*
		 * me_buf looks like this
		 * |=======+==========================|
		 * | ptr   | Dst MAC                  |
		 * |=======+==========================|
		 */
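		/*
		 * Thread the flat allocation into a singly linked
		 * freelist; while an element is free, its leading
		 * pointer-sized bytes hold the 'next' link.
		 */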
		p = pdev->me_buf.freelist;
		for (i = 0; i < num_pool_elems - 1; i++) {
			p->next = (struct dp_tx_me_buf_t *)
					((char *)p + pdev->me_buf.size);
			p = p->next;
		}
		p->next = NULL;
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Pool successfully initialized vaddr - %pK",
			  pdev->me_buf.vaddr);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "paddr - %x\n", (unsigned int)pdev->me_buf.paddr);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "num_elems = %d", (unsigned int)num_pool_elems);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "buf_size - %d", (unsigned int)pdev->me_buf.size);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "pool_size = %d", (unsigned int)pool_size);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Already Enabled!!");
	}
	return 0;
}

/**
 * dp_tx_me_alloc_descriptor() - Allocate ME descriptor
 * @soc: DP SOC handle
 * @pdev_id: id of DP PDEV handle
 *
 * Return: void
 */
void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return;

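	/* Initialize the pool only when the first MC-enabled VAP attaches */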
	if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
		dp_tx_me_init(pdev);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("Enable MCAST_TO_UCAST"));
	}
	qdf_atomic_inc(&pdev->mc_num_vap_attached);
}

/**
 * dp_tx_me_exit() - Free memory and perform cleanup required for
 * multicast-to-unicast conversion
 * @pdev: DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_exit(struct dp_pdev *pdev)
{
	uint32_t wait_time = ME_US_TO_SEC(ME_CLEAN_WAIT_TIMEOUT *
					  ME_CLEAN_WAIT_COUNT);

	if (pdev->me_buf.vaddr) {
		uint16_t wait_cnt = 0;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "Disabling Mcast enhancement; this may take some time");
		qdf_spin_lock_bh(&pdev->tx_mutex);
		while ((pdev->me_buf.buf_in_use > 0) &&
		       (wait_cnt < ME_CLEAN_WAIT_COUNT)) {
			qdf_spin_unlock_bh(&pdev->tx_mutex);
			OS_SLEEP(ME_CLEAN_WAIT_TIMEOUT);
			wait_cnt++;
			qdf_spin_lock_bh(&pdev->tx_mutex);
		}
		if (pdev->me_buf.buf_in_use > 0) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "Tx-comp pending for %d",
				  pdev->me_buf.buf_in_use);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "ME frames after waiting %ds!!",
				  wait_time);
			qdf_assert_always(0);
		}
		qdf_mem_free(pdev->me_buf.vaddr);
		pdev->me_buf.vaddr = NULL;
		pdev->me_buf.freelist = NULL;
		qdf_spin_unlock_bh(&pdev->tx_mutex);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME Already Disabled !!!");
	}
}

/**
 * dp_tx_me_free_descriptor() - Free ME descriptor
 * @soc: DP SOC handle
 * @pdev_id: id of DP PDEV handle
 *
 * Return: void
 */
void
dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return;

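	/* Tear the pool down only when the last MC-enabled VAP detaches */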
	if (qdf_atomic_read(&pdev->mc_num_vap_attached)) {
		if (qdf_atomic_dec_and_test(&pdev->mc_num_vap_attached)) {
			dp_tx_me_exit(pdev);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "Disable MCAST_TO_UCAST");
		}
	}
}

/**
 * dp_tx_prepare_send_me() - Call into the umac to get the list of clients
 * @vdev: DP VDEV handle
 * @nbuf: Multicast buffer
 *
 * Return: QDF_STATUS_SUCCESS if at least one packet was transmitted,
 * QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	if (dp_me_mcast_convert((struct cdp_soc_t *)(vdev->pdev->soc),
				vdev->vdev_id, vdev->pdev->pdev_id,
				nbuf) > 0)
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_tx_prepare_send_igmp_me() - Check for IGMP and convert multicast
 * to unicast
 * @vdev: DP VDEV handle
 * @nbuf: Multicast buffer
 *
 * Return: QDF_STATUS_SUCCESS if at least one packet was transmitted,
 * QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS
dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	if (dp_igmp_me_mcast_convert((struct cdp_soc_t *)(vdev->pdev->soc),
				     vdev->vdev_id, vdev->pdev->pdev_id,
				     nbuf) > 0)
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_tx_me_mem_free() - Free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of the segment list
 *
 * Return: void
 */
static void dp_tx_me_mem_free(struct dp_pdev *pdev,
			      struct dp_tx_seg_info_s *seg_info_head)
{
	struct dp_tx_me_buf_t *mc_uc_buf;
	struct dp_tx_seg_info_s *seg_info_new = NULL;
	qdf_nbuf_t nbuf = NULL;
	uint64_t phy_addr;

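	/*
	 * Walk the segment list: frag[0] of each segment is the ME
	 * buffer holding the rewritten destination MAC, so unmap its
	 * DMA address (reassembled from the paddr hi/lo words) and
	 * return it to the pool before freeing the nbuf and segment.
	 */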
	while (seg_info_head) {
		nbuf = seg_info_head->nbuf;
		mc_uc_buf = (struct dp_tx_me_buf_t *)
				seg_info_head->frags[0].vaddr;
		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
					    phy_addr,
					    QDF_DMA_TO_DEVICE,
					    QDF_MAC_ADDR_SIZE);
		dp_tx_me_free_buf(pdev, mc_uc_buf);
		qdf_nbuf_free(nbuf);
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
}

/**
 * dp_tx_me_send_convert_ucast() - Convert multicast to unicast
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev id
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No. of clients
 * @tid: desired tid
 * @is_igmp: flag to indicate if packet is igmp
 *
 * Return: no. of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf,
			    uint8_t newmac[][QDF_MAC_ADDR_SIZE],
			    uint8_t new_mac_cnt, uint8_t tid,
			    bool is_igmp)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	qdf_ether_header_t *eh;
	uint8_t *data;
	uint16_t len;
	/* reference to frame dst addr */
	uint8_t *dstmac;
	/* copy of original frame src addr */
	uint8_t srcmac[QDF_MAC_ADDR_SIZE];
	/* local index into newmac */
	uint8_t new_mac_idx = 0;
	struct dp_tx_me_buf_t *mc_uc_buf;
	qdf_nbuf_t nbuf_clone;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s *seg_info_head = NULL;
	struct dp_tx_seg_info_s *seg_info_tail = NULL;
	struct dp_tx_seg_info_s *seg_info_new;
	qdf_dma_addr_t paddr_data;
	qdf_dma_addr_t paddr_mcbuf = 0;
	uint8_t empty_entry_mac[QDF_MAC_ADDR_SIZE] = {0};
	QDF_STATUS status;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_MCAST2UCAST);

	if (!vdev)
		goto free_return;

	pdev = vdev->pdev;
	if (!pdev)
		goto free_return;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
	/* cast the frame data, not the nbuf handle, to read the header */
	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	qdf_mem_copy(srcmac, eh->ether_shost, QDF_MAC_ADDR_SIZE);
	len = qdf_nbuf_len(nbuf);
	data = qdf_nbuf_data(nbuf);
	status = qdf_nbuf_map(vdev->osdev, nbuf,
			      QDF_DMA_TO_DEVICE);
	if (status) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Mapping failure Error:%d", status);
		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
		/* release the vdev reference taken above before bailing out */
		goto free_return;
	}
	paddr_data = qdf_nbuf_mapped_paddr_get(nbuf) + QDF_MAC_ADDR_SIZE;
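
	/*
	 * paddr_data skips the destination MAC: each converted frame
	 * reuses the original payload from the source MAC onward as
	 * fragment 1, while fragment 0 supplies the new unicast DA.
	 */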
	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
		dstmac = newmac[new_mac_idx];
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "added mac addr (%pM)", dstmac);

		/* Check for NULL Mac Address */
		if (!qdf_mem_cmp(dstmac, empty_entry_mac, QDF_MAC_ADDR_SIZE))
			continue;

		/* frame to self mac. skip */
		if (!qdf_mem_cmp(dstmac, srcmac, QDF_MAC_ADDR_SIZE))
			continue;

		/*
		 * optimize to avoid malloc in per-packet path
		 * For eg. seg_pool can be made part of vdev structure
		 */
		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
		if (!seg_info_new) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "alloc failed");
			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
			goto fail_seg_alloc;
		}
		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
		if (!mc_uc_buf)
			goto fail_buf_alloc;

		/*
		 * Check if we need to clone the nbuf
		 * Or can we just use the reference for all cases
		 */
		if (new_mac_idx < (new_mac_cnt - 1)) {
			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
			if (!nbuf_clone) {
				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
				goto fail_clone;
			}
		} else {
			/*
			 * Update the ref
			 * to account for frame sent without cloning
			 */
			qdf_nbuf_ref(nbuf);
			nbuf_clone = nbuf;
		}

		qdf_mem_copy(mc_uc_buf->data, dstmac, QDF_MAC_ADDR_SIZE);
		status = qdf_mem_map_nbytes_single(vdev->osdev,
						   mc_uc_buf->data,
						   QDF_DMA_TO_DEVICE,
						   QDF_MAC_ADDR_SIZE,
						   &paddr_mcbuf);
		if (status) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Mapping failure Error:%d", status);
			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
			mc_uc_buf->paddr_macbuf = 0;
			goto fail_map;
		}
		mc_uc_buf->paddr_macbuf = paddr_mcbuf;
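
		/*
		 * Fragment 0 carries only the 6-byte unicast DA; the
		 * 64-bit DMA address is split into the paddr_lo/paddr_hi
		 * words the segment descriptor expects.
		 */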
		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
		seg_info_new->frags[0].paddr_lo = (uint32_t)paddr_mcbuf;
		seg_info_new->frags[0].paddr_hi =
			(uint16_t)((uint64_t)paddr_mcbuf >> 32);
		seg_info_new->frags[0].len = QDF_MAC_ADDR_SIZE;

		/* preparing data fragment */
		seg_info_new->frags[1].vaddr =
			qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE;
		seg_info_new->frags[1].paddr_lo = (uint32_t)paddr_data;
		seg_info_new->frags[1].paddr_hi =
			(uint16_t)(((uint64_t)paddr_data) >> 32);
		seg_info_new->frags[1].len = len - QDF_MAC_ADDR_SIZE;

		seg_info_new->nbuf = nbuf_clone;
		seg_info_new->frag_cnt = 2;
		seg_info_new->total_len = len;
		seg_info_new->next = NULL;

		if (!seg_info_head)
			seg_info_head = seg_info_new;
		else
			seg_info_tail->next = seg_info_new;
		seg_info_tail = seg_info_new;
	}

	if (!seg_info_head)
		goto unmap_free_return;

	msdu_info.u.sg_info.curr_seg = seg_info_head;
	msdu_info.num_seg = new_mac_cnt;
	msdu_info.frm_type = dp_tx_frm_me;
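
	/*
	 * TID selection: honor the caller's TID when valid; otherwise
	 * fall back to the pdev HMMC TID override if mcast enhancement
	 * and the override are both enabled.
	 */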
	if (tid == HTT_INVALID_TID) {
		msdu_info.tid = HTT_INVALID_TID;
		if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
		    qdf_unlikely(pdev->hmmc_tid_override_en))
			msdu_info.tid = pdev->hmmc_tid;
	} else {
		msdu_info.tid = tid;
	}

	if (is_igmp) {
		DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
			     new_mac_cnt);
	} else {
		DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
	}

	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
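
	/*
	 * The send path holds its own reference on each segment's nbuf
	 * (the clones/refs taken above); release the local segment list
	 * and the original nbuf here.
	 */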
	while (seg_info_head->next) {
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
	qdf_mem_free(seg_info_head);

	qdf_nbuf_free(nbuf);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MCAST2UCAST);
	return new_mac_cnt;
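
	/*
	 * Error unwind: each label below releases everything acquired
	 * after the previous failure point, ending with the nbuf unmap,
	 * the vdev reference, and the original nbuf itself.
	 */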
fail_map:
	qdf_nbuf_free(nbuf_clone);
fail_clone:
	dp_tx_me_free_buf(pdev, mc_uc_buf);
fail_buf_alloc:
	qdf_mem_free(seg_info_new);
fail_seg_alloc:
	dp_tx_me_mem_free(pdev, seg_info_head);
unmap_free_return:
	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
free_return:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MCAST2UCAST);
	qdf_nbuf_free(nbuf);
	return 1;
}