/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#include <ieee80211.h>
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"

/*
 * dp_rx_buffers_replenish() - replenish the rxdma ring with rx nbufs;
 *                             called during dp rx initialization
 *                             and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @num_req_buffers: number of buffers requested for replenish
 * @desc_list: list of descs if called from dp_rx_process,
 *             or NULL during dp rx initialization or
 *             from the out-of-buffer interrupt
 * @tail: tail of the descriptor list, advanced as descriptors are consumed
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
                                   uint32_t num_req_buffers,
                                   union dp_rx_desc_list_elem_t **desc_list,
                                   union dp_rx_desc_list_elem_t **tail,
                                   uint8_t owner)
{
    uint32_t num_alloc_desc;
    uint16_t num_desc_to_free = 0;
    struct dp_pdev *dp_pdev = dp_soc->pdev_list[mac_id];
    uint32_t num_entries_avail;
    uint32_t count;
    int sync_hw_ptr = 1;
    qdf_dma_addr_t paddr;
    qdf_nbuf_t rx_netbuf;
    void *rxdma_ring_entry;
    union dp_rx_desc_list_elem_t *next;
    struct dp_srng *dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
    void *rxdma_srng = dp_rxdma_srng->hal_srng;
    int32_t ret;

    if (!rxdma_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "rxdma srng not initialized");
        DP_STATS_INC(dp_pdev, err.rxdma_unitialized, 1);
        return QDF_STATUS_E_FAILURE;
    }

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "requested %d buffers for replenish", num_req_buffers);

    /*
     * if desc_list is NULL, allocate the descs from freelist
     */
    if (!(*desc_list)) {
        num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
                                                  num_req_buffers,
                                                  desc_list,
                                                  tail);
        if (!num_alloc_desc) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "no free rx_descs in freelist");
            DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
                         num_alloc_desc);
            return QDF_STATUS_E_NOMEM;
        }

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%d rx desc allocated", num_alloc_desc);
        num_req_buffers = num_alloc_desc;
    }

    hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
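
    /*
     * A note added for clarity: sync_hw_ptr (set to 1 above) asks HAL
     * to refresh its cached ring pointer from hardware before counting,
     * so num_entries_avail reflects the ring state the DMA engine
     * actually sees.
     */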
    num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
                                               rxdma_srng,
                                               sync_hw_ptr);

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "no. of available entries in rxdma ring: %d",
              num_entries_avail);

    if (num_entries_avail < num_req_buffers) {
        num_desc_to_free = num_req_buffers - num_entries_avail;
        num_req_buffers = num_entries_avail;
    }

    count = 0;

    while (count < num_req_buffers) {
        rx_netbuf = qdf_nbuf_alloc(dp_pdev->osif_pdev,
                                   RX_BUFFER_SIZE,
                                   RX_BUFFER_RESERVATION,
                                   RX_BUFFER_ALIGNMENT,
                                   FALSE);
        if (rx_netbuf == NULL)
            continue;

        qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
                            QDF_DMA_BIDIRECTIONAL);

        paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

        /*
         * If the physical address of nbuf->data is less than
         * 0x50000000, free the nbuf and try allocating a new
         * one. We retry up to 100 times. This is a temporary
         * WAR until it is fixed properly.
         */
        ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
        if (ret == QDF_STATUS_E_FAILURE)
            break;

        count++;

        rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
                                                 rxdma_srng);
        next = (*desc_list)->next;

        (*desc_list)->rx_desc.nbuf = rx_netbuf;
        DP_STATS_INC_PKT(dp_pdev, replenished, 1,
                         qdf_nbuf_len(rx_netbuf));
        hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
                                     (*desc_list)->rx_desc.cookie,
                                     owner);

        *desc_list = next;
    }

    hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "successfully replenished %d buffers", num_req_buffers);
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "%d rx desc added back to free list", num_desc_to_free);
    DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);

    /*
     * add any remaining free descs back to the free list
     */
    if (*desc_list)
        dp_rx_add_desc_list_to_free_list(dp_soc, desc_list,
                                         tail, mac_id);

    return QDF_STATUS_SUCCESS;
}
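
/*
 * Illustrative usage (a sketch for clarity, mirroring the call made in
 * dp_rx_pdev_attach() at the end of this file): a caller with no
 * descriptors in hand passes NULL list/tail pointers and lets the
 * routine pull descriptors from the per-pdev freelist itself:
 *
 *    union dp_rx_desc_list_elem_t *desc_list = NULL;
 *    union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *    dp_rx_buffers_replenish(soc, mac_id, num_buffers,
 *                            &desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
 */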

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand them over to
 *                       the RAW mode simulation to decapsulate the pkt
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 *
 * Return: void
 */
static void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list)
{
    qdf_nbuf_t deliver_list_head = NULL;
    qdf_nbuf_t deliver_list_tail = NULL;
    qdf_nbuf_t nbuf;

    nbuf = nbuf_list;
    while (nbuf) {
        qdf_nbuf_t next = qdf_nbuf_next(nbuf);

        DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

        /*
         * Reset the chfrag_start and chfrag_end bits in nbuf cb,
         * as this is a non-amsdu pkt and RAW mode simulation expects
         * these bits to be 0 for a non-amsdu pkt.
         */
        if (qdf_nbuf_is_chfrag_start(nbuf) &&
            qdf_nbuf_is_chfrag_end(nbuf)) {
            qdf_nbuf_set_chfrag_start(nbuf, 0);
            qdf_nbuf_set_chfrag_end(nbuf, 0);
        }

        nbuf = next;
    }

    vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
                             &deliver_list_tail);

    vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @sa_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
                   struct dp_peer *sa_peer,
                   uint8_t *rx_tlv_hdr,
                   qdf_nbuf_t nbuf)
{
    DP_STATS_INC_PKT(sa_peer, rx.intra_bss, 1,
                     qdf_nbuf_len(nbuf));

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("Intra-BSS forwarding not implemented"));

    return false;
}
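
/*
 * Note added for clarity: this is currently a stub -- it only counts
 * the frame against rx.intra_bss and returns false, so dp_rx_process()
 * always falls through to the regular stack-delivery path.
 */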

#ifdef MESH_MODE_SUPPORT
/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per-packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 *
 * This function allocates memory for the mesh receive stats and fills
 * in the required stats. It stores the memory address in the skb cb.
 *
 * Return: void
 */
static
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    struct mesh_recv_hdr_s *rx_info = NULL;
    uint32_t pkt_type;
    uint32_t nss;
    uint32_t rate_mcs;
    uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);

    /* fill recv mesh stats */
    rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

    /* upper layers are responsible for freeing this memory */
    if (rx_info == NULL) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Memory allocation failed for mesh rx stats");
        return;
    }

    if (qdf_nbuf_is_chfrag_start(nbuf))
        rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

    if (qdf_nbuf_is_chfrag_end(nbuf))
        rx_info->rs_flags |= MESH_RX_LAST_MSDU;

    if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
        rx_info->rs_flags |= MESH_RX_DECRYPTED;
        rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
        rx_info->rs_flags |= MESH_KEY_NOTFILLED;
    }

    rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
    rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
    pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
    rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
    nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);
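
    /*
     * Descriptive note on the packing below: MCS sits in bits [3:0],
     * NSS is shifted up to bit 4, and the HW pkt_type (preamble)
     * starts at bit 6, so the NSS and pkt_type fields would overlap
     * if nss ever exceeded 3.
     */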
  246. rx_info->rs_ratephy1 = rate_mcs | (nss << 0x4) | (pkt_type << 6);
  247. qdf_nbuf_set_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
  248. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  249. FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
  250. rx_info->rs_flags,
  251. rx_info->rs_rssi,
  252. rx_info->rs_channel,
  253. rx_info->rs_ratephy1,
  254. rx_info->rs_keyix);
  255. }
/**
 * dp_rx_filter_mesh_packets() - Filters out unwanted mesh packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 *
 * This checks whether the received packet matches any of the
 * configured filter-out categories and indicates that it should
 * be dropped if so.
 *
 * Return: QDF_STATUS_SUCCESS if the packet should be dropped,
 *         QDF_STATUS_E_FAILURE if it should be kept
 */
static inline
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
    union dp_align_mac_addr mac_addr;

    if (qdf_unlikely(vdev->mesh_rx_filter)) {
        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
            if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
                return QDF_STATUS_SUCCESS;

        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
            if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
                return QDF_STATUS_SUCCESS;

        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
            if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr) &&
                !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
                return QDF_STATUS_SUCCESS;

        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
            if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
                                      &mac_addr.raw[0]))
                return QDF_STATUS_E_FAILURE;

            if (!qdf_mem_cmp(&mac_addr.raw[0],
                             &vdev->mac_addr.raw[0],
                             DP_MAC_ADDR_LEN))
                return QDF_STATUS_SUCCESS;
        }

        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
            if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
                                      &mac_addr.raw[0]))
                return QDF_STATUS_E_FAILURE;

            if (!qdf_mem_cmp(&mac_addr.raw[0],
                             &vdev->mac_addr.raw[0],
                             DP_MAC_ADDR_LEN))
                return QDF_STATUS_SUCCESS;
        }
    }

    return QDF_STATUS_E_FAILURE;
}
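
/*
 * Note added for clarity: the caller, dp_rx_process(), treats a
 * QDF_STATUS_SUCCESS return from dp_rx_filter_mesh_packets() as
 * "filtered" and frees the nbuf without delivering it.
 */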

#else
static
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
}

static inline
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *                   Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
    void *hal_soc;
    void *ring_desc;
    struct dp_rx_desc *rx_desc;
    qdf_nbuf_t nbuf;
    union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
    union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
    uint32_t rx_bufs_used = 0, rx_buf_cookie, l2_hdr_offset;
    uint16_t msdu_len;
    uint16_t peer_id;
    struct dp_peer *peer = NULL;
    struct dp_vdev *vdev = NULL;
    struct dp_vdev *vdev_list[WLAN_UMAC_PSOC_MAX_VDEVS] = { NULL };
    uint32_t pkt_len;
    struct hal_rx_mpdu_desc_info mpdu_desc_info;
    struct hal_rx_msdu_desc_info msdu_desc_info;
    enum hal_reo_error_status error;
    static uint32_t peer_mdata;
    uint8_t *rx_tlv_hdr;
    uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
    uint32_t sgi, rate_mcs, tid, nss, bw, reception_type;
    uint64_t vdev_map = 0;
    uint8_t mac_id;
    uint16_t i, vdev_cnt = 0;
    uint32_t ampdu_flag, amsdu_flag;
    struct ether_header *eh;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access Failed -- %p"), hal_ring);
        hal_srng_access_end(hal_soc, hal_ring);
        goto done;
    }

    /*
     * Start reaping the buffers from the reo ring and queue
     * them in a per-vdev queue.
     * Process the received pkts in a separate per-vdev loop.
     */
    while (qdf_likely((ring_desc =
                       hal_srng_dst_get_next(hal_soc, hal_ring)) &&
                      quota--)) {
        error = HAL_RX_ERROR_STATUS_GET(ring_desc);

        if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("HAL RING 0x%p:error %d"), hal_ring, error);
            /* Don't know how to deal with this -- assert */
            qdf_assert(0);
        }

        rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);

        rx_desc = dp_rx_cookie_2_va(soc, rx_buf_cookie);

        qdf_assert(rx_desc);
        rx_bufs_reaped[rx_desc->pool_id]++;

        /* TODO */
        /*
         * Need a separate API for unmapping based on
         * physical address
         */
        qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
                              QDF_DMA_BIDIRECTIONAL);

        /* Get MPDU DESC info */
        hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

        peer_id = DP_PEER_METADATA_PEER_ID_GET(
                      mpdu_desc_info.peer_meta_data);
        peer = dp_peer_find_by_id(soc, peer_id);

        if (!peer) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("peer look-up failed peer id %d"), peer_id);

            /* Drop & free packet */
            qdf_nbuf_free(rx_desc->nbuf);

            /* Statistics */
            goto fail;
        }

        vdev = peer->vdev;
        if (!vdev) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("vdev is NULL"));
            qdf_nbuf_free(rx_desc->nbuf);
            goto fail;
        }
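
        /*
         * Collect the set of vdevs seen in this pass without
         * duplicates: vdev_map is a 64-bit bitmap keyed by vdev_id
         * (this assumes vdev_id < 64), and vdev_list records each
         * vdev the first time its bit is set.
         */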
        if (!((vdev_map >> vdev->vdev_id) & 1)) {
            vdev_map |= 1ULL << vdev->vdev_id;
            vdev_list[vdev_cnt] = vdev;
            vdev_cnt++;
        }

        /* Get MSDU DESC info */
        hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);

        /*
         * save msdu flags first, last and continuation msdu in
         * nbuf->cb
         */
        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
            qdf_nbuf_set_chfrag_start(rx_desc->nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
            qdf_nbuf_set_chfrag_cont(rx_desc->nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
            qdf_nbuf_set_chfrag_end(rx_desc->nbuf, 1);

        DP_STATS_INC_PKT(peer, rx.rcvd_reo, 1,
                         qdf_nbuf_len(rx_desc->nbuf));

        ampdu_flag = (mpdu_desc_info.mpdu_flags &
                      HAL_MPDU_F_AMPDU_FLAG);

        DP_STATS_INCC(vdev->pdev, rx.ampdu_cnt, 1, ampdu_flag);
        DP_STATS_INCC(vdev->pdev, rx.non_ampdu_cnt, 1, !(ampdu_flag));

        hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
        amsdu_flag = ((msdu_desc_info.msdu_flags &
                       HAL_MSDU_F_FIRST_MSDU_IN_MPDU) &&
                      (msdu_desc_info.msdu_flags &
                       HAL_MSDU_F_LAST_MSDU_IN_MPDU));

        DP_STATS_INCC(vdev->pdev, rx.non_amsdu_cnt, 1,
                      amsdu_flag);
        DP_STATS_INCC(vdev->pdev, rx.amsdu_cnt, 1,
                      !(amsdu_flag));

        qdf_nbuf_queue_add(&vdev->rxq, rx_desc->nbuf);
fail:
        dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
                                    &tail[rx_desc->pool_id],
                                    rx_desc);
    }

done:
    hal_srng_access_end(hal_soc, hal_ring);

    for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
        /*
         * continue with next mac_id if no pkts were reaped
         * from that pool
         */
        if (!rx_bufs_reaped[mac_id])
            continue;

        dp_rx_buffers_replenish(soc, mac_id,
                                rx_bufs_reaped[mac_id],
                                &head[mac_id],
                                &tail[mac_id],
                                HAL_RX_BUF_RBM_SW3_BM);
    }

    for (i = 0; i < vdev_cnt; i++) {
        qdf_nbuf_t deliver_list_head = NULL;
        qdf_nbuf_t deliver_list_tail = NULL;

        vdev = vdev_list[i];

        while ((nbuf = qdf_nbuf_queue_remove(&vdev->rxq))) {
            rx_tlv_hdr = qdf_nbuf_data(nbuf);
            eh = (struct ether_header *)qdf_nbuf_data(nbuf);

            /*
             * Check if DMA completed -- msdu_done is the last bit
             * to be written
             */
            if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_ERROR,
                          FL("MSDU DONE failure"));
                hal_rx_dump_pkt_tlvs(rx_tlv_hdr,
                                     QDF_TRACE_LEVEL_INFO);
                qdf_assert(0);
            }

            if (qdf_nbuf_is_chfrag_start(nbuf))
                peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);

            peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
            peer = dp_peer_find_by_id(soc, peer_id);

            /* TODO */
            /*
             * In case of roaming, the peer object may not be
             * immediately available -- need to handle this.
             * Cannot drop these packets right away.
             */
            /* Peer lookup failed */
            if (!peer) {
                /* Drop & free packet */
                qdf_nbuf_free(nbuf);

                /* Statistics */
                continue;
            }

            if (qdf_unlikely(peer->bss_peer)) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_INFO,
                          FL("received pkt with same src MAC"));

                /* Drop & free packet */
                qdf_nbuf_free(nbuf);

                /* Statistics */
                continue;
            }

            sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
            rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
            tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);

            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                      "%s: %d, SGI: %d, rate_mcs: %d, tid: %d",
                      __func__, __LINE__, sgi, rate_mcs, tid);

            bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
            reception_type = hal_rx_msdu_start_reception_type_get(
                                 rx_tlv_hdr);
            nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);

            DP_STATS_INC(vdev->pdev, rx.bw[bw], 1);
            DP_STATS_INC(vdev->pdev,
                         rx.reception_type[reception_type], 1);
            DP_STATS_INCC(vdev->pdev, rx.nss[nss], 1,
                          ((reception_type == REPT_MU_MIMO) ||
                           (reception_type == REPT_MU_OFDMA_MIMO)));
            DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
            DP_STATS_INC(peer, rx.mcs_count[rate_mcs], 1);
            DP_STATS_INCC(peer, rx.err.mic_err, 1,
                          hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
            DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
                          hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
            DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
            DP_STATS_INC(peer, rx.bw[bw], 1);
            DP_STATS_INC(peer, rx.reception_type[reception_type], 1);

            /*
             * HW structures call this L3 header padding --
             * even though this is actually the offset from
             * the buffer beginning where the L2 header
             * begins.
             */
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                      FL("rxhash: flow id toeplitz: 0x%x\n"),
                      hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));

            l2_hdr_offset =
                hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);

            msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
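
            /*
             * Buffer layout at this point (a note added for clarity):
             * | RX_PKT_TLVS_LEN TLV area | l2_hdr_offset pad | MSDU |
             * Set the full length now; the TLV area and padding are
             * pulled off below before delivery to the OS shim.
             */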
            pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

            /* Set length in nbuf */
            qdf_nbuf_set_pktlen(nbuf, pkt_len);

            if (qdf_unlikely(vdev->mesh_vdev)) {
                if (dp_rx_filter_mesh_packets(vdev, nbuf) ==
                    QDF_STATUS_SUCCESS) {
                    QDF_TRACE(QDF_MODULE_ID_DP,
                              QDF_TRACE_LEVEL_INFO_MED,
                              FL("mesh pkt filtered"));

                    qdf_nbuf_free(nbuf);
                    continue;
                }
                dp_rx_fill_mesh_stats(vdev, nbuf);
            }

            /*
             * Advance the packet start pointer by total size of
             * pre-header TLV's
             */
            qdf_nbuf_pull_head(nbuf,
                               RX_PKT_TLVS_LEN + l2_hdr_offset);

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "p_id %d msdu_len %d hdr_off %d",
                      peer_id, msdu_len, l2_hdr_offset);

            print_hex_dump(KERN_ERR,
                           "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
                           qdf_nbuf_data(nbuf), 128, false);
#endif /* QCA_WIFI_NAPIER_EMULATION */

            /* WDS Source Port Learning */
            dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

            /* Intrabss-fwd */
            if (dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr, nbuf))
                continue; /* Get next descriptor */

            rx_bufs_used++;

            DP_RX_LIST_APPEND(deliver_list_head,
                              deliver_list_tail,
                              nbuf);

            DP_STATS_INCC_PKT(peer, rx.multicast, 1, pkt_len,
                              DP_FRAME_IS_MULTICAST((eh)->ether_dhost));
            DP_STATS_INCC_PKT(peer, rx.unicast, 1, pkt_len,
                              !(DP_FRAME_IS_MULTICAST(
                                    (eh)->ether_dhost)));
            DP_STATS_INC_PKT(peer, rx.to_stack, 1, pkt_len);

            if (hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
                if (soc->cdp_soc.ol_ops->update_dp_stats)
                    soc->cdp_soc.ol_ops->update_dp_stats(
                        vdev->pdev->osif_pdev,
                        &peer->stats,
                        peer_id,
                        UPDATE_PEER_STATS);

                dp_aggregate_vdev_stats(peer->vdev);

                if (soc->cdp_soc.ol_ops->update_dp_stats)
                    soc->cdp_soc.ol_ops->update_dp_stats(
                        vdev->pdev->osif_pdev,
                        &peer->vdev->stats,
                        peer->vdev->vdev_id,
                        UPDATE_VDEV_STATS);
            }
        }

        if (qdf_unlikely(vdev->rx_decap_type == htt_pkt_type_raw))
            dp_rx_deliver_raw(vdev, deliver_list_head);
        else if (qdf_likely(vdev->osif_rx) && deliver_list_head)
            vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
    }

    return rx_bufs_used; /* Assume no scale factor for now */
}
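
/*
 * Illustrative caller sketch (an assumption for clarity; the actual
 * service loop lives outside this file): a bottom-half handler is
 * expected to drive dp_rx_process() with its scheduling budget,
 * roughly:
 *
 *    budget -= dp_rx_process(soc, hal_ring, budget);
 *
 * and stop servicing / re-arm interrupts once the budget is exhausted.
 */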

/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX from the main device context
 * and free the DP Rx resources.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
    uint8_t pdev_id = pdev->pdev_id;
    struct dp_soc *soc = pdev->soc;

    dp_rx_desc_pool_free(soc, pdev_id);
    qdf_spinlock_destroy(&soc->rx_desc_mutex[pdev_id]);
}

/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context. It will allocate the DP Rx resources
 * and initialize them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
    uint8_t pdev_id = pdev->pdev_id;
    struct dp_soc *soc = pdev->soc;
    struct dp_srng rxdma_srng;
    uint32_t rxdma_entries;
    union dp_rx_desc_list_elem_t *desc_list = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;

    qdf_spinlock_create(&soc->rx_desc_mutex[pdev_id]);
    pdev = soc->pdev_list[pdev_id];
    rxdma_srng = pdev->rx_refill_buf_ring;

    rxdma_entries = rxdma_srng.alloc_size /
                    hal_srng_get_entrysize(soc->hal_soc, RXDMA_BUF);

    dp_rx_desc_pool_alloc(soc, pdev_id);

    /* For Rx buffers, WBM release ring is SW RING 3, for all pdevs */
    dp_rx_buffers_replenish(soc, pdev_id, rxdma_entries,
                            &desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);

    return QDF_STATUS_SUCCESS;
}
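
/*
 * Illustrative pairing (a sketch for clarity): a pdev bring-up and
 * tear-down path is expected to call these entry points symmetrically:
 *
 *    if (dp_rx_pdev_attach(pdev) != QDF_STATUS_SUCCESS)
 *        goto fail;    (hypothetical error path)
 *    ...
 *    dp_rx_pdev_detach(pdev);
 */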