dp_rx.c
/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_rx_mon.h"

#ifdef RX_DESC_DEBUG_CHECK
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
}
#else
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
}
#endif

#ifdef CONFIG_WIN
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}
#else
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	if (vdev->opmode != wlan_op_mode_sta)
		return true;
	else
		return false;
}
#endif

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
				void *ring_desc, struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring);
	qdf_assert_always(rx_desc->in_use);
}

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (!rxdma_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "rxdma srng not initialized");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "requested %d buffers for replenish", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "no of available entries in rxdma ring: %d",
		  num_entries_avail);
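	/*
	 * If the caller did not hand in a descriptor list and the ring is
	 * more than 3/4 empty, top it up completely; otherwise cap the
	 * request at the free ring entries and return any surplus
	 * descriptors to the free list below.
	 */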
	if (!(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "no free rx_descs in freelist");
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%d rx desc allocated", num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					   RX_BUFFER_SIZE,
					   RX_BUFFER_RESERVATION,
					   RX_BUFFER_ALIGNMENT,
					   FALSE);

		if (rx_netbuf == NULL) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
					  QDF_DMA_BIDIRECTIONAL);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(rx_netbuf);
			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
			continue;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		/*
		 * If the physical address of nbuf->data is less than
		 * 0x50000000, free the nbuf and try allocating a new nbuf.
		 * We can retry up to 100 times.
		 * This is a temporary WAR till we fix it properly.
		 */
		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
		if (ret == QDF_STATUS_E_FAILURE) {
			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
		(*desc_list)->rx_desc.in_use = 1;

		dp_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
			 rx_netbuf, qdf_nbuf_data(rx_netbuf),
			 (unsigned long long)paddr,
			 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_debug("replenished buffers %d, rx desc added back to free list %u",
		 num_req_buffers, num_desc_to_free);

	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
			 (RX_BUFFER_SIZE * num_req_buffers));

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_peer *peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}
		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail, (struct cdp_peer *) peer);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}
#ifdef DP_LFR
/*
 * In case of LFR, data of a new peer might be sent up
 * even before peer is added.
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("PeerID %d not found use vdevID %d"),
				  peer_id, vdev_id);
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("Invalid PeerID %d"),
				  peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
		      uint16_t peer_id,
		      struct dp_peer *peer,
		      struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  FL("Peer not found for peerID %d"),
			  peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_da_learn() - Add AST entry based on DA lookup
 *		      This is a WAR for HK 1.0 and will
 *		      be removed in HK 2.0
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_peer: Transmitter peer entry
 * @nbuf: nbuf to retrieve destination mac for which AST will be added
 *
 */
#ifdef FEATURE_WDS
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
	/* For HKv2 DA port learning is not needed */
	if (qdf_likely(soc->ast_override_support))
		return;

	if (qdf_unlikely(!ta_peer))
		return;

	if (qdf_unlikely(ta_peer->vdev->opmode != wlan_op_mode_ap))
		return;

	if (!soc->da_war_enabled)
		return;

	if (qdf_unlikely(!hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
			 !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		dp_peer_add_ast(soc,
				ta_peer,
				qdf_nbuf_data(nbuf),
				CDP_TXRX_AST_TYPE_DA,
				IEEE80211_NODE_F_WDS_HM);
	}
}
#else
static void
dp_rx_da_learn(struct dp_soc *soc,
	       uint8_t *rx_tlv_hdr,
	       struct dp_peer *ta_peer,
	       qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
		   struct dp_peer *ta_peer,
		   uint8_t *rx_tlv_hdr,
		   qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;

	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */
	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
	     !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);

		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;
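		/*
		 * DA-learnt AST entries (added by dp_rx_da_learn() as a
		 * HKv1 WAR) are not used for intra-BSS forwarding; only
		 * refresh their activity and let the frame go up the stack.
		 */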
		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
			ast_entry->is_active = TRUE;
			return false;
		}

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;

		/* TA peer cannot be same as peer(DA) on which AST is present
		 * this indicates a change in topology and that AST entries
		 * are yet to be updated.
		 */
		if (da_peer == ta_peer)
			return false;

		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
			len = qdf_nbuf_len(nbuf);

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(ta_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					return true;
				}
			}

			if (!dp_tx_send(ta_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
						 len);
				return true;
			} else {
				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
						 len);
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			       !ta_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;

		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
		len = qdf_nbuf_len(nbuf_copy);

		if (dp_tx_send(ta_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
		}
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}
#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */
	if (rx_info == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					   &rx_info->rs_decryptkey[0],
					   &peer->mac_addr.raw[0],
					   rx_info->rs_keyix);
	}

	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix);
}

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter-out
 * category and drops the packet if it matches.
 *
 * Return: status (0 indicates drop, 1 indicates no drop)
 */
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
			    && !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif
#ifdef CONFIG_WIN
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handled */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						  pdev->invalid_peer_head_msdu,
						  pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
							   &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}
/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * return: void
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid nbuf length");
		goto free;
	}

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
					DP_MAC_ADDR_LEN) == 0) {
				qdf_spin_unlock_bh(&pdev->vdev_list_lock);
				goto out;
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	if (NULL == vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);

free:
	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_ERROR,
				  "PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif
#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @rx_tlv: RX TLV for which offload information is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(uint8_t *rx_tlv)
{
	dp_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	dp_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	dp_debug("chksum 0x%x", HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	dp_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	dp_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	dp_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	dp_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	dp_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	dp_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	dp_debug("---------------------------------------------------------");
}

/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 *
 * Return: None
 */
static
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu)
{
	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	/* Filling up RX offload info only for TCP packets */
	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
		return;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
		 HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		 HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
		 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
		 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
		 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
		 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
		 HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
		 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
		 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);

	dp_rx_print_offload_info(rx_tlv);
}
#else
static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
				qdf_nbuf_t msdu)
{
}
#endif /* RECEIVE_OFFLOAD */
/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len > (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);
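	/*
	 * nbuf now points at the last buffer of the chain: clear its start
	 * flag, attach the collected chain as the parent's frag_list and
	 * strip the RX TLVs from the parent before handing it back.
	 */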
	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}
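/**
 * dp_rx_deliver_to_stack() - deliver a list of nbufs to the OS stack
 * @vdev: DP Virtual device handle
 * @peer: peer for which the nbufs were received
 * @nbuf_head: head of the nbuf list to deliver
 * @nbuf_tail: tail of the nbuf list to deliver
 *
 * Hands the list to the registered osif_rx callback; for raw/native-wifi
 * decap types the list is first passed through the RAW mode simulation
 * decap handler. If no rx callback is registered the list is freed.
 *
 * Return: void
 */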
static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head,
					  qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		qdf_nbuf_t nbuf;

		do {
			nbuf = nbuf_head;
			nbuf_head = nbuf_head->next;
			qdf_nbuf_free(nbuf);
		} while (nbuf_head);

		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail, (struct cdp_peer *) peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ip_summed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
				       qdf_nbuf_t nbuf,
				       uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}
/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 *
 * update all the per msdu stats for that nbuf.
 *
 * Return: void
 */
static void dp_rx_msdu_stats_update(struct dp_soc *soc,
				    qdf_nbuf_t nbuf,
				    uint8_t *rx_tlv_hdr,
				    struct dp_peer *peer,
				    uint8_t ring_id)
{
	bool is_ampdu, is_not_amsdu;
	uint16_t peer_id;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	struct dp_vdev *vdev = peer->vdev;
	qdf_ether_header_t *eh;
	uint16_t msdu_len = qdf_nbuf_len(nbuf);

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr));
	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
			qdf_nbuf_is_rx_chfrag_end(nbuf);

	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
		}
	}

	/*
	 * currently we can return from here as we have similar stats
	 * updated at per ppdu level instead of msdu level
	 */
	if (!soc->process_rx_status)
		return;

	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);

	DP_STATS_INC(peer, rx.bw[bw], 1);
	DP_STATS_INC(peer, rx.nss[nss], 1);
	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
	DP_STATS_INCC(peer, rx.err.mic_err, 1,
		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));

	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
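	/*
	 * Per-PHY-type MCS histogram: an MCS value at or above the bound
	 * used for that packet type is accumulated in the MAX_MCS - 1
	 * overflow bucket, otherwise the exact MCS bucket is incremented.
	 */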
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));

	if ((soc->process_rx_status) &&
	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
		if (!vdev->pdev)
			return;

		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
				     &peer->stats, peer_id,
				     UPDATE_PEER_STATS,
				     vdev->pdev->pdev_id);
#endif
	}
}
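/**
 * is_sa_da_idx_valid() - check whether the SA/DA AST indices from the RX
 *			  TLVs are within the configured AST table range
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 *
 * Return: true if each valid index is in range, false otherwise
 */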
static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
				      void *rx_tlv_hdr)
{
	if ((hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr) &&
	     (hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr) >
	      wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
	    (hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
	     (hal_rx_msdu_end_da_idx_get(soc->hal_soc,
					 rx_tlv_hdr) >
	      wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
		return false;

	return true;
}

#ifdef WDS_VENDOR_EXTENSION
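/**
 * dp_wds_rx_policy_check() - apply the WDS rx policy configured on the
 *			      bss peer (AP mode) or on the peer (STA mode)
 * @rx_tlv_hdr: start address of rx tlvs
 * @vdev: vdev on which the frame was received
 * @peer: peer from which the frame was received
 * @rx_mcast: non-zero if the frame is multicast
 *
 * Return: 1 if the frame should be accepted, 0 if it should be dropped
 */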
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	struct dp_peer *bss_peer;
	int fr_ds, to_ds, rx_3addr, rx_4addr;
	int rx_policy_ucast, rx_policy_mcast;

	if (vdev->opmode == wlan_op_mode_ap) {
		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
			if (bss_peer->bss_peer) {
				/* if wds policy check is not enabled on this vdev, accept all frames */
				if (!bss_peer->wds_ecm.wds_rx_filter) {
					return 1;
				}
				break;
			}
		}
		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
	} else {		/* sta mode */
		if (!peer->wds_ecm.wds_rx_filter) {
			return 1;
		}
		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
	}

	/* ------------------------------------------------
	 *                       self
	 * peer-             rx  rx-
	 * wds  ucast mcast dir policy accept note
	 * ------------------------------------------------
	 * 1     1     0     11  x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     01  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     00  x1     0      bad frame, won't see it
	 * 1     0     1     11  1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     01  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     00  1x     0      bad frame, won't see it
	 * 1     1     0     11  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     01  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
	 * 1     1     0     10  x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
	 * 1     1     0     00  x0     0      bad frame, won't see it
	 * 1     0     1     11  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     01  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
	 * 1     0     1     10  0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
	 * 1     0     1     00  0x     0      bad frame, won't see it
	 *
	 * 0     x     x     11  xx     0      we only accept to-ds Rx frames from non-wds peers in this mode.
	 * 0     x     x     01  xx     1
	 * 0     x     x     10  xx     0
	 * 0     x     x     00  xx     0      bad frame, won't see it
	 * ------------------------------------------------
	 */

	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
	rx_3addr = fr_ds ^ to_ds;
	rx_4addr = fr_ds & to_ds;

	if (vdev->opmode == wlan_op_mode_ap) {
		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
		    (peer->wds_enabled && !rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (peer->wds_enabled && rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	} else {		/* sta mode */
		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
		    (rx_mcast && (rx_4addr == rx_policy_mcast))) {
			return 1;
		}
	}
	return 0;
}
#else
int dp_wds_rx_policy_check(
		uint8_t *rx_tlv_hdr,
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		int rx_mcast
		)
{
	return 1;
}
#endif
/**
 * dp_rx_process() - Brain of the Rx processing functionality
 *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process(struct dp_intr *int_ctx, void *hal_ring,
		       uint8_t reo_ring_num, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc = NULL;
	qdf_nbuf_t nbuf, next;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	struct dp_peer *peer = NULL;
	struct dp_vdev *vdev = NULL;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	uint8_t ring_id = 0;
	uint8_t core_id = 0;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	int32_t tid = 0;
	uint32_t dst_num_valid = 0;

	DP_HIST_INIT();
	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	hif_pm_runtime_mark_last_busy(soc->osdev->dev);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		hal_srng_access_end(hal_soc, hal_ring);
		goto done;
	}

	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);

		/*
		 * in case HW has updated hp after we cached the hp,
		 * ring_desc can be NULL even when there are entries
		 * available in the ring. Update the cached_hp
		 * and reap the buffers available to read a complete
		 * mpdu in one reap.
		 *
		 * This is needed because in RAW mode we have to read all
		 * msdus corresponding to an amsdu in one reap to create
		 * the SG list properly, but due to a mismatch between
		 * cached_hp and the actual hp we are sometimes unable to
		 * read the complete mpdu in one reap.
		 */
		if (qdf_unlikely(!ring_desc)) {
			dst_num_valid = hal_srng_dst_num_valid(hal_soc,
							       hal_ring,
							       true);
			if (dst_num_valid) {
				DP_STATS_INC(soc, rx.hp_oos, 1);
				hal_srng_access_end_unlocked(hal_soc,
							     hal_ring);
				continue;
			} else {
				break;
			}
		}

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		ring_id = hal_srng_ring_id_get(hal_ring);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("HAL RING 0x%pK:error %d"), hal_ring, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}
  1303. rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
  1304. rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
  1305. qdf_assert(rx_desc);
  1306. /*
  1307. * this is a unlikely scenario where the host is reaping
  1308. * a descriptor which it already reaped just a while ago
  1309. * but is yet to replenish it back to HW.
  1310. * In this case host will dump the last 128 descriptors
  1311. * including the software descriptor rx_desc and assert.
  1312. */
  1313. if (qdf_unlikely(!rx_desc->in_use)) {
  1314. DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
  1315. dp_rx_dump_info_and_assert(soc, hal_ring,
  1316. ring_desc, rx_desc);
  1317. }
                rx_bufs_reaped[rx_desc->pool_id]++;

                /* TODO */
                /*
                 * Need a separate API for unmapping based on
                 * physical address
                 */
                qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
                                      QDF_DMA_BIDIRECTIONAL);

                core_id = smp_processor_id();
                DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);

                /* Get MPDU DESC info */
                hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
                hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
                                               mpdu_desc_info.peer_meta_data);

                /* Get MSDU DESC info */
                hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);

                /*
                 * Save the msdu flags (first, last and continuation msdu)
                 * in nbuf->cb.
                 */
                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
                        qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
                        qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

                if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
                        qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

                QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

                DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);

                /*
                 * If the continuation bit is set then the MSDU is spread
                 * across multiple buffers; do not decrement quota
                 * till we reap all buffers of that MSDU.
                 */
                if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
                        quota -= 1;

                dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
                                            &tail[rx_desc->pool_id],
                                            rx_desc);
        }
done:
        hal_srng_access_end(hal_soc, hal_ring);

        if (nbuf_tail)
                QDF_NBUF_CB_RX_FLUSH_IND(nbuf_tail) = 1;

        /* Update histogram statistics by looping through pdevs */
        DP_RX_HIST_STATS_PER_PDEV();

        for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
                /*
                 * Continue with the next mac_id if no pkts were reaped
                 * from that pool.
                 */
                if (!rx_bufs_reaped[mac_id])
                        continue;

                pdev = soc->pdev_list[mac_id];
                dp_rxdma_srng = &pdev->rx_refill_buf_ring;
                rx_desc_pool = &soc->rx_desc_buf[mac_id];

                dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                        rx_desc_pool, rx_bufs_reaped[mac_id],
                                        &head[mac_id], &tail[mac_id]);
        }

        /* Peer can be NULL in case of LFR */
        if (qdf_likely(peer != NULL))
                vdev = NULL;
        /*
         * BIG loop where each nbuf is dequeued from the global queue,
         * processed and queued back on a per vdev basis. These nbufs
         * are sent to the stack as and when we run out of nbufs, or when
         * a newly dequeued nbuf has a different vdev than the previous
         * nbuf.
         */
        nbuf = nbuf_head;
        while (nbuf) {
                next = nbuf->next;
                rx_tlv_hdr = qdf_nbuf_data(nbuf);

                /*
                 * Check if DMA completed -- msdu_done is the last bit
                 * to be written.
                 */
                if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  FL("MSDU DONE failure"));
                        hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
                                             QDF_TRACE_LEVEL_INFO);
                        qdf_assert(0);
                }

                peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
                peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
                peer = dp_peer_find_by_id(soc, peer_id);

                if (peer) {
                        QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
                        qdf_dp_trace_set_track(nbuf, QDF_RX);
                        QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
                        QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
                                QDF_NBUF_RX_PKT_DATA_TRACK;
                }

                rx_bufs_used++;

                if (deliver_list_head && peer && (vdev != peer->vdev)) {
                        dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
                                               deliver_list_tail);
                        deliver_list_head = NULL;
                        deliver_list_tail = NULL;
                }

                if (qdf_likely(peer != NULL)) {
                        vdev = peer->vdev;
                } else {
                        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
                                         qdf_nbuf_len(nbuf));
                        qdf_nbuf_free(nbuf);
                        nbuf = next;
                        continue;
                }

                if (qdf_unlikely(vdev == NULL)) {
                        qdf_nbuf_free(nbuf);
                        nbuf = next;
                        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
                        dp_peer_unref_del_find_by_id(peer);
                        continue;
                }

                DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
                /*
                 * First IF condition:
                 * 802.11 fragmented pkts are reinjected to the REO
                 * HW block as SG pkts and for these pkts we only
                 * need to pull the RX TLVS header length.
                 * Second IF condition:
                 * The below condition happens when an MSDU is spread
                 * across multiple buffers. This can happen in two cases:
                 * 1. The nbuf size is smaller than the received msdu.
                 *    ex: we have set the nbuf size to 2048 during
                 *        nbuf_alloc, but we received an msdu which is
                 *        2304 bytes in size; this msdu is then spread
                 *        across 2 nbufs.
                 *
                 * 2. AMSDUs when RAW mode is enabled.
                 *    ex: 1st MSDU is in the 1st nbuf, the 2nd MSDU is
                 *        spread across the 1st and 2nd nbufs, and the
                 *        last MSDU is spread across the 2nd and 3rd nbufs.
                 *
                 * For these scenarios let us create an skb frag_list and
                 * append these buffers till the last MSDU of the AMSDU.
                 * Third condition:
                 * This is the most likely case: we receive 802.3 pkts
                 * decapsulated by HW; here we need to set the pkt length.
                 */
                if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf)))
                        qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
                else if (qdf_unlikely(vdev->rx_decap_type ==
                                      htt_cmn_pkt_type_raw)) {
                        msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
                        nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);

                        DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
                        DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);

                        next = nbuf->next;
                } else {
                        l2_hdr_offset =
                                hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
                        msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
                        pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

                        qdf_nbuf_set_pktlen(nbuf, pkt_len);
                        qdf_nbuf_pull_head(nbuf,
                                           RX_PKT_TLVS_LEN + l2_hdr_offset);
                }
                if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
                                hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  FL("Policy Check Drop pkt"));
                        /* Drop & free packet */
                        qdf_nbuf_free(nbuf);
                        /* Statistics */
                        nbuf = next;
                        dp_peer_unref_del_find_by_id(peer);
                        continue;
                }

                if (qdf_unlikely(peer && peer->bss_peer)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  FL("received pkt with same src MAC"));
                        DP_STATS_INC_PKT(peer, rx.mec_drop, 1, msdu_len);

                        /* Drop & free packet */
                        qdf_nbuf_free(nbuf);
                        /* Statistics */
                        nbuf = next;
                        dp_peer_unref_del_find_by_id(peer);
                        continue;
                }

                if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
                    (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
                    (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
                        DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
                        qdf_nbuf_free(nbuf);
                        nbuf = next;
                        dp_peer_unref_del_find_by_id(peer);
                        continue;
                }
                dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

                dp_set_rx_queue(nbuf, ring_id);

                /*
                 * HW structures call this L3 header padding --
                 * even though this is actually the offset from
                 * the buffer beginning where the L2 header
                 * begins.
                 */
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          FL("rxhash: flow id toeplitz: 0x%x"),
                          hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));

                dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);

                if (qdf_unlikely(vdev->mesh_vdev)) {
                        if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
                                        == QDF_STATUS_SUCCESS) {
                                QDF_TRACE(QDF_MODULE_ID_DP,
                                          QDF_TRACE_LEVEL_INFO_MED,
                                          FL("mesh pkt filtered"));
                                DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
                                             1);

                                qdf_nbuf_free(nbuf);
                                nbuf = next;
                                dp_peer_unref_del_find_by_id(peer);
                                continue;
                        }
                        dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
                }
#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "p_id %d msdu_len %d hdr_off %d",
                          peer_id, msdu_len, l2_hdr_offset);

                print_hex_dump(KERN_ERR,
                               "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
                               qdf_nbuf_data(nbuf), 128, false);
#endif /* QCA_WIFI_NAPIER_EMULATION_DBG */
                if (qdf_likely(vdev->rx_decap_type ==
                               htt_cmn_pkt_type_ethernet) &&
                    qdf_likely(!vdev->mesh_vdev)) {
                        /* WDS Destination Address Learning */
                        dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);

                        /* Due to a HW issue, sometimes we see that the
                         * sa_idx and da_idx are invalid with the sa_valid
                         * and da_valid bits set.
                         *
                         * In this case we also see that the value of
                         * sa_sw_peer_id is set to 0.
                         *
                         * Drop the packet if sa_idx or da_idx is OOB or
                         * sa_sw_peer_id is 0.
                         */
                        if (!is_sa_da_idx_valid(soc, rx_tlv_hdr)) {
                                qdf_nbuf_free(nbuf);
                                nbuf = next;
                                DP_STATS_INC(soc, rx.err.invalid_sa_da_idx,
                                             1);
                                dp_peer_unref_del_find_by_id(peer);
                                continue;
                        }
                        /* WDS Source Port Learning */
                        if (vdev->wds_enabled)
                                dp_rx_wds_srcport_learn(soc, rx_tlv_hdr,
                                                        peer, nbuf);

                        /* Intra-BSS forwarding */
                        if (dp_rx_check_ap_bridge(vdev))
                                if (dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr,
                                                       nbuf)) {
                                        nbuf = next;
                                        dp_peer_unref_del_find_by_id(peer);
                                        continue; /* Get next desc */
                                }
                }
                dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf);

                qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);

                /* Get TID from first msdu per MPDU, save to skb->priority */
                if (qdf_nbuf_is_rx_chfrag_start(nbuf))
                        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
                                                        rx_tlv_hdr);
                DP_RX_TID_SAVE(nbuf, tid);

                DP_RX_LIST_APPEND(deliver_list_head,
                                  deliver_list_tail,
                                  nbuf);

                DP_STATS_INC_PKT(peer, rx.to_stack, 1,
                                 qdf_nbuf_len(nbuf));

                nbuf = next;
                dp_peer_unref_del_find_by_id(peer);
        }

        if (deliver_list_head)
                dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
                                       deliver_list_tail);

        return rx_bufs_used; /* Assume no scale factor for now */
}
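/*
 * Illustrative usage sketch (an assumption for documentation purposes,
 * not part of the driver): the interrupt-service path is expected to
 * drive dp_rx_process() with a per-ring quota and subtract the returned
 * work from its remaining budget. Names such as "int_ctx", "ring",
 * "budget" and "remaining_quota" are hypothetical here.
 *
 *      uint32_t work_done;
 *
 *      work_done = dp_rx_process(int_ctx,
 *                                soc->reo_dest_ring[ring].hal_srng,
 *                                ring, remaining_quota);
 *      budget -= work_done;
 *      remaining_quota = budget;
 */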
/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function will detach DP Rx from the main device context.
 * It frees the Rx descriptor pool resources of the pdev.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
        uint8_t pdev_id = pdev->pdev_id;
        struct dp_soc *soc = pdev->soc;
        struct rx_desc_pool *rx_desc_pool;

        rx_desc_pool = &soc->rx_desc_buf[pdev_id];

        if (rx_desc_pool->pool_size != 0)
                dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
}
/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context. It allocates the Rx descriptor pool and
 * replenishes the RXDMA refill ring.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
        uint8_t pdev_id = pdev->pdev_id;
        struct dp_soc *soc = pdev->soc;
        uint32_t rxdma_entries;
        union dp_rx_desc_list_elem_t *desc_list = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;
        struct dp_srng *dp_rxdma_srng;
        struct rx_desc_pool *rx_desc_pool;

        if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "nss-wifi<4> skip Rx refill %d", pdev_id);
                return QDF_STATUS_SUCCESS;
        }

        pdev = soc->pdev_list[pdev_id];
        dp_rxdma_srng = &pdev->rx_refill_buf_ring;
        rxdma_entries = dp_rxdma_srng->num_entries;

        soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

        rx_desc_pool = &soc->rx_desc_buf[pdev_id];
        dp_rx_desc_pool_alloc(soc, pdev_id,
                              DP_RX_DESC_ALLOC_MULTIPLIER * rxdma_entries,
                              rx_desc_pool);

        rx_desc_pool->owner = DP_WBM2SW_RBM;
        /* For Rx buffers, WBM release ring is SW RING 3, for all pdevs */
        dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
                                0, &desc_list, &tail);

        return QDF_STATUS_SUCCESS;
}
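/*
 * Illustrative pairing sketch (an assumption, not part of the driver):
 * a pdev bring-up path would typically call dp_rx_pdev_attach() during
 * pdev attach and dp_rx_pdev_detach() on the corresponding teardown or
 * attach-failure path, e.g.:
 *
 *      if (dp_rx_pdev_attach(pdev) != QDF_STATUS_SUCCESS)
 *              goto fail;      // hypothetical error label
 *      ...
 *      dp_rx_pdev_detach(pdev);
 */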
/*
 * dp_rx_nbuf_prepare() - prepare RX nbuf
 * @soc: core txrx main context
 * @pdev: core txrx pdev context
 *
 * This function allocates and maps an nbuf for RX DMA usage, retrying
 * until it succeeds or the retry count reaches the maximum threshold.
 *
 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
 */
qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
        uint8_t *buf;
        int32_t nbuf_retry_count;
        QDF_STATUS ret;
        qdf_nbuf_t nbuf = NULL;

        for (nbuf_retry_count = 0; nbuf_retry_count <
                        QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
                        nbuf_retry_count++) {
                /* Allocate a new skb */
                nbuf = qdf_nbuf_alloc(soc->osdev,
                                      RX_BUFFER_SIZE,
                                      RX_BUFFER_RESERVATION,
                                      RX_BUFFER_ALIGNMENT,
                                      FALSE);
                if (nbuf == NULL) {
                        DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
                        continue;
                }

                buf = qdf_nbuf_data(nbuf);
                memset(buf, 0, RX_BUFFER_SIZE);

                ret = qdf_nbuf_map_single(soc->osdev, nbuf,
                                          QDF_DMA_BIDIRECTIONAL);
                /* nbuf map failed */
                if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                        qdf_nbuf_free(nbuf);
                        DP_STATS_INC(pdev, replenish.map_err, 1);
                        continue;
                }
                /* qdf_nbuf alloc and map succeeded */
                break;
        }

        /* nbuf alloc or map still failed after exhausting all retries */
        if (qdf_unlikely(nbuf_retry_count >=
                        QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
                return NULL;

        return nbuf;
}
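/*
 * Illustrative usage sketch (an assumption, not part of the driver):
 * a replenish-style caller would typically obtain a mapped nbuf from
 * dp_rx_nbuf_prepare() and then read back its DMA address before
 * handing it to the ring, e.g.:
 *
 *      qdf_nbuf_t nbuf;
 *      qdf_dma_addr_t paddr;
 *
 *      nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *      if (!nbuf)
 *              return QDF_STATUS_E_NOMEM;      // hypothetical error handling
 *      paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 */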