  1. /*
  2. * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "hal_hw_headers.h"
  19. #include "dp_types.h"
  20. #include "dp_rx.h"
  21. #include "dp_peer.h"
  22. #include "hal_rx.h"
  23. #include "hal_api.h"
  24. #include "qdf_nbuf.h"
  25. #ifdef MESH_MODE_SUPPORT
  26. #include "if_meta_hdr.h"
  27. #endif
  28. #include "dp_internal.h"
  29. #include "dp_rx_mon.h"
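/*
 * Descriptive note: when RX_DESC_DEBUG_CHECK is enabled, dp_rx_desc_prep()
 * below additionally stamps the descriptor with DP_RX_DESC_MAGIC so that
 * sanity checks elsewhere in the driver can spot a stale or corrupted
 * rx descriptor; otherwise it only attaches the freshly allocated nbuf.
 */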
  30. #ifdef RX_DESC_DEBUG_CHECK
  31. static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
  32. {
  33. rx_desc->magic = DP_RX_DESC_MAGIC;
  34. rx_desc->nbuf = nbuf;
  35. }
  36. #else
  37. static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
  38. {
  39. rx_desc->nbuf = nbuf;
  40. }
  41. #endif
  42. #ifdef CONFIG_WIN
  43. static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
  44. {
  45. return vdev->ap_bridge_enabled;
  46. }
  47. #else
  48. static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
  49. {
  50. if (vdev->opmode != wlan_op_mode_sta)
  51. return true;
  52. else
  53. return false;
  54. }
  55. #endif
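/*
 * Note: on CONFIG_WIN builds the per-vdev ap_bridge_enabled flag decides
 * whether intra-BSS bridging is allowed, while on other builds every
 * non-STA vdev is treated as bridge-enabled (see dp_rx_check_ap_bridge()
 * above).
 */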
  56. /*
  57. * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
  58. * called during dp rx initialization
  59. * and at the end of dp_rx_process.
  60. *
  61. * @soc: core txrx main context
  62. * @mac_id: mac_id which is one of 3 mac_ids
  63. * @dp_rxdma_srng: dp rxdma circular ring
  64. * @rx_desc_pool: Pointer to free Rx descriptor pool
65. * @num_req_buffers: number of buffers to be replenished
  66. * @desc_list: list of descs if called from dp_rx_process
  67. * or NULL during dp rx initialization or out of buffer
  68. * interrupt.
  69. * @tail: tail of descs list
70. * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
  71. */
  72. QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
  73. struct dp_srng *dp_rxdma_srng,
  74. struct rx_desc_pool *rx_desc_pool,
  75. uint32_t num_req_buffers,
  76. union dp_rx_desc_list_elem_t **desc_list,
  77. union dp_rx_desc_list_elem_t **tail)
  78. {
  79. uint32_t num_alloc_desc;
  80. uint16_t num_desc_to_free = 0;
  81. struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
  82. uint32_t num_entries_avail;
  83. uint32_t count;
  84. int sync_hw_ptr = 1;
  85. qdf_dma_addr_t paddr;
  86. qdf_nbuf_t rx_netbuf;
  87. void *rxdma_ring_entry;
  88. union dp_rx_desc_list_elem_t *next;
  89. QDF_STATUS ret;
  90. void *rxdma_srng;
  91. rxdma_srng = dp_rxdma_srng->hal_srng;
  92. if (!rxdma_srng) {
  93. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  94. "rxdma srng not initialized");
  95. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  96. return QDF_STATUS_E_FAILURE;
  97. }
  98. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  99. "requested %d buffers for replenish", num_req_buffers);
  100. hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
  101. num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
  102. rxdma_srng,
  103. sync_hw_ptr);
  104. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  105. "no of available entries in rxdma ring: %d",
  106. num_entries_avail);
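/*
 * Sizing policy for this replenish pass: if the caller did not pass a
 * pre-allocated desc_list and more than 3/4 of the ring is empty,
 * opportunistically fill every available entry; otherwise clamp the
 * request to the entries actually available and remember how many
 * descriptors have to go back to the free list.
 */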
  107. if (!(*desc_list) && (num_entries_avail >
  108. ((dp_rxdma_srng->num_entries * 3) / 4))) {
  109. num_req_buffers = num_entries_avail;
  110. } else if (num_entries_avail < num_req_buffers) {
  111. num_desc_to_free = num_req_buffers - num_entries_avail;
  112. num_req_buffers = num_entries_avail;
  113. }
  114. if (qdf_unlikely(!num_req_buffers)) {
  115. num_desc_to_free = num_req_buffers;
  116. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  117. goto free_descs;
  118. }
  119. /*
  120. * if desc_list is NULL, allocate the descs from freelist
  121. */
  122. if (!(*desc_list)) {
  123. num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
  124. rx_desc_pool,
  125. num_req_buffers,
  126. desc_list,
  127. tail);
  128. if (!num_alloc_desc) {
  129. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  130. "no free rx_descs in freelist");
  131. DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
  132. num_req_buffers);
  133. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  134. return QDF_STATUS_E_NOMEM;
  135. }
  136. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  137. "%d rx desc allocated", num_alloc_desc);
  138. num_req_buffers = num_alloc_desc;
  139. }
  140. count = 0;
  141. while (count < num_req_buffers) {
  142. rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
  143. RX_BUFFER_SIZE,
  144. RX_BUFFER_RESERVATION,
  145. RX_BUFFER_ALIGNMENT,
  146. FALSE);
  147. if (rx_netbuf == NULL) {
  148. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  149. continue;
  150. }
  151. ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
  152. QDF_DMA_BIDIRECTIONAL);
  153. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  154. qdf_nbuf_free(rx_netbuf);
  155. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  156. continue;
  157. }
  158. paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
159. /*
160. * check if the physical address of nbuf->data is
161. * less than 0x50000000; if so, free the nbuf and try
162. * allocating a new nbuf. We can retry up to 100 times.
163. * This is a temporary WAR till we fix it properly.
164. */
  165. ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
  166. if (ret == QDF_STATUS_E_FAILURE) {
  167. DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
  168. break;
  169. }
  170. count++;
  171. rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
  172. rxdma_srng);
  173. qdf_assert_always(rxdma_ring_entry);
  174. next = (*desc_list)->next;
  175. dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
  176. (*desc_list)->rx_desc.in_use = 1;
  177. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  178. "rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
  179. rx_netbuf, qdf_nbuf_data(rx_netbuf),
  180. (unsigned long long)paddr, (*desc_list)->rx_desc.cookie);
  181. hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
  182. (*desc_list)->rx_desc.cookie,
  183. rx_desc_pool->owner);
  184. *desc_list = next;
  185. }
  186. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  187. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  188. "successfully replenished %d buffers", num_req_buffers);
  189. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  190. "%d rx desc added back to free list", num_desc_to_free);
  191. DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
  192. (RX_BUFFER_SIZE * num_req_buffers));
  193. free_descs:
  194. DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
  195. /*
  196. * add any available free desc back to the free list
  197. */
  198. if (*desc_list)
  199. dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
  200. mac_id, rx_desc_pool);
  201. return QDF_STATUS_SUCCESS;
  202. }
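/*
 * Illustrative call sequence (sketch only): at the end of dp_rx_process()
 * the descriptors reaped from the REO ring are handed straight back here
 * so that the refill ring is topped up in the same pass, e.g.
 *
 *   dp_rx_buffers_replenish(soc, mac_id, &pdev->rx_refill_buf_ring,
 *                           &soc->rx_desc_buf[mac_id],
 *                           rx_bufs_reaped[mac_id],
 *                           &head[mac_id], &tail[mac_id]);
 */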
  203. /*
  204. * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
  205. * pkts to RAW mode simulation to
  206. * decapsulate the pkt.
  207. *
  208. * @vdev: vdev on which RAW mode is enabled
  209. * @nbuf_list: list of RAW pkts to process
  210. * @peer: peer object from which the pkt is rx
  211. *
  212. * Return: void
  213. */
  214. void
  215. dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
  216. struct dp_peer *peer)
  217. {
  218. qdf_nbuf_t deliver_list_head = NULL;
  219. qdf_nbuf_t deliver_list_tail = NULL;
  220. qdf_nbuf_t nbuf;
  221. nbuf = nbuf_list;
  222. while (nbuf) {
  223. qdf_nbuf_t next = qdf_nbuf_next(nbuf);
  224. DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
  225. DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
  226. DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
  227. /*
  228. * reset the chfrag_start and chfrag_end bits in nbuf cb
  229. * as this is a non-amsdu pkt and RAW mode simulation expects
230. * these bits to be 0 for a non-amsdu pkt.
  231. */
  232. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  233. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  234. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  235. qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
  236. }
  237. nbuf = next;
  238. }
  239. vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
  240. &deliver_list_tail, (struct cdp_peer*) peer);
  241. vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
  242. }
  243. #ifdef DP_LFR
  244. /*
  245. * In case of LFR, data of a new peer might be sent up
  246. * even before peer is added.
  247. */
  248. static inline struct dp_vdev *
  249. dp_get_vdev_from_peer(struct dp_soc *soc,
  250. uint16_t peer_id,
  251. struct dp_peer *peer,
  252. struct hal_rx_mpdu_desc_info mpdu_desc_info)
  253. {
  254. struct dp_vdev *vdev;
  255. uint8_t vdev_id;
  256. if (unlikely(!peer)) {
  257. if (peer_id != HTT_INVALID_PEER) {
  258. vdev_id = DP_PEER_METADATA_ID_GET(
  259. mpdu_desc_info.peer_meta_data);
  260. QDF_TRACE(QDF_MODULE_ID_DP,
  261. QDF_TRACE_LEVEL_DEBUG,
  262. FL("PeerID %d not found use vdevID %d"),
  263. peer_id, vdev_id);
  264. vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
  265. vdev_id);
  266. } else {
  267. QDF_TRACE(QDF_MODULE_ID_DP,
  268. QDF_TRACE_LEVEL_DEBUG,
  269. FL("Invalid PeerID %d"),
  270. peer_id);
  271. return NULL;
  272. }
  273. } else {
  274. vdev = peer->vdev;
  275. }
  276. return vdev;
  277. }
  278. #else
  279. static inline struct dp_vdev *
  280. dp_get_vdev_from_peer(struct dp_soc *soc,
  281. uint16_t peer_id,
  282. struct dp_peer *peer,
  283. struct hal_rx_mpdu_desc_info mpdu_desc_info)
  284. {
  285. if (unlikely(!peer)) {
  286. QDF_TRACE(QDF_MODULE_ID_DP,
  287. QDF_TRACE_LEVEL_DEBUG,
  288. FL("Peer not found for peerID %d"),
  289. peer_id);
  290. return NULL;
  291. } else {
  292. return peer->vdev;
  293. }
  294. }
  295. #endif
  296. /**
  297. * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
  298. *
  299. * @soc: core txrx main context
  300. * @sa_peer : source peer entry
  301. * @rx_tlv_hdr : start address of rx tlvs
  302. * @nbuf : nbuf that has to be intrabss forwarded
  303. *
  304. * Return: bool: true if it is forwarded else false
  305. */
  306. static bool
  307. dp_rx_intrabss_fwd(struct dp_soc *soc,
  308. struct dp_peer *sa_peer,
  309. uint8_t *rx_tlv_hdr,
  310. qdf_nbuf_t nbuf)
  311. {
  312. uint16_t da_idx;
  313. uint16_t len;
  314. struct dp_peer *da_peer;
  315. struct dp_ast_entry *ast_entry;
  316. qdf_nbuf_t nbuf_copy;
  317. /* check if the destination peer is available in peer table
  318. * and also check if the source peer and destination peer
  319. * belong to the same vap and destination peer is not bss peer.
  320. */
  321. if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
  322. !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
  323. da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);
  324. ast_entry = soc->ast_table[da_idx];
  325. if (!ast_entry)
  326. return false;
  327. da_peer = ast_entry->peer;
  328. if (!da_peer)
  329. return false;
  330. if (da_peer->vdev == sa_peer->vdev && !da_peer->bss_peer) {
  331. memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
  332. len = qdf_nbuf_len(nbuf);
  333. /* linearize the nbuf just before we send to
  334. * dp_tx_send()
  335. */
  336. if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
  337. if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
  338. return false;
  339. nbuf = qdf_nbuf_unshare(nbuf);
  340. if (!nbuf) {
  341. DP_STATS_INC_PKT(sa_peer,
  342. rx.intra_bss.fail,
  343. 1,
  344. len);
  345. /* return true even though the pkt is
  346. * not forwarded. Basically skb_unshare
  347. * failed and we want to continue with
  348. * next nbuf.
  349. */
  350. return true;
  351. }
  352. }
  353. if (!dp_tx_send(sa_peer->vdev, nbuf)) {
  354. DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts,
  355. 1, len);
  356. return true;
  357. } else {
  358. DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1,
  359. len);
  360. return false;
  361. }
  362. }
  363. }
364. /* if it is a broadcast pkt (e.g. ARP) and it is not from its own
365. * source, then clone the pkt and send the cloned pkt for
366. * intra-BSS forwarding while the original pkt goes up the network stack.
367. * Note: how do we handle multicast pkts? Do we forward
368. * all multicast pkts as-is, or let a higher layer module
369. * like igmpsnoop decide whether to forward or not with
370. * Mcast enhancement?
  371. */
  372. else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
  373. !sa_peer->bss_peer))) {
  374. nbuf_copy = qdf_nbuf_copy(nbuf);
  375. if (!nbuf_copy)
  376. return false;
  377. memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
  378. len = qdf_nbuf_len(nbuf_copy);
  379. if (dp_tx_send(sa_peer->vdev, nbuf_copy)) {
  380. DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1, len);
  381. qdf_nbuf_free(nbuf_copy);
  382. } else
  383. DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 1, len);
  384. }
  385. /* return false as we have to still send the original pkt
  386. * up the stack
  387. */
  388. return false;
  389. }
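/*
 * Usage note (sketch): a caller in the Rx path is expected to skip
 * delivering the nbuf to the stack when this returns true, since
 * ownership of the buffer has either passed to dp_tx_send() or the
 * buffer was intentionally consumed (the unshare-failure case above).
 */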
  390. #ifdef MESH_MODE_SUPPORT
  391. /**
  392. * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
  393. *
  394. * @vdev: DP Virtual device handle
  395. * @nbuf: Buffer pointer
  396. * @rx_tlv_hdr: start of rx tlv header
  397. * @peer: pointer to peer
  398. *
399. * This function allocates memory for mesh receive stats and fills the
400. * required stats. It stores the memory address in the skb cb.
  401. *
  402. * Return: void
  403. */
  404. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  405. uint8_t *rx_tlv_hdr, struct dp_peer *peer)
  406. {
  407. struct mesh_recv_hdr_s *rx_info = NULL;
  408. uint32_t pkt_type;
  409. uint32_t nss;
  410. uint32_t rate_mcs;
  411. uint32_t bw;
  412. /* fill recv mesh stats */
  413. rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
414. /* upper layers are responsible for freeing this memory */
  415. if (rx_info == NULL) {
  416. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  417. "Memory allocation failed for mesh rx stats");
  418. DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
  419. return;
  420. }
  421. rx_info->rs_flags = MESH_RXHDR_VER1;
  422. if (qdf_nbuf_is_rx_chfrag_start(nbuf))
  423. rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
  424. if (qdf_nbuf_is_rx_chfrag_end(nbuf))
  425. rx_info->rs_flags |= MESH_RX_LAST_MSDU;
  426. if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
  427. rx_info->rs_flags |= MESH_RX_DECRYPTED;
  428. rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
  429. if (vdev->osif_get_key)
  430. vdev->osif_get_key(vdev->osif_vdev,
  431. &rx_info->rs_decryptkey[0],
  432. &peer->mac_addr.raw[0],
  433. rx_info->rs_keyix);
  434. }
  435. rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
  436. rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
  437. pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
  438. rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
  439. bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
  440. nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
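/*
 * rs_ratephy1 packs the PHY info as:
 *   bits [7:0]   rate_mcs
 *   bits [15:8]  nss
 *   bits [23:16] pkt_type
 *   bits [31:24] bw
 */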
  441. rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
  442. (bw << 24);
  443. qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
  444. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
  445. FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
  446. rx_info->rs_flags,
  447. rx_info->rs_rssi,
  448. rx_info->rs_channel,
  449. rx_info->rs_ratephy1,
  450. rx_info->rs_keyix);
  451. }
  452. /**
  453. * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
  454. *
  455. * @vdev: DP Virtual device handle
  456. * @nbuf: Buffer pointer
  457. * @rx_tlv_hdr: start of rx tlv header
  458. *
459. * This checks if the received packet matches any filter-out
460. * category and drops the packet if it matches.
461. *
462. * Return: status (0 indicates drop, non-zero indicates no drop)
  463. */
  464. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  465. uint8_t *rx_tlv_hdr)
  466. {
  467. union dp_align_mac_addr mac_addr;
  468. if (qdf_unlikely(vdev->mesh_rx_filter)) {
  469. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
  470. if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
  471. return QDF_STATUS_SUCCESS;
  472. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
  473. if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
  474. return QDF_STATUS_SUCCESS;
  475. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
  476. if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
  477. && !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
  478. return QDF_STATUS_SUCCESS;
  479. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
  480. if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
  481. &mac_addr.raw[0]))
  482. return QDF_STATUS_E_FAILURE;
  483. if (!qdf_mem_cmp(&mac_addr.raw[0],
  484. &vdev->mac_addr.raw[0],
  485. DP_MAC_ADDR_LEN))
  486. return QDF_STATUS_SUCCESS;
  487. }
  488. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
  489. if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
  490. &mac_addr.raw[0]))
  491. return QDF_STATUS_E_FAILURE;
  492. if (!qdf_mem_cmp(&mac_addr.raw[0],
  493. &vdev->mac_addr.raw[0],
  494. DP_MAC_ADDR_LEN))
  495. return QDF_STATUS_SUCCESS;
  496. }
  497. }
  498. return QDF_STATUS_E_FAILURE;
  499. }
  500. #else
  501. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  502. uint8_t *rx_tlv_hdr, struct dp_peer *peer)
  503. {
  504. }
  505. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  506. uint8_t *rx_tlv_hdr)
  507. {
  508. return QDF_STATUS_E_FAILURE;
  509. }
  510. #endif
  511. #ifdef CONFIG_WIN
  512. /**
  513. * dp_rx_nac_filter(): Function to perform filtering of non-associated
  514. * clients
  515. * @pdev: DP pdev handle
  516. * @rx_pkt_hdr: Rx packet Header
  517. *
  518. * return: dp_vdev*
  519. */
  520. static
  521. struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
  522. uint8_t *rx_pkt_hdr)
  523. {
  524. struct ieee80211_frame *wh;
  525. struct dp_neighbour_peer *peer = NULL;
  526. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  527. if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
  528. return NULL;
  529. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  530. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  531. neighbour_peer_list_elem) {
  532. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  533. wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
  534. QDF_TRACE(
  535. QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  536. FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
  537. peer->neighbour_peers_macaddr.raw[0],
  538. peer->neighbour_peers_macaddr.raw[1],
  539. peer->neighbour_peers_macaddr.raw[2],
  540. peer->neighbour_peers_macaddr.raw[3],
  541. peer->neighbour_peers_macaddr.raw[4],
  542. peer->neighbour_peers_macaddr.raw[5]);
  543. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  544. return pdev->monitor_vdev;
  545. }
  546. }
  547. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  548. return NULL;
  549. }
  550. /**
  551. * dp_rx_process_nac_rssi_frames(): Store RSSI for configured NAC
  552. * @pdev: DP pdev handle
  553. * @rx_tlv_hdr: tlv hdr buf
  554. *
  555. * return: None
  556. */
  557. #ifdef ATH_SUPPORT_NAC_RSSI
  558. static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
  559. {
  560. struct dp_vdev *vdev = NULL;
  561. struct dp_soc *soc = pdev->soc;
  562. uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
  563. struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
  564. if (pdev->nac_rssi_filtering) {
  565. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  566. if (vdev->cdp_nac_rssi_enabled &&
  567. (qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
  568. wh->i_addr1, DP_MAC_ADDR_LEN) == 0)) {
  569. QDF_TRACE(QDF_MODULE_ID_DP,
  570. QDF_TRACE_LEVEL_DEBUG, "RSSI updated");
  571. vdev->cdp_nac_rssi.vdev_id = vdev->vdev_id;
  572. vdev->cdp_nac_rssi.client_rssi =
  573. hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
  574. dp_wdi_event_handler(WDI_EVENT_NAC_RSSI, soc,
  575. (void *)&vdev->cdp_nac_rssi,
  576. HTT_INVALID_PEER, WDI_NO_VAL,
  577. pdev->pdev_id);
  578. }
  579. }
  580. }
  581. }
  582. #else
  583. static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
  584. {
  585. }
  586. #endif
  587. /**
  588. * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
  589. * @soc: DP SOC handle
  590. * @mpdu: mpdu for which peer is invalid
  591. *
  592. * return: integer type
  593. */
  594. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
  595. {
  596. struct dp_invalid_peer_msg msg;
  597. struct dp_vdev *vdev = NULL;
  598. struct dp_pdev *pdev = NULL;
  599. struct ieee80211_frame *wh;
  600. uint8_t i;
  601. qdf_nbuf_t curr_nbuf, next_nbuf;
  602. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  603. uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
  604. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  605. if (!DP_FRAME_IS_DATA(wh)) {
  606. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  607. "NAWDS valid only for data frames");
  608. goto free;
  609. }
  610. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  611. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  612. "Invalid nbuf length");
  613. goto free;
  614. }
  615. for (i = 0; i < MAX_PDEV_CNT; i++) {
  616. pdev = soc->pdev_list[i];
  617. if (!pdev) {
  618. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  619. "PDEV not found");
  620. continue;
  621. }
  622. if (pdev->filter_neighbour_peers) {
623. /* Next Hop scenario not yet handled */
  624. vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
  625. if (vdev) {
  626. dp_rx_mon_deliver(soc, i,
  627. pdev->invalid_peer_head_msdu,
  628. pdev->invalid_peer_tail_msdu);
  629. pdev->invalid_peer_head_msdu = NULL;
  630. pdev->invalid_peer_tail_msdu = NULL;
  631. return 0;
  632. }
  633. }
  634. dp_rx_process_nac_rssi_frames(pdev, rx_tlv_hdr);
  635. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  636. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  637. DP_MAC_ADDR_LEN) == 0) {
  638. goto out;
  639. }
  640. }
  641. }
  642. if (!vdev) {
  643. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  644. "VDEV not found");
  645. goto free;
  646. }
  647. out:
  648. msg.wh = wh;
  649. qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
  650. msg.nbuf = mpdu;
  651. msg.vdev_id = vdev->vdev_id;
  652. if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
  653. pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
  654. &msg);
  655. free:
  656. /* Drop and free packet */
  657. curr_nbuf = mpdu;
  658. while (curr_nbuf) {
  659. next_nbuf = qdf_nbuf_next(curr_nbuf);
  660. qdf_nbuf_free(curr_nbuf);
  661. curr_nbuf = next_nbuf;
  662. }
  663. return 0;
  664. }
  665. /**
  666. * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
  667. * @soc: DP SOC handle
  668. * @mpdu: mpdu for which peer is invalid
  669. * @mpdu_done: if an mpdu is completed
  670. *
  671. * return: integer type
  672. */
  673. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  674. qdf_nbuf_t mpdu, bool mpdu_done)
  675. {
  676. /* Only trigger the process when mpdu is completed */
  677. if (mpdu_done)
  678. dp_rx_process_invalid_peer(soc, mpdu);
  679. }
  680. #else
  681. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
  682. {
  683. qdf_nbuf_t curr_nbuf, next_nbuf;
  684. struct dp_pdev *pdev;
  685. uint8_t i;
  686. curr_nbuf = mpdu;
  687. while (curr_nbuf) {
  688. next_nbuf = qdf_nbuf_next(curr_nbuf);
  689. /* Drop and free packet */
  690. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  691. qdf_nbuf_len(curr_nbuf));
  692. qdf_nbuf_free(curr_nbuf);
  693. curr_nbuf = next_nbuf;
  694. }
  695. /* reset the head and tail pointers */
  696. for (i = 0; i < MAX_PDEV_CNT; i++) {
  697. pdev = soc->pdev_list[i];
  698. if (!pdev) {
  699. QDF_TRACE(QDF_MODULE_ID_DP,
  700. QDF_TRACE_LEVEL_ERROR,
  701. "PDEV not found");
  702. continue;
  703. }
  704. pdev->invalid_peer_head_msdu = NULL;
  705. pdev->invalid_peer_tail_msdu = NULL;
  706. }
  707. return 0;
  708. }
  709. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  710. qdf_nbuf_t mpdu, bool mpdu_done)
  711. {
  712. /* To avoid compiler warning */
  713. mpdu_done = mpdu_done;
  714. /* Process the nbuf */
  715. dp_rx_process_invalid_peer(soc, mpdu);
  716. }
  717. #endif
  718. #if defined(FEATURE_LRO)
  719. static void dp_rx_print_lro_info(uint8_t *rx_tlv)
  720. {
  721. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  722. FL("----------------------RX DESC LRO----------------------\n"));
  723. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  724. FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
  725. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  726. FL("pure_ack 0x%x"), HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
  727. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  728. FL("chksum 0x%x"), HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
  729. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  730. FL("TCP seq num 0x%x"), HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
  731. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  732. FL("TCP ack num 0x%x"), HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
  733. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  734. FL("TCP window 0x%x"), HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
  735. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  736. FL("TCP protocol 0x%x"), HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
  737. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  738. FL("TCP offset 0x%x"), HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
  739. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  740. FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
  741. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  742. FL("---------------------------------------------------------\n"));
  743. }
  744. /**
  745. * dp_rx_lro() - LRO related processing
  746. * @rx_tlv: TLV data extracted from the rx packet
  747. * @peer: destination peer of the msdu
  748. * @msdu: network buffer
  749. * @ctx: LRO context
  750. *
  751. * This function performs the LRO related processing of the msdu
  752. *
753. * Return: void
  754. */
  755. static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
  756. qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
  757. {
  758. if (!peer || !peer->vdev || !peer->vdev->lro_enable) {
  759. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  760. FL("no peer, no vdev or LRO disabled"));
  761. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
  762. return;
  763. }
  764. qdf_assert(rx_tlv);
  765. dp_rx_print_lro_info(rx_tlv);
  766. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
  767. HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
  768. QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
  769. HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
  770. QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
  771. HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
  772. QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
  773. HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
  774. QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
  775. HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
  776. QDF_NBUF_CB_RX_TCP_WIN(msdu) =
  777. HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
  778. QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
  779. HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
  780. QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
  781. HAL_RX_TLV_GET_IPV6(rx_tlv);
  782. QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
  783. HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
  784. QDF_NBUF_CB_RX_FLOW_ID(msdu) =
  785. HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
  786. QDF_NBUF_CB_RX_LRO_CTX(msdu) = (unsigned char *)ctx;
  787. }
  788. #else
  789. static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
  790. qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
  791. {
  792. }
  793. #endif
  794. /**
  795. * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
  796. *
  797. * @nbuf: pointer to msdu.
  798. * @mpdu_len: mpdu length
  799. *
800. * Return: true if nbuf is the last msdu of the mpdu, else false.
  801. */
  802. static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
  803. {
  804. bool last_nbuf;
  805. if (*mpdu_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
  806. qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
  807. last_nbuf = false;
  808. } else {
  809. qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
  810. last_nbuf = true;
  811. }
  812. *mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);
  813. return last_nbuf;
  814. }
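/*
 * Worked example (illustrative values only): assuming RX_BUFFER_SIZE is
 * 2048 and RX_PKT_TLVS_LEN is 384, each non-last nbuf carries
 * 2048 - 384 = 1664 bytes of msdu payload. For an mpdu_len of 2304:
 * the first call sets pktlen to 2048 and returns false with 640 bytes
 * left; the second call sets pktlen to 640 + 384 = 1024 and returns true.
 */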
  815. /**
  816. * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
  817. * multiple nbufs.
  818. * @nbuf: pointer to the first msdu of an amsdu.
  819. * @rx_tlv_hdr: pointer to the start of RX TLV headers.
  820. *
  821. *
  822. * This function implements the creation of RX frag_list for cases
  823. * where an MSDU is spread across multiple nbufs.
  824. *
  825. * Return: returns the head nbuf which contains complete frag_list.
  826. */
  827. qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
  828. {
  829. qdf_nbuf_t parent, next, frag_list;
  830. uint16_t frag_list_len = 0;
  831. uint16_t mpdu_len;
  832. bool last_nbuf;
  833. mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
  834. /*
  835. * this is a case where the complete msdu fits in one single nbuf.
  836. * in this case HW sets both start and end bit and we only need to
  837. * reset these bits for RAW mode simulator to decap the pkt
  838. */
  839. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  840. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  841. qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
  842. qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
  843. return nbuf;
  844. }
  845. /*
  846. * This is a case where we have multiple msdus (A-MSDU) spread across
  847. * multiple nbufs. here we create a fraglist out of these nbufs.
  848. *
  849. * the moment we encounter a nbuf with continuation bit set we
  850. * know for sure we have an MSDU which is spread across multiple
  851. * nbufs. We loop through and reap nbufs till we reach last nbuf.
  852. */
  853. parent = nbuf;
  854. frag_list = nbuf->next;
  855. nbuf = nbuf->next;
  856. /*
  857. * set the start bit in the first nbuf we encounter with continuation
  858. * bit set. This has the proper mpdu length set as it is the first
  859. * msdu of the mpdu. this becomes the parent nbuf and the subsequent
  860. * nbufs will form the frag_list of the parent nbuf.
  861. */
  862. qdf_nbuf_set_rx_chfrag_start(parent, 1);
  863. last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
  864. /*
  865. * this is where we set the length of the fragments which are
  866. * associated to the parent nbuf. We iterate through the frag_list
  867. * till we hit the last_nbuf of the list.
  868. */
  869. do {
  870. last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
  871. qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
  872. frag_list_len += qdf_nbuf_len(nbuf);
  873. if (last_nbuf) {
  874. next = nbuf->next;
  875. nbuf->next = NULL;
  876. break;
  877. }
  878. nbuf = nbuf->next;
  879. } while (!last_nbuf);
  880. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  881. qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
  882. parent->next = next;
  883. qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
  884. return parent;
  885. }
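/*
 * Resulting layout: the first (parent) nbuf keeps its own payload and the
 * remaining buffers of the msdu are chained onto it as an ext/frag list
 * via qdf_nbuf_append_ext_list(), so callers see one logical nbuf whose
 * total frag_list length is frag_list_len.
 */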
  886. static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
  887. struct dp_peer *peer,
  888. qdf_nbuf_t nbuf_head,
  889. qdf_nbuf_t nbuf_tail)
  890. {
  891. /*
  892. * highly unlikely to have a vdev without a registered rx
893. * callback function. If so, free the nbuf_list.
  894. */
  895. if (qdf_unlikely(!vdev->osif_rx)) {
  896. qdf_nbuf_t nbuf;
  897. do {
  898. nbuf = nbuf_head;
  899. nbuf_head = nbuf_head->next;
  900. qdf_nbuf_free(nbuf);
  901. } while (nbuf_head);
  902. return;
  903. }
  904. if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
  905. (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
  906. vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
  907. &nbuf_tail, (struct cdp_peer *) peer);
  908. }
  909. vdev->osif_rx(vdev->osif_vdev, nbuf_head);
  910. }
  911. /**
  912. * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
  913. * @nbuf: pointer to the first msdu of an amsdu.
  914. * @rx_tlv_hdr: pointer to the start of RX TLV headers.
  915. *
916. * The ip_summed field of the skb is set based on whether HW validated the
  917. * IP/TCP/UDP checksum.
  918. *
  919. * Return: void
  920. */
  921. static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
  922. qdf_nbuf_t nbuf,
  923. uint8_t *rx_tlv_hdr)
  924. {
  925. qdf_nbuf_rx_cksum_t cksum = {0};
  926. bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
  927. bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
  928. if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
  929. cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
  930. qdf_nbuf_set_rx_cksum(nbuf, &cksum);
  931. } else {
  932. DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
  933. DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
  934. }
  935. }
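/*
 * Note: marking the frame as QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY lets
 * the network stack skip software checksum validation for it; on a
 * HW-detected error only the pdev error counters are updated and the
 * stack is left to validate (and drop) the frame itself.
 */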
  936. /**
  937. * dp_rx_msdu_stats_update() - update per msdu stats.
  938. * @soc: core txrx main context
  939. * @nbuf: pointer to the first msdu of an amsdu.
  940. * @rx_tlv_hdr: pointer to the start of RX TLV headers.
  941. * @peer: pointer to the peer object.
  942. * @ring_id: reo dest ring number on which pkt is reaped.
  943. *
  944. * update all the per msdu stats for that nbuf.
  945. * Return: void
  946. */
  947. static void dp_rx_msdu_stats_update(struct dp_soc *soc,
  948. qdf_nbuf_t nbuf,
  949. uint8_t *rx_tlv_hdr,
  950. struct dp_peer *peer,
  951. uint8_t ring_id)
  952. {
  953. bool is_ampdu, is_not_amsdu;
  954. uint16_t peer_id;
  955. uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
  956. struct dp_vdev *vdev = peer->vdev;
  957. struct ether_header *eh;
  958. uint16_t msdu_len = qdf_nbuf_len(nbuf);
  959. peer_id = DP_PEER_METADATA_PEER_ID_GET(
  960. hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr));
  961. is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
  962. qdf_nbuf_is_rx_chfrag_end(nbuf);
  963. DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
  964. DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
  965. DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
  966. if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
  967. (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
  968. eh = (struct ether_header *)qdf_nbuf_data(nbuf);
  969. if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) {
  970. DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
  971. } else {
  972. DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
  973. }
  974. }
  975. /*
  976. * currently we can return from here as we have similar stats
  977. * updated at per ppdu level instead of msdu level
  978. */
  979. if (!soc->process_rx_status)
  980. return;
  981. is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
  982. DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
  983. DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
  984. sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
  985. mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
  986. tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
  987. bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
  988. reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
  989. rx_tlv_hdr);
  990. nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
  991. pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
  992. /* Save tid to skb->priority */
  993. DP_RX_TID_SAVE(nbuf, tid);
  994. DP_STATS_INC(peer, rx.bw[bw], 1);
  995. DP_STATS_INC(peer, rx.nss[nss], 1);
  996. DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
  997. DP_STATS_INCC(peer, rx.err.mic_err, 1,
  998. hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
  999. DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
  1000. hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
  1001. DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
  1002. DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
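/*
 * The paired INCC updates below record the frame under its mcs index when
 * the reported mcs is within the range defined for the pkt_type
 * (11a/b/n/ac/ax), and under the MAX_MCS overflow bucket when it is out
 * of range.
 */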
  1003. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
  1004. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
  1005. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1006. ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
  1007. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
  1008. ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
  1009. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1010. ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
  1011. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
  1012. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
  1013. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1014. ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
  1015. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
  1016. ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  1017. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1018. ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  1019. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
  1020. ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
  1021. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1022. ((mcs <= MAX_MCS) && (pkt_type == DOT11_AX)));
  1023. if ((soc->process_rx_status) &&
  1024. hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
  1025. if (soc->cdp_soc.ol_ops->update_dp_stats) {
  1026. soc->cdp_soc.ol_ops->update_dp_stats(
  1027. vdev->pdev->ctrl_pdev,
  1028. &peer->stats,
  1029. peer_id,
  1030. UPDATE_PEER_STATS);
  1031. }
  1032. }
  1033. }
  1034. #ifdef WDS_VENDOR_EXTENSION
  1035. int dp_wds_rx_policy_check(
  1036. uint8_t *rx_tlv_hdr,
  1037. struct dp_vdev *vdev,
  1038. struct dp_peer *peer,
  1039. int rx_mcast
  1040. )
  1041. {
  1042. struct dp_peer *bss_peer;
  1043. int fr_ds, to_ds, rx_3addr, rx_4addr;
  1044. int rx_policy_ucast, rx_policy_mcast;
  1045. if (vdev->opmode == wlan_op_mode_ap) {
  1046. TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
  1047. if (bss_peer->bss_peer) {
  1048. /* if wds policy check is not enabled on this vdev, accept all frames */
  1049. if (!bss_peer->wds_ecm.wds_rx_filter) {
  1050. return 1;
  1051. }
  1052. break;
  1053. }
  1054. }
  1055. rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
  1056. rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
  1057. } else { /* sta mode */
  1058. if (!peer->wds_ecm.wds_rx_filter) {
  1059. return 1;
  1060. }
  1061. rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
  1062. rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
  1063. }
  1064. /* ------------------------------------------------
  1065. * self
  1066. * peer- rx rx-
  1067. * wds ucast mcast dir policy accept note
  1068. * ------------------------------------------------
  1069. * 1 1 0 11 x1 1 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
  1070. * 1 1 0 01 x1 0 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
  1071. * 1 1 0 10 x1 0 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
  1072. * 1 1 0 00 x1 0 bad frame, won't see it
  1073. * 1 0 1 11 1x 1 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
  1074. * 1 0 1 01 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
  1075. * 1 0 1 10 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
  1076. * 1 0 1 00 1x 0 bad frame, won't see it
  1077. * 1 1 0 11 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
  1078. * 1 1 0 01 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
  1079. * 1 1 0 10 x0 1 AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
  1080. * 1 1 0 00 x0 0 bad frame, won't see it
  1081. * 1 0 1 11 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
  1082. * 1 0 1 01 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
  1083. * 1 0 1 10 0x 1 AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
  1084. * 1 0 1 00 0x 0 bad frame, won't see it
  1085. *
1086. * 0 x x 11 xx 0 we only accept to-ds Rx frames from non-wds peers in this mode.
  1087. * 0 x x 01 xx 1
  1088. * 0 x x 10 xx 0
  1089. * 0 x x 00 xx 0 bad frame, won't see it
  1090. * ------------------------------------------------
  1091. */
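/*
 * fr_ds/to_ds come from the received MPDU header: exactly one of the two
 * bits set (XOR) identifies a 3-address frame, both bits set (AND)
 * identifies a 4-address (WDS) frame, matching the "dir" column above.
 */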
  1092. fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
  1093. to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
  1094. rx_3addr = fr_ds ^ to_ds;
  1095. rx_4addr = fr_ds & to_ds;
  1096. if (vdev->opmode == wlan_op_mode_ap) {
  1097. if ((!peer->wds_enabled && rx_3addr && to_ds) ||
  1098. (peer->wds_enabled && !rx_mcast && (rx_4addr == rx_policy_ucast)) ||
  1099. (peer->wds_enabled && rx_mcast && (rx_4addr == rx_policy_mcast))) {
  1100. return 1;
  1101. }
  1102. } else { /* sta mode */
  1103. if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
  1104. (rx_mcast && (rx_4addr == rx_policy_mcast))) {
  1105. return 1;
  1106. }
  1107. }
  1108. return 0;
  1109. }
  1110. #else
  1111. int dp_wds_rx_policy_check(
  1112. uint8_t *rx_tlv_hdr,
  1113. struct dp_vdev *vdev,
  1114. struct dp_peer *peer,
  1115. int rx_mcast
  1116. )
  1117. {
  1118. return 1;
  1119. }
  1120. #endif
  1121. /**
  1122. * dp_rx_process() - Brain of the Rx processing functionality
  1123. * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
  1124. * @soc: core txrx main context
  1125. * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
  1126. * @quota: No. of units (packets) that can be serviced in one shot.
  1127. *
  1128. * This function implements the core of Rx functionality. This is
  1129. * expected to handle only non-error frames.
  1130. *
  1131. * Return: uint32_t: No. of elements processed
  1132. */
  1133. uint32_t
  1134. dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
  1135. {
  1136. void *hal_soc;
  1137. void *ring_desc;
  1138. struct dp_rx_desc *rx_desc = NULL;
  1139. qdf_nbuf_t nbuf, next;
  1140. union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
  1141. union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
  1142. uint32_t rx_bufs_used = 0, rx_buf_cookie;
  1143. uint32_t l2_hdr_offset = 0;
  1144. uint16_t msdu_len = 0;
  1145. uint16_t peer_id;
  1146. struct dp_peer *peer = NULL;
  1147. struct dp_vdev *vdev = NULL;
  1148. uint32_t pkt_len = 0;
  1149. struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
  1150. struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
  1151. enum hal_reo_error_status error;
  1152. uint32_t peer_mdata;
  1153. uint8_t *rx_tlv_hdr;
  1154. uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
  1155. uint8_t mac_id = 0;
  1156. struct dp_pdev *pdev;
  1157. struct dp_srng *dp_rxdma_srng;
  1158. struct rx_desc_pool *rx_desc_pool;
  1159. struct dp_soc *soc = int_ctx->soc;
  1160. uint8_t ring_id = 0;
  1161. uint8_t core_id = 0;
  1162. qdf_nbuf_t nbuf_head = NULL;
  1163. qdf_nbuf_t nbuf_tail = NULL;
  1164. qdf_nbuf_t deliver_list_head = NULL;
  1165. qdf_nbuf_t deliver_list_tail = NULL;
  1166. DP_HIST_INIT();
  1167. /* Debug -- Remove later */
  1168. qdf_assert(soc && hal_ring);
  1169. hal_soc = soc->hal_soc;
  1170. /* Debug -- Remove later */
  1171. qdf_assert(hal_soc);
  1172. hif_pm_runtime_mark_last_busy(soc->osdev->dev);
  1173. if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
  1174. /*
  1175. * Need API to convert from hal_ring pointer to
  1176. * Ring Type / Ring Id combo
  1177. */
  1178. DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
  1179. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1180. FL("HAL RING Access Failed -- %pK"), hal_ring);
  1181. hal_srng_access_end(hal_soc, hal_ring);
  1182. goto done;
  1183. }
  1184. /*
  1185. * start reaping the buffers from reo ring and queue
  1186. * them in per vdev queue.
  1187. * Process the received pkts in a different per vdev loop.
  1188. */
  1189. while (qdf_likely(quota)) {
  1190. ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);
  1191. /*
  1192. * in case HW has updated hp after we cached the hp
  1193. * ring_desc can be NULL even there are entries
  1194. * available in the ring. Update the cached_hp
  1195. * and reap the buffers available to read complete
  1196. * mpdu in one reap
  1197. *
  1198. * This is needed for RAW mode we have to read all
  1199. * msdus corresponding to amsdu in one reap to create
  1200. * SG list properly but due to mismatch in cached_hp
  1201. * and actual hp sometimes we are unable to read
  1202. * complete mpdu in one reap.
  1203. */
  1204. if (qdf_unlikely(!ring_desc)) {
  1205. hal_srng_access_start_unlocked(hal_soc, hal_ring);
  1206. ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring);
  1207. if (!ring_desc)
  1208. break;
  1209. }
  1210. error = HAL_RX_ERROR_STATUS_GET(ring_desc);
  1211. ring_id = hal_srng_ring_id_get(hal_ring);
  1212. if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
  1213. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1214. FL("HAL RING 0x%pK:error %d"), hal_ring, error);
  1215. DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
  1216. /* Don't know how to deal with this -- assert */
  1217. qdf_assert(0);
  1218. }
  1219. rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
  1220. rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
  1221. qdf_assert(rx_desc);
  1222. rx_bufs_reaped[rx_desc->pool_id]++;
  1223. /* TODO */
  1224. /*
  1225. * Need a separate API for unmapping based on
1226. * physical address
  1227. */
  1228. qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
  1229. QDF_DMA_BIDIRECTIONAL);
  1230. core_id = smp_processor_id();
  1231. DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
  1232. /* Get MPDU DESC info */
  1233. hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
  1234. hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
  1235. mpdu_desc_info.peer_meta_data);
  1236. /* Get MSDU DESC info */
  1237. hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
  1238. /*
  1239. * save msdu flags first, last and continuation msdu in
  1240. * nbuf->cb
  1241. */
  1242. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
  1243. qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
  1244. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
  1245. qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
  1246. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
  1247. qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
  1248. DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
  1249. /*
  1250. * if continuation bit is set then we have MSDU spread
  1251. * across multiple buffers, let us not decrement quota
  1252. * till we reap all buffers of that MSDU.
  1253. */
  1254. if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
  1255. quota -= 1;
  1256. dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
  1257. &tail[rx_desc->pool_id],
  1258. rx_desc);
  1259. }
  1260. done:
  1261. hal_srng_access_end(hal_soc, hal_ring);
  1262. /* Update histogram statistics by looping through pdev's */
  1263. DP_RX_HIST_STATS_PER_PDEV();
  1264. for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
  1265. /*
  1266. * continue with next mac_id if no pkts were reaped
  1267. * from that pool
  1268. */
  1269. if (!rx_bufs_reaped[mac_id])
  1270. continue;
  1271. pdev = soc->pdev_list[mac_id];
  1272. dp_rxdma_srng = &pdev->rx_refill_buf_ring;
  1273. rx_desc_pool = &soc->rx_desc_buf[mac_id];
  1274. dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
  1275. rx_desc_pool, rx_bufs_reaped[mac_id],
  1276. &head[mac_id], &tail[mac_id]);
  1277. }
1278. /* Peer can be NULL in case of LFR */
  1279. if (qdf_likely(peer != NULL))
  1280. vdev = NULL;
  1281. /*
  1282. * BIG loop where each nbuf is dequeued from global queue,
  1283. * processed and queued back on a per vdev basis. These nbufs
  1284. * are sent to stack as and when we run out of nbufs
  1285. * or a new nbuf dequeued from global queue has a different
  1286. * vdev when compared to previous nbuf.
  1287. */
  1288. nbuf = nbuf_head;
  1289. while (nbuf) {
  1290. next = nbuf->next;
  1291. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  1292. /*
  1293. * Check if DMA completed -- msdu_done is the last bit
  1294. * to be written
  1295. */
  1296. if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
  1297. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1298. FL("MSDU DONE failure"));
  1299. hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
  1300. QDF_TRACE_LEVEL_INFO);
  1301. qdf_assert(0);
  1302. }
  1303. peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
  1304. peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
  1305. peer = dp_peer_find_by_id(soc, peer_id);
  1306. if (peer) {
  1307. QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
  1308. qdf_dp_trace_set_track(nbuf, QDF_RX);
  1309. QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
  1310. QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
  1311. QDF_NBUF_RX_PKT_DATA_TRACK;
  1312. }
  1313. rx_bufs_used++;
                if (deliver_list_head && peer && (vdev != peer->vdev)) {
                        dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
                                               deliver_list_tail);
                        deliver_list_head = NULL;
                        deliver_list_tail = NULL;
                }

                if (qdf_likely(peer != NULL)) {
                        vdev = peer->vdev;
                } else {
                        qdf_nbuf_free(nbuf);
                        nbuf = next;
                        continue;
                }

                if (qdf_unlikely(vdev == NULL)) {
                        qdf_nbuf_free(nbuf);
                        nbuf = next;
                        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
                        continue;
                }

                DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
                /*
                 * First IF condition:
                 * 802.11 Fragmented pkts are reinjected to REO
                 * HW block as SG pkts and for these pkts we only
                 * need to pull the RX TLVS header length.
                 * Second IF condition:
                 * The below condition happens when an MSDU is spread
                 * across multiple buffers. This can happen in two cases
                 * 1. The nbuf size is smaller than the received msdu.
                 *    ex: we have set the nbuf size to 2048 during
                 *        nbuf_alloc. but we received an msdu which is
                 *        2304 bytes in size then this msdu is spread
                 *        across 2 nbufs.
                 *
                 * 2. AMSDUs when RAW mode is enabled.
                 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
                 *        across 1st nbuf and 2nd nbuf and last MSDU is
                 *        spread across 2nd nbuf and 3rd nbuf.
                 *
                 * for these scenarios let us create a skb frag_list and
                 * append these buffers till the last MSDU of the AMSDU
                 * Third condition:
                 * This is the most likely case, we receive 802.3 pkts
                 * decapsulated by HW, here we need to set the pkt length.
                 * (a worked example of the length math follows the else
                 * branch below)
                 */
                if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf)))
                        qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
                else if (qdf_unlikely(vdev->rx_decap_type ==
                                htt_cmn_pkt_type_raw)) {
                        msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
                        nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);

                        DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
                        DP_STATS_INC_PKT(peer, rx.raw, 1,
                                         msdu_len);

                        next = nbuf->next;
                } else {
                        l2_hdr_offset =
                                hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);

                        msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
                        pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

                        qdf_nbuf_set_pktlen(nbuf, pkt_len);
                        qdf_nbuf_pull_head(nbuf,
                                           RX_PKT_TLVS_LEN +
                                           l2_hdr_offset);
                }
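                /*
                 * Worked example for the common (third) branch, with
                 * illustrative numbers only: if RX_PKT_TLVS_LEN were 384
                 * and the HW reported a 2-byte L3 header padding for a
                 * 1400-byte MSDU, then pkt_len = 1400 + 2 + 384 = 1786;
                 * after the pull, qdf_nbuf_len(nbuf) == 1400, i.e. only
                 * the decapsulated 802.3 frame remains for the stack.
                 */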
                if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
                                hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
                        QDF_TRACE(QDF_MODULE_ID_DP,
                                  QDF_TRACE_LEVEL_ERROR,
                                  FL("Policy Check Drop pkt"));
                        /* Drop & free packet */
                        qdf_nbuf_free(nbuf);
                        /* Statistics */
                        nbuf = next;
                        continue;
                }

                if (qdf_unlikely(peer && peer->bss_peer)) {
                        QDF_TRACE(QDF_MODULE_ID_DP,
                                  QDF_TRACE_LEVEL_ERROR,
                                  FL("received pkt with same src MAC"));
                        DP_STATS_INC(vdev->pdev, dropped.mec, 1);

                        /* Drop & free packet */
                        qdf_nbuf_free(nbuf);
                        /* Statistics */
                        nbuf = next;
                        continue;
                }

                if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
                                (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
                                (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
                        DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
                        qdf_nbuf_free(nbuf);
                        nbuf = next;
                        continue;
                }
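                /*
                 * Checksum offload results from the RX TLVs are copied
                 * into the nbuf, and the REO ring id this frame arrived
                 * on is recorded against the nbuf for rx queue tracking.
                 */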
                dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

                dp_set_rx_queue(nbuf, ring_id);

                /*
                 * HW structures call this L3 header padding --
                 * even though this is actually the offset from
                 * the buffer beginning where the L2 header
                 * begins.
                 */
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          FL("rxhash: flow id toeplitz: 0x%x\n"),
                          hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));

                dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);

                if (qdf_unlikely(vdev->mesh_vdev)) {
                        if (dp_rx_filter_mesh_packets(vdev, nbuf,
                                                      rx_tlv_hdr)
                                        == QDF_STATUS_SUCCESS) {
                                QDF_TRACE(QDF_MODULE_ID_DP,
                                          QDF_TRACE_LEVEL_INFO_MED,
                                          FL("mesh pkt filtered"));
                                DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
                                             1);

                                qdf_nbuf_free(nbuf);
                                nbuf = next;
                                continue;
                        }
                        dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
                }

#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "p_id %d msdu_len %d hdr_off %d",
                          peer_id, msdu_len, l2_hdr_offset);

                print_hex_dump(KERN_ERR,
                               "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
                               qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

                if (qdf_likely(vdev->rx_decap_type ==
                               htt_cmn_pkt_type_ethernet) &&
                    (qdf_likely(!vdev->mesh_vdev)) &&
                    (vdev->wds_enabled)) {
                        /* WDS Source Port Learning */
                        dp_rx_wds_srcport_learn(soc,
                                                rx_tlv_hdr,
                                                peer,
                                                nbuf);

                        /* Intrabss-fwd */
                        if (dp_rx_check_ap_bridge(vdev))
                                if (dp_rx_intrabss_fwd(soc,
                                                       peer,
                                                       rx_tlv_hdr,
                                                       nbuf)) {
                                        nbuf = next;
                                        continue; /* Get next desc */
                                }
                }
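                /*
                 * Hand the flow information from the RX TLVs to the LRO
                 * layer, then queue the nbuf on the per-vdev delivery
                 * list; to_stack stats are bumped per MSDU delivered.
                 */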
                dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx);

                DP_RX_LIST_APPEND(deliver_list_head,
                                  deliver_list_tail,
                                  nbuf);

                DP_STATS_INC_PKT(peer, rx.to_stack, 1,
                                 qdf_nbuf_len(nbuf));

                nbuf = next;
        }

        if (deliver_list_head)
                dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
                                       deliver_list_tail);

        return rx_bufs_used; /* Assume no scale factor for now */
}
/**
 * dp_rx_pdev_detach() - detach dp rx
 * @pdev: core txrx pdev context
 *
 * This function detaches DP RX from the main device context
 * and frees the DP RX resources.
 *
 * Return: void
 */
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
        uint8_t pdev_id = pdev->pdev_id;
        struct dp_soc *soc = pdev->soc;
        struct rx_desc_pool *rx_desc_pool;

        rx_desc_pool = &soc->rx_desc_buf[pdev_id];
        if (rx_desc_pool->pool_size != 0) {
                dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
        }

        return;
}
/**
 * dp_rx_pdev_attach() - attach DP RX
 * @pdev: core txrx pdev context
 *
 * This function attaches a DP RX instance to the main
 * device (SOC) context. It allocates DP RX resources and
 * initializes them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
        uint8_t pdev_id = pdev->pdev_id;
        struct dp_soc *soc = pdev->soc;
        struct dp_srng rxdma_srng;
        uint32_t rxdma_entries;
        union dp_rx_desc_list_elem_t *desc_list = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;
        struct dp_srng *dp_rxdma_srng;
        struct rx_desc_pool *rx_desc_pool;

        if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "nss-wifi<4> skip Rx refill %d", pdev_id);
                return QDF_STATUS_SUCCESS;
        }

        pdev = soc->pdev_list[pdev_id];
        rxdma_srng = pdev->rx_refill_buf_ring;
        soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
        rxdma_entries = rxdma_srng.alloc_size/hal_srng_get_entrysize(
                                                soc->hal_soc, RXDMA_BUF);

        rx_desc_pool = &soc->rx_desc_buf[pdev_id];
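        /*
         * The SW descriptor pool is sized at three descriptors per
         * RXDMA refill ring entry, presumably to keep spare descriptors
         * available while earlier buffers are still held in REO/WBM.
         */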
        dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries*3, rx_desc_pool);

        rx_desc_pool->owner = DP_WBM2SW_RBM;
        /* For Rx buffers, WBM release ring is SW RING 3, for all pdevs */
        dp_rxdma_srng = &pdev->rx_refill_buf_ring;
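        /*
         * Passing 0 as the buffer count is assumed here to ask
         * dp_rx_buffers_replenish() to fill as many free entries as the
         * refill ring currently has (based on the replenish routine's
         * handling of a zero request), rather than posting no buffers.
         */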
        dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
                                0, &desc_list, &tail);

        return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_nbuf_prepare() - prepare RX nbuf
 * @soc: core txrx main context
 * @pdev: core txrx pdev context
 *
 * This function allocates and maps an nbuf for RX DMA usage, retrying
 * on failure until it succeeds or the retry count reaches the maximum
 * threshold.
 *
 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
 */
qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
        uint8_t *buf;
        int32_t nbuf_retry_count;
        QDF_STATUS ret;
        qdf_nbuf_t nbuf = NULL;

        for (nbuf_retry_count = 0; nbuf_retry_count <
                        QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
                        nbuf_retry_count++) {
                /* Allocate a new skb */
                nbuf = qdf_nbuf_alloc(soc->osdev,
                                      RX_BUFFER_SIZE,
                                      RX_BUFFER_RESERVATION,
                                      RX_BUFFER_ALIGNMENT,
                                      FALSE);

                if (nbuf == NULL) {
                        DP_STATS_INC(pdev,
                                     replenish.nbuf_alloc_fail, 1);
                        continue;
                }

                buf = qdf_nbuf_data(nbuf);

                memset(buf, 0, RX_BUFFER_SIZE);

                ret = qdf_nbuf_map_single(soc->osdev, nbuf,
                                          QDF_DMA_BIDIRECTIONAL);

                /* nbuf map failed */
                if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                        qdf_nbuf_free(nbuf);
                        DP_STATS_INC(pdev, replenish.map_err, 1);
                        continue;
                }

                /* qdf_nbuf alloc and map succeeded */
                break;
        }

        /* qdf_nbuf still alloc or map failed */
        if (qdf_unlikely(nbuf_retry_count >=
                        QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
                return NULL;

        return nbuf;
}
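/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): a refill path would typically loop over the entries it wants
 * to post and bail out when preparation fails, e.g.
 *
 *      while (count--) {
 *              qdf_nbuf_t nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *
 *              if (!nbuf)
 *                      break;
 *              paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *              ... program paddr into the next RXDMA ring entry ...
 *      }
 */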