/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <cdf_nbuf.h>               /* cdf_nbuf_t, etc. */
#include <cdf_util.h>               /* cdf_os_cpu_to_le64 */
#include <cdf_types.h>              /* bool */
#include <cds_ieee80211_common.h>   /* ieee80211_frame */

/* external API header files */
#include <ol_ctrl_txrx_api.h>       /* ol_rx_notify */
#include <ol_htt_api.h>             /* htt_pdev_handle */
#include <ol_txrx_api.h>            /* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>        /* ol_rx_indication_handler */
#include <ol_htt_rx_api.h>          /* htt_rx_peer_id, etc. */

/* internal API header files */
#include <ol_txrx_types.h>          /* ol_txrx_vdev_t, etc. */
#include <ol_txrx_peer_find.h>      /* ol_txrx_peer_find_by_id */
#include <ol_rx_reorder.h>          /* ol_rx_reorder_store, etc. */
#include <ol_rx_reorder_timeout.h>  /* OL_RX_REORDER_TIMEOUT_UPDATE */
#include <ol_rx_defrag.h>           /* ol_rx_defrag_waitlist_flush */
#include <ol_txrx_internal.h>
#include <wdi_event.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>          /* ol_rx_decap_info_t, etc. */
#endif

/* FIX THIS: txrx should not include private header files of other modules */
#include <htt_types.h>
#include <ol_if_athvar.h>
#include <enet.h>                   /* ethernet + SNAP/LLC header defs and
                                       ethertype values */
#include <ip_prot.h>                /* IP protocol values */
#include <ipv4.h>                   /* IPv4 header defs */
#include <ipv6_defs.h>              /* IPv6 header defs */
#include <ol_vowext_dbg_defs.h>
#include <ol_txrx_osif_api.h>
#include <wma.h>

#ifdef HTT_RX_RESTORE
#if defined(CONFIG_CNSS)
#include <net/cnss.h>
#endif
#endif

void ol_rx_data_process(struct ol_txrx_peer_t *peer,
			cdf_nbuf_t rx_buf_list);

#ifdef HTT_RX_RESTORE
static void ol_rx_restore_handler(struct work_struct *htt_rx)
{
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
		  "Enter: %s", __func__);
	cnss_device_self_recovery();
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
		  "Exit: %s", __func__);
}

static DECLARE_WORK(ol_rx_restore_work, ol_rx_restore_handler);

void ol_rx_trigger_restore(htt_pdev_handle htt_pdev, cdf_nbuf_t head_msdu,
			   cdf_nbuf_t tail_msdu)
{
	cdf_nbuf_t next;

	while (head_msdu) {
		next = cdf_nbuf_next(head_msdu);
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
			  "freeing %p\n", head_msdu);
		cdf_nbuf_free(head_msdu);
		head_msdu = next;
	}

	if (!htt_pdev->rx_ring.htt_rx_restore) {
		cds_set_recovery_in_progress(true);
		htt_pdev->rx_ring.htt_rx_restore = 1;
		schedule_work(&ol_rx_restore_work);
	}
}
#endif

static void ol_rx_process_inv_peer(ol_txrx_pdev_handle pdev,
				   void *rx_mpdu_desc, cdf_nbuf_t msdu)
{
	uint8_t a1[IEEE80211_ADDR_LEN];
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	struct ol_txrx_vdev_t *vdev = NULL;
	struct ieee80211_frame *wh;
	struct wdi_event_rx_peer_invalid_msg msg;

	wh = (struct ieee80211_frame *)
	     htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev, rx_mpdu_desc);
	/*
	 * Klocwork issue #6152
	 * All targets that send an "INVALID_PEER" rx status provide an
	 * 802.11 header for each rx MPDU, so it is certain that
	 * htt_rx_mpdu_wifi_hdr_retrieve will succeed.
	 * However, both for robustness, e.g. if this function is given an
	 * MSDU descriptor rather than an MPDU descriptor, and to make it
	 * clear to static analysis that this code is safe, add an explicit
	 * check that htt_rx_mpdu_wifi_hdr_retrieve provides a non-NULL value.
	 */
	if (wh == NULL || !IEEE80211_IS_DATA(wh))
		return;

	/* ignore frames for non-existent bssids */
	cdf_mem_copy(a1, wh->i_addr1, IEEE80211_ADDR_LEN);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (cdf_mem_compare(a1, vdev->mac_addr.raw,
				    IEEE80211_ADDR_LEN) == 0) {
			break;
		}
	}
	if (!vdev)
		return;

	msg.wh = wh;
	msg.msdu = msdu;
	msg.vdev_id = vdev->vdev_id;
#ifdef WDI_EVENT_ENABLE
	wdi_event_handler(WDI_EVENT_RX_PEER_INVALID, pdev, &msg);
#endif
}

#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
static inline int16_t
ol_rx_rssi_avg(struct ol_txrx_pdev_t *pdev, int16_t rssi_old, int16_t rssi_new)
{
	int rssi_old_weight;

	if (rssi_new == HTT_RSSI_INVALID)
		return rssi_old;
	if (rssi_old == HTT_RSSI_INVALID)
		return rssi_new;

	rssi_old_weight =
		(1 << pdev->rssi_update_shift) - pdev->rssi_new_weight;
	return (rssi_new * pdev->rssi_new_weight +
		rssi_old * rssi_old_weight) >> pdev->rssi_update_shift;
}

static void
ol_rx_ind_rssi_update(struct ol_txrx_peer_t *peer, cdf_nbuf_t rx_ind_msg)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;

	peer->rssi_dbm = ol_rx_rssi_avg(pdev, peer->rssi_dbm,
					htt_rx_ind_rssi_dbm(pdev->htt_pdev,
							    rx_ind_msg));
}

static void
ol_rx_mpdu_rssi_update(struct ol_txrx_peer_t *peer, void *rx_mpdu_desc)
{
	struct ol_txrx_pdev_t *pdev;

	/* check the peer before dereferencing it to look up the pdev */
	if (!peer)
		return;
	pdev = peer->vdev->pdev;
	peer->rssi_dbm = ol_rx_rssi_avg(pdev, peer->rssi_dbm,
					htt_rx_mpdu_desc_rssi_dbm(
						pdev->htt_pdev,
						rx_mpdu_desc));
}

#else
#define ol_rx_ind_rssi_update(peer, rx_ind_msg) /* no-op */
#define ol_rx_mpdu_rssi_update(peer, rx_mpdu_desc) /* no-op */
#endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
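
/**
 * @brief Free the MSDUs of one MPDU, from head_msdu through tail_msdu,
 *	releasing each frame's rx descriptor and network buffer.
 */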
void discard_msdus(htt_pdev_handle htt_pdev,
		   cdf_nbuf_t head_msdu,
		   cdf_nbuf_t tail_msdu)
{
	while (1) {
		cdf_nbuf_t next;

		next = cdf_nbuf_next(head_msdu);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
		if (head_msdu == tail_msdu)
			break;
		head_msdu = next;
	}
	return;
}
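
/**
 * @brief Free a chained (multi-buffer) MSDU list instead of delivering it;
 *	chained MSDUs are not yet stitched back into a single buffer, so the
 *	caller discards them (see the TBDXXX note at the call site).
 */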
void chain_msdus(htt_pdev_handle htt_pdev,
		 cdf_nbuf_t head_msdu,
		 cdf_nbuf_t tail_msdu)
{
	while (1) {
		cdf_nbuf_t next;

		next = cdf_nbuf_next(head_msdu);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
		if (head_msdu == tail_msdu)
			break;
		head_msdu = next;
	}
	return;
}
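
/**
 * @brief Run the host sequence-number check on one MPDU and either discard
 *	its MSDUs or store them in the peer-TID's rx reorder array.
 */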
void process_reorder(ol_txrx_pdev_handle pdev,
		     void *rx_mpdu_desc,
		     uint8_t tid,
		     struct ol_txrx_peer_t *peer,
		     cdf_nbuf_t head_msdu,
		     cdf_nbuf_t tail_msdu,
		     int num_mpdu_ranges,
		     int num_mpdus,
		     bool rx_ind_release)
{
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	enum htt_rx_status mpdu_status;
	int reorder_idx;

	reorder_idx = htt_rx_mpdu_desc_reorder_idx(htt_pdev, rx_mpdu_desc);
	OL_RX_REORDER_TRACE_ADD(pdev, tid,
				reorder_idx,
				htt_rx_mpdu_desc_seq_num(htt_pdev,
							 rx_mpdu_desc),
				1);
	ol_rx_mpdu_rssi_update(peer, rx_mpdu_desc);
	/*
	 * In most cases, out-of-bounds and duplicate sequence number detection
	 * is performed by the target, but in some cases it is done by the
	 * host.
	 * Specifically, the host does rx out-of-bounds sequence number
	 * detection for:
	 * 1.  Peregrine or Rome target
	 *     for peer-TIDs that do not have aggregation enabled, if the
	 *     RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK flag
	 *     is set during the driver build.
	 * 2.  Riva-family targets, which have rx reorder timeouts handled by
	 *     the host rather than the target.
	 *     (The target already does duplicate detection, but the host
	 *     may have given up waiting for a particular sequence number
	 *     before it arrives.  In this case, the out-of-bounds sequence
	 *     number of the late frame allows the host to discard it, rather
	 *     than sending it out of order.)
	 */
	mpdu_status = OL_RX_SEQ_NUM_CHECK(pdev,
					  peer,
					  tid,
					  rx_mpdu_desc);
	if (mpdu_status != htt_rx_status_ok) {
		/*
		 * If the sequence number was out of bounds, the MPDU needs
		 * to be discarded.
		 */
		discard_msdus(htt_pdev, head_msdu, tail_msdu);
		/*
		 * For Peregrine and Rome,
		 * OL_RX_REORDER_SEQ_NUM_CHECK should only fail for the case
		 * of (duplicate) non-aggregates.
		 *
		 * For Riva, Pronto and Northstar,
		 * there should be only one MPDU delivered at a time.
		 * Thus, there are no further MPDUs that need to be
		 * processed here.
		 * Just to be sure this is true, check the assumption
		 * that this was the only MPDU referenced by the rx
		 * indication.
		 */
		TXRX_ASSERT2((num_mpdu_ranges == 1) && num_mpdus == 1);
		/*
		 * The MPDU was not stored in the rx reorder array, so
		 * there's nothing to release.
		 */
		rx_ind_release = false;
	} else {
		ol_rx_reorder_store(pdev, peer, tid,
				    reorder_idx, head_msdu, tail_msdu);
		if (peer->tids_rx_reorder[tid].win_sz_mask == 0) {
			peer->tids_last_seq[tid] = htt_rx_mpdu_desc_seq_num(
				htt_pdev,
				rx_mpdu_desc);
		}
	}
	return;
}				/* process_reorder */
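
/**
 * @brief Process an rx indication message: flush or release reorder state as
 *	requested by the indication, pop each MPDU's MSDUs from HTT, and hand
 *	them to the reorder logic (or discard them if the MPDU status is bad).
 */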
void
ol_rx_indication_handler(ol_txrx_pdev_handle pdev,
			 cdf_nbuf_t rx_ind_msg,
			 uint16_t peer_id, uint8_t tid, int num_mpdu_ranges)
{
	int mpdu_range, i;
	unsigned seq_num_start = 0, seq_num_end = 0;
	bool rx_ind_release = false;
	struct ol_txrx_vdev_t *vdev = NULL;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	uint16_t center_freq;
	uint16_t chan1;
	uint16_t chan2;
	uint8_t phymode;
	bool ret;

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (!peer) {
		/*
		 * If we can't find a peer, send this packet to the OCB
		 * interface using the OCB self peer.
		 */
		if (!ol_txrx_get_ocb_peer(pdev, &peer))
			peer = NULL;
	}

	if (peer) {
		vdev = peer->vdev;
		ol_rx_ind_rssi_update(peer, rx_ind_msg);

		if (vdev->opmode == wlan_op_mode_ocb) {
			htt_rx_ind_legacy_rate(pdev->htt_pdev, rx_ind_msg,
					       &peer->last_pkt_legacy_rate,
					       &peer->last_pkt_legacy_rate_sel);
			peer->last_pkt_rssi_cmb = htt_rx_ind_rssi_dbm(
				pdev->htt_pdev, rx_ind_msg);
			for (i = 0; i < 4; i++)
				peer->last_pkt_rssi[i] =
					htt_rx_ind_rssi_dbm_chain(
						pdev->htt_pdev, rx_ind_msg, i);
			htt_rx_ind_timestamp(pdev->htt_pdev, rx_ind_msg,
					     &peer->last_pkt_timestamp_microsec,
					     &peer->last_pkt_timestamp_submicrosec);
			peer->last_pkt_tsf = htt_rx_ind_tsf32(pdev->htt_pdev,
							      rx_ind_msg);
			peer->last_pkt_tid = htt_rx_ind_ext_tid(pdev->htt_pdev,
								rx_ind_msg);
		}
	}

	TXRX_STATS_INCR(pdev, priv.rx.normal.ppdus);
	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);
	if (htt_rx_ind_flush(pdev->htt_pdev, rx_ind_msg) && peer) {
		htt_rx_ind_flush_seq_num_range(pdev->htt_pdev, rx_ind_msg,
					       &seq_num_start, &seq_num_end);
		if (tid == HTT_INVALID_TID) {
			/*
			 * The host/FW reorder state went out of sync
			 * for a while because the FW ran out of rx indication
			 * buffers.  All the buffers in the reorder queue
			 * have to be discarded.
			 */
			ol_rx_reorder_peer_cleanup(vdev, peer);
		} else {
			ol_rx_reorder_flush(vdev, peer, tid, seq_num_start,
					    seq_num_end, htt_rx_flush_release);
		}
	}

	if (htt_rx_ind_release(pdev->htt_pdev, rx_ind_msg)) {
		/*
		 * The release info from the indication is saved here and the
		 * release is done at the end, because in the HL case the
		 * cdf_nbuf_t for the msg and the payload are the same buffer,
		 * and that buffer is changed during processing.
		 */
		rx_ind_release = true;
		htt_rx_ind_release_seq_num_range(pdev->htt_pdev, rx_ind_msg,
						 &seq_num_start, &seq_num_end);
	}
#ifdef DEBUG_DMA_DONE
	pdev->htt_pdev->rx_ring.dbg_initial_msdu_payld =
		pdev->htt_pdev->rx_ring.sw_rd_idx.msdu_payld;
#endif

	for (mpdu_range = 0; mpdu_range < num_mpdu_ranges; mpdu_range++) {
		enum htt_rx_status status;
		int i, num_mpdus;
		cdf_nbuf_t head_msdu, tail_msdu, msdu;
		void *rx_mpdu_desc;

#ifdef DEBUG_DMA_DONE
		pdev->htt_pdev->rx_ring.dbg_mpdu_range = mpdu_range;
#endif

		htt_rx_ind_mpdu_range_info(pdev->htt_pdev, rx_ind_msg,
					   mpdu_range, &status, &num_mpdus);
		if ((status == htt_rx_status_ok) && peer) {
			TXRX_STATS_ADD(pdev, priv.rx.normal.mpdus, num_mpdus);
			/* valid frame - deposit it into rx reordering buffer */
			for (i = 0; i < num_mpdus; i++) {
				int msdu_chaining;
				/*
				 * Get a linked list of the MSDUs that comprise
				 * this MPDU.
				 * This also attaches each rx MSDU descriptor to
				 * the corresponding rx MSDU network buffer.
				 * (In some systems, the rx MSDU desc is already
				 * in the same buffer as the MSDU payload; in
				 * other systems they are separate, so a pointer
				 * needs to be set in the netbuf to locate the
				 * corresponding rx descriptor.)
				 *
				 * It is necessary to call htt_rx_amsdu_pop
				 * before htt_rx_mpdu_desc_list_next, because
				 * the (MPDU) rx descriptor has DMA unmapping
				 * done during the htt_rx_amsdu_pop call.
				 * The rx desc should not be accessed until this
				 * DMA unmapping has been done, since the DMA
				 * unmapping involves making sure the cache area
				 * for the mapped buffer is flushed, so the data
				 * written by the MAC DMA into memory will be
				 * fetched, rather than garbage from the cache.
				 */
#ifdef DEBUG_DMA_DONE
				pdev->htt_pdev->rx_ring.dbg_mpdu_count = i;
#endif
				msdu_chaining =
					htt_rx_amsdu_pop(htt_pdev,
							 rx_ind_msg,
							 &head_msdu,
							 &tail_msdu);
#ifdef HTT_RX_RESTORE
				if (htt_pdev->rx_ring.rx_reset) {
					ol_rx_trigger_restore(htt_pdev,
							      head_msdu,
							      tail_msdu);
					return;
				}
#endif
				rx_mpdu_desc =
					htt_rx_mpdu_desc_list_next(htt_pdev,
								   rx_ind_msg);
				ret = htt_rx_msdu_center_freq(htt_pdev, peer,
					rx_mpdu_desc, &center_freq, &chan1,
					&chan2, &phymode);
				if (ret == true) {
					peer->last_pkt_center_freq =
						center_freq;
				} else {
					peer->last_pkt_center_freq = 0;
				}

				/* Pktlog */
#ifdef WDI_EVENT_ENABLE
				wdi_event_handler(WDI_EVENT_RX_DESC_REMOTE,
						  pdev, head_msdu);
#endif
				if (msdu_chaining) {
					/*
					 * TBDXXX - to deliver SDU with
					 * chaining, we need to stitch those
					 * scattered buffers into one single
					 * buffer.
					 * Just discard it now.
					 */
					chain_msdus(htt_pdev,
						    head_msdu,
						    tail_msdu);
				} else {
					process_reorder(pdev, rx_mpdu_desc,
							tid, peer,
							head_msdu, tail_msdu,
							num_mpdu_ranges,
							num_mpdus,
							rx_ind_release);
				}
			}
		} else {
			/* invalid frames - discard them */
			OL_RX_REORDER_TRACE_ADD(pdev, tid,
						TXRX_SEQ_NUM_ERR(status),
						TXRX_SEQ_NUM_ERR(status),
						num_mpdus);
			TXRX_STATS_ADD(pdev, priv.rx.err.mpdu_bad, num_mpdus);
			for (i = 0; i < num_mpdus; i++) {
				/* pull the MPDU's MSDUs off the buffer queue */
				htt_rx_amsdu_pop(htt_pdev, rx_ind_msg, &msdu,
						 &tail_msdu);
#ifdef HTT_RX_RESTORE
				if (htt_pdev->rx_ring.rx_reset) {
					ol_rx_trigger_restore(htt_pdev, msdu,
							      tail_msdu);
					return;
				}
#endif
				/* pull the MPDU desc off the desc queue */
				rx_mpdu_desc =
					htt_rx_mpdu_desc_list_next(htt_pdev,
								   rx_ind_msg);
				OL_RX_ERR_STATISTICS_2(pdev, vdev, peer,
						       rx_mpdu_desc, msdu,
						       status);

				if (status == htt_rx_status_tkip_mic_err &&
				    vdev != NULL && peer != NULL) {
					union htt_rx_pn_t pn;
					uint8_t key_id;

					htt_rx_mpdu_desc_pn(
						pdev->htt_pdev,
						htt_rx_msdu_desc_retrieve(
							pdev->htt_pdev,
							msdu), &pn, 48);
					if (htt_rx_msdu_desc_key_id(
						    pdev->htt_pdev,
						    htt_rx_msdu_desc_retrieve(
							    pdev->htt_pdev,
							    msdu),
						    &key_id) == true) {
						ol_rx_err(pdev->ctrl_pdev,
							  vdev->vdev_id,
							  peer->mac_addr.raw,
							  tid, 0,
							  OL_RX_ERR_TKIP_MIC,
							  msdu, &pn.pn48,
							  key_id);
					}
				}
#ifdef WDI_EVENT_ENABLE
				if (status != htt_rx_status_ctrl_mgmt_null) {
					/* Pktlog */
					wdi_event_handler(
						WDI_EVENT_RX_DESC_REMOTE, pdev,
						msdu);
				}
#endif
				if (status == htt_rx_status_err_inv_peer) {
					/* once per mpdu */
					ol_rx_process_inv_peer(pdev,
							       rx_mpdu_desc,
							       msdu);
				}

				while (1) {
					/* Free the nbuf */
					cdf_nbuf_t next;

					next = cdf_nbuf_next(msdu);
					htt_rx_desc_frame_free(htt_pdev, msdu);
					if (msdu == tail_msdu)
						break;
					msdu = next;
				}
			}
		}
	}
	/*
	 * Now that a whole batch of MSDUs have been pulled out of HTT
	 * and put into the rx reorder array, it is an appropriate time
	 * to request HTT to provide new rx MSDU buffers for the target
	 * to fill.
	 * This could be done after the end of this function, but it's
	 * better to do it now, rather than waiting until after the driver
	 * and OS finish processing the batch of rx MSDUs.
	 */
	htt_rx_msdu_buff_replenish(htt_pdev);

	if ((true == rx_ind_release) && peer && vdev) {
		ol_rx_reorder_release(vdev, peer, tid, seq_num_start,
				      seq_num_end);
	}
	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);

	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_flush(pdev);
}
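
/**
 * @brief Record the security (cipher) type reported by the target for a
 *	peer's unicast or multicast keys and, for WAPI, seed the per-TID
 *	last-PN values with the provided starting PN.
 */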
void
ol_rx_sec_ind_handler(ol_txrx_pdev_handle pdev,
		      uint16_t peer_id,
		      enum htt_sec_type sec_type,
		      int is_unicast, uint32_t *michael_key, uint32_t *rx_pn)
{
	struct ol_txrx_peer_t *peer;
	int sec_index, i;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (!peer) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Couldn't find peer from ID %d - skipping security inits\n",
			   peer_id);
		return;
	}
	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "sec spec for peer %p (%02x:%02x:%02x:%02x:%02x:%02x): "
		   "%s key of type %d\n",
		   peer,
		   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		   peer->mac_addr.raw[4], peer->mac_addr.raw[5],
		   is_unicast ? "ucast" : "mcast", sec_type);
	sec_index = is_unicast ? txrx_sec_ucast : txrx_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
	/*
	 * The Michael key is only valid for TKIP, but for simplicity
	 * copy it anyway.
	 */
	cdf_mem_copy(&peer->security[sec_index].michael_key[0],
		     michael_key,
		     sizeof(peer->security[sec_index].michael_key));

	if (sec_type != htt_sec_type_wapi) {
		cdf_mem_set(peer->tids_last_pn_valid,
			    OL_TXRX_NUM_EXT_TIDS, 0x00);
	} else if (sec_index == txrx_sec_mcast || peer->tids_last_pn_valid[0]) {
		for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
			/*
			 * Set the PN valid bit for the WAPI sec_type,
			 * since the WAPI PN has to be started with a
			 * predefined value.
			 */
			peer->tids_last_pn_valid[i] = 1;
			cdf_mem_copy((uint8_t *) &peer->tids_last_pn[i],
				     (uint8_t *) rx_pn,
				     sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				cdf_os_cpu_to_le64(
					peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				cdf_os_cpu_to_le64(
					peer->tids_last_pn[i].pn128[0]);
		}
	}
}

#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
#include <cds_ieee80211_common.h>
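
/**
 * @brief Convert a native WiFi (802.11 non-QoS) header in place into an
 *	802.3 Ethernet header, selecting the DA/SA from A1/A2/A3 according
 *	to the frame's ToDS/FromDS bits.
 */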
static void transcap_nwifi_to_8023(cdf_nbuf_t msdu)
{
	struct ieee80211_frame *wh;
	uint32_t hdrsize;
	struct llc *llchdr;
	struct ether_header *eth_hdr;
	uint16_t ether_type = 0;
	uint8_t a1[IEEE80211_ADDR_LEN];
	uint8_t a2[IEEE80211_ADDR_LEN];
	uint8_t a3[IEEE80211_ADDR_LEN];
	uint8_t fc1;

	wh = (struct ieee80211_frame *)cdf_nbuf_data(msdu);
	cdf_mem_copy(a1, wh->i_addr1, IEEE80211_ADDR_LEN);
	cdf_mem_copy(a2, wh->i_addr2, IEEE80211_ADDR_LEN);
	cdf_mem_copy(a3, wh->i_addr3, IEEE80211_ADDR_LEN);
	fc1 = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
	/* Native Wifi header is 80211 non-QoS header */
	hdrsize = sizeof(struct ieee80211_frame);
	llchdr = (struct llc *)(((uint8_t *) cdf_nbuf_data(msdu)) + hdrsize);
	ether_type = llchdr->llc_un.type_snap.ether_type;
	/*
	 * Now move the data pointer to the beginning of the mac header:
	 * new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
	 */
	cdf_nbuf_pull_head(msdu,
			   (hdrsize + sizeof(struct llc) -
			    sizeof(struct ether_header)));
	eth_hdr = (struct ether_header *)(cdf_nbuf_data(msdu));
	switch (fc1) {
	case IEEE80211_FC1_DIR_NODS:
		cdf_mem_copy(eth_hdr->ether_dhost, a1, IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->ether_shost, a2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_TODS:
		cdf_mem_copy(eth_hdr->ether_dhost, a3, IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->ether_shost, a2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		cdf_mem_copy(eth_hdr->ether_dhost, a1, IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->ether_shost, a3, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		break;
	}
	eth_hdr->ether_type = ether_type;
}
#endif

void ol_rx_notify(ol_pdev_handle pdev,
		  uint8_t vdev_id,
		  uint8_t *peer_mac_addr,
		  int tid,
		  uint32_t tsf32,
		  enum ol_rx_notify_type notify_type, cdf_nbuf_t rx_frame)
{
	/*
	 * NOTE: This is used in qca_main for AP mode to handle IGMP
	 * packets specially.  Umac has a corresponding handler for this;
	 * it is not clear whether we need the same for CLD as well.
	 */
}

/**
 * @brief Look into a rx MSDU to see what kind of special handling it requires
 * @details
 *	This function is called when the host rx SW sees that the target
 *	rx FW has marked a rx MSDU as needing inspection.
 *	Based on the results of the inspection, the host rx SW will infer
 *	what special handling to perform on the rx frame.
 *	Currently, the only type of frames that require special handling
 *	are IGMP frames.  The rx data-path SW checks if the frame is IGMP
 *	(it should be, since the target would not have set the inspect flag
 *	otherwise), and then calls the ol_rx_notify function so the
 *	control-path SW can perform multicast group membership learning
 *	by sniffing the IGMP frame.
 */
#define SIZEOF_80211_HDR (sizeof(struct ieee80211_frame))
void
ol_rx_inspect(struct ol_txrx_vdev_t *vdev,
	      struct ol_txrx_peer_t *peer,
	      unsigned tid, cdf_nbuf_t msdu, void *rx_desc)
{
	ol_txrx_pdev_handle pdev = vdev->pdev;
	uint8_t *data, *l3_hdr;
	uint16_t ethertype;
	int offset;

	data = cdf_nbuf_data(msdu);
	if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
		offset = SIZEOF_80211_HDR + LLC_SNAP_HDR_OFFSET_ETHERTYPE;
		l3_hdr = data + SIZEOF_80211_HDR + LLC_SNAP_HDR_LEN;
	} else {
		offset = ETHERNET_ADDR_LEN * 2;
		l3_hdr = data + ETHERNET_HDR_LEN;
	}
	ethertype = (data[offset] << 8) | data[offset + 1];
	if (ethertype == ETHERTYPE_IPV4) {
		offset = IPV4_HDR_OFFSET_PROTOCOL;
		if (l3_hdr[offset] == IP_PROTOCOL_IGMP) {
			ol_rx_notify(pdev->ctrl_pdev,
				     vdev->vdev_id,
				     peer->mac_addr.raw,
				     tid,
				     htt_rx_mpdu_desc_tsf32(pdev->htt_pdev,
							    rx_desc),
				     OL_RX_NOTIFY_IPV4_IGMP, msdu);
		}
	}
}
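
/**
 * @brief Handle an offload-deliver indication: pop each offloaded MSDU list
 *	from the message, deliver it to the peer's rx data path, or free it
 *	if the peer cannot be found, then replenish the rx buffer ring.
 */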
void
ol_rx_offload_deliver_ind_handler(ol_txrx_pdev_handle pdev,
				  cdf_nbuf_t msg, int msdu_cnt)
{
	int vdev_id, peer_id, tid;
	cdf_nbuf_t head_buf, tail_buf, buf;
	struct ol_txrx_peer_t *peer;
	uint8_t fw_desc;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	while (msdu_cnt) {
		htt_rx_offload_msdu_pop(htt_pdev, msg, &vdev_id, &peer_id,
					&tid, &fw_desc, &head_buf, &tail_buf);
		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
		if (peer) {
			ol_rx_data_process(peer, head_buf);
		} else {
			buf = head_buf;
			while (1) {
				cdf_nbuf_t next;

				next = cdf_nbuf_next(buf);
				htt_rx_desc_frame_free(htt_pdev, buf);
				if (buf == tail_buf)
					break;
				buf = next;
			}
		}
		msdu_cnt--;
	}
	htt_rx_msdu_buff_replenish(htt_pdev);
}
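
/**
 * @brief Report a TKIP MIC error for the given peer/TID to the control path,
 *	including the PN and key ID extracted from the MSDU descriptor.
 */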
void
ol_rx_mic_error_handler(
	ol_txrx_pdev_handle pdev,
	u_int8_t tid,
	u_int16_t peer_id,
	void *msdu_desc,
	cdf_nbuf_t msdu)
{
	union htt_rx_pn_t pn = {0};
	u_int8_t key_id = 0;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_txrx_vdev_t *vdev = NULL;

	if (pdev) {
		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
		if (peer) {
			vdev = peer->vdev;
			if (vdev) {
				htt_rx_mpdu_desc_pn(vdev->pdev->htt_pdev,
						    msdu_desc, &pn, 48);
				if (htt_rx_msdu_desc_key_id(
					    vdev->pdev->htt_pdev, msdu_desc,
					    &key_id) == true) {
					ol_rx_err(vdev->pdev->ctrl_pdev,
						  vdev->vdev_id,
						  peer->mac_addr.raw, tid, 0,
						  OL_RX_ERR_TKIP_MIC, msdu,
						  &pn.pn48, key_id);
				}
			}
		}
	}
}

/**
 * @brief Check the first MSDU to decide whether the A-MSDU should be accepted.
 */
bool
ol_rx_filter(struct ol_txrx_vdev_t *vdev,
	     struct ol_txrx_peer_t *peer, cdf_nbuf_t msdu, void *rx_desc)
{
#define FILTER_STATUS_REJECT 1
#define FILTER_STATUS_ACCEPT 0
	uint8_t *wh;
	uint32_t offset = 0;
	uint16_t ether_type = 0;
	bool is_encrypted = false, is_mcast = false;
	uint8_t i;
	enum privacy_filter_packet_type packet_type =
		PRIVACY_FILTER_PACKET_UNICAST;
	ol_txrx_pdev_handle pdev = vdev->pdev;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	int sec_idx;

	/*
	 * Safemode must avoid the PrivacyExemptionList and
	 * ExcludeUnencrypted checking
	 */
	if (vdev->safemode)
		return FILTER_STATUS_ACCEPT;

	is_mcast = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc);
	if (vdev->num_filters > 0) {
		if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
			offset = SIZEOF_80211_HDR +
				LLC_SNAP_HDR_OFFSET_ETHERTYPE;
		} else {
			offset = ETHERNET_ADDR_LEN * 2;
		}
		/* get header info from msdu */
		wh = cdf_nbuf_data(msdu);

		/* get ether type */
		ether_type = (wh[offset] << 8) | wh[offset + 1];
		/* get packet type */
		if (true == is_mcast)
			packet_type = PRIVACY_FILTER_PACKET_MULTICAST;
		else
			packet_type = PRIVACY_FILTER_PACKET_UNICAST;
	}
	/* get encrypt info */
	is_encrypted = htt_rx_mpdu_is_encrypted(htt_pdev, rx_desc);
#ifdef ATH_SUPPORT_WAPI
	if ((true == is_encrypted) && (ETHERTYPE_WAI == ether_type)) {
		/*
		 * We expect the WAI frames to be always unencrypted when
		 * the UMAC gets them.
		 */
		return FILTER_STATUS_REJECT;
	}
#endif /* ATH_SUPPORT_WAPI */

	for (i = 0; i < vdev->num_filters; i++) {
		enum privacy_filter filter_type;
		enum privacy_filter_packet_type filter_packet_type;

		/* skip if the ether type does not match */
		if (vdev->privacy_filters[i].ether_type != ether_type)
			continue;

		/* skip if the packet type does not match */
		filter_packet_type = vdev->privacy_filters[i].packet_type;
		if (filter_packet_type != packet_type &&
		    filter_packet_type != PRIVACY_FILTER_PACKET_BOTH) {
			continue;
		}

		filter_type = vdev->privacy_filters[i].filter_type;
		if (filter_type == PRIVACY_FILTER_ALWAYS) {
			/*
			 * In this case, we accept the frame if and only if
			 * it was originally NOT encrypted.
			 */
			if (true == is_encrypted)
				return FILTER_STATUS_REJECT;
			else
				return FILTER_STATUS_ACCEPT;
		} else if (filter_type == PRIVACY_FILTER_KEY_UNAVAILABLE) {
			/*
			 * In this case, we reject the frame if it was
			 * originally NOT encrypted but we have the key mapping
			 * key for this frame.
			 */
			if (!is_encrypted &&
			    !is_mcast &&
			    (peer->security[txrx_sec_ucast].sec_type !=
			     htt_sec_type_none) &&
			    (peer->keyinstalled || !ETHERTYPE_IS_EAPOL_WAPI(
				    ether_type))) {
				return FILTER_STATUS_REJECT;
			} else {
				return FILTER_STATUS_ACCEPT;
			}
		} else {
			/*
			 * The privacy exemption does not apply to this frame.
			 */
			break;
		}
	}

	/*
	 * If the privacy exemption list does not apply to the frame,
	 * check ExcludeUnencrypted.
	 * If ExcludeUnencrypted is not set, or if this was originally
	 * an encrypted frame, it will be accepted.
	 */
	if (!vdev->drop_unenc || (true == is_encrypted))
		return FILTER_STATUS_ACCEPT;

	/*
	 * If this is an open connection, it will be accepted.
	 */
	sec_idx = (true == is_mcast) ? txrx_sec_mcast : txrx_sec_ucast;
	if (peer->security[sec_idx].sec_type == htt_sec_type_none)
		return FILTER_STATUS_ACCEPT;

	if ((false == is_encrypted) && vdev->drop_unenc) {
		OL_RX_ERR_STATISTICS(pdev, vdev, OL_RX_ERR_PRIVACY,
				     pdev->sec_types[htt_sec_type_none],
				     is_mcast);
	}
	return FILTER_STATUS_REJECT;
}
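
/**
 * @brief Walk an MSDU list: apply decap, inspection and privacy filtering,
 *	free each MSDU's rx descriptor, prepend the OCB rx stats header when
 *	needed, and hand the surviving frames to the OS shim.
 */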
void
ol_rx_deliver(struct ol_txrx_vdev_t *vdev,
	      struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
{
	ol_txrx_pdev_handle pdev = vdev->pdev;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	cdf_nbuf_t deliver_list_head = NULL;
	cdf_nbuf_t deliver_list_tail = NULL;
	cdf_nbuf_t msdu;
	bool filter = false;
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	struct ol_rx_decap_info_t info;

	cdf_mem_set(&info, sizeof(info), 0);
#endif

	msdu = msdu_list;
	/*
	 * Check each MSDU to see whether it requires special handling,
	 * and free each MSDU's rx descriptor
	 */
	while (msdu) {
		void *rx_desc;
		int discard, inspect, dummy_fwd;
		cdf_nbuf_t next = cdf_nbuf_next(msdu);

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
		info.is_msdu_cmpl_mpdu =
			htt_rx_msdu_desc_completes_mpdu(htt_pdev, rx_desc);
		info.is_first_subfrm =
			htt_rx_msdu_first_msdu_flag(htt_pdev, rx_desc);
		if (OL_RX_DECAP(vdev, peer, msdu, &info) != A_OK) {
			discard = 1;
			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
				   "decap error %p from peer %p "
				   "(%02x:%02x:%02x:%02x:%02x:%02x) len %d\n",
				   msdu, peer,
				   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
				   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
				   peer->mac_addr.raw[4], peer->mac_addr.raw[5],
				   cdf_nbuf_len(msdu));
			goto DONE;
		}
#endif
		htt_rx_msdu_actions(pdev->htt_pdev, rx_desc, &discard,
				    &dummy_fwd, &inspect);
		if (inspect)
			ol_rx_inspect(vdev, peer, tid, msdu, rx_desc);

		/*
		 * Check the first msdu in the mpdu, if it will be filtered out,
		 * then discard the entire mpdu.
		 */
		if (htt_rx_msdu_first_msdu_flag(htt_pdev, rx_desc))
			filter = ol_rx_filter(vdev, peer, msdu, rx_desc);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
DONE:
#endif
		htt_rx_msdu_desc_free(htt_pdev, msdu);
		if (discard || (true == filter)) {
			ol_txrx_frms_dump("rx discarding:",
					  pdev, deliver_list_head,
					  ol_txrx_frm_dump_tcp_seq |
					  ol_txrx_frm_dump_contents,
					  0 /* don't print contents */);
			cdf_nbuf_free(msdu);
			/*
			 * If the discarded packet is the last packet of the
			 * delivery list, a NULL terminator should be added
			 * to the delivery list.
			 */
			if (next == NULL && deliver_list_head) {
				/* add NULL terminator */
				cdf_nbuf_set_next(deliver_list_tail, NULL);
			}
		} else {
			/*
			 * If this is for OCB,
			 * then prepend the RX stats header.
			 */
			if (vdev->opmode == wlan_op_mode_ocb) {
				int i;
				struct ol_txrx_ocb_chan_info *chan_info = 0;
				int packet_freq = peer->last_pkt_center_freq;

				for (i = 0; i < vdev->ocb_channel_count; i++) {
					if (vdev->ocb_channel_info[i].chan_freq
					    == packet_freq) {
						chan_info = &vdev->
							ocb_channel_info[i];
						break;
					}
				}
				if (!chan_info ||
				    !chan_info->disable_rx_stats_hdr) {
					struct ether_header eth_header = {
						{0} };
					struct ocb_rx_stats_hdr_t rx_header = {
						0};

					/*
					 * Construct the RX stats header and
					 * push that to the front of the packet.
					 */
					rx_header.version = 1;
					rx_header.length = sizeof(rx_header);
					rx_header.channel_freq =
						peer->last_pkt_center_freq;
					rx_header.rssi_cmb =
						peer->last_pkt_rssi_cmb;
					cdf_mem_copy(rx_header.rssi,
						     peer->last_pkt_rssi,
						     sizeof(rx_header.rssi));

					if (peer->last_pkt_legacy_rate_sel ==
					    0) {
						switch (peer->
							last_pkt_legacy_rate) {
						case 0x8:
							rx_header.datarate = 6;
							break;
						case 0x9:
							rx_header.datarate = 4;
							break;
						case 0xA:
							rx_header.datarate = 2;
							break;
						case 0xB:
							rx_header.datarate = 0;
							break;
						case 0xC:
							rx_header.datarate = 7;
							break;
						case 0xD:
							rx_header.datarate = 5;
							break;
						case 0xE:
							rx_header.datarate = 3;
							break;
						case 0xF:
							rx_header.datarate = 1;
							break;
						default:
							rx_header.datarate =
								0xFF;
							break;
						}
					} else {
						rx_header.datarate = 0xFF;
					}

					rx_header.timestamp_microsec = peer->
						last_pkt_timestamp_microsec;
					rx_header.timestamp_submicrosec = peer->
						last_pkt_timestamp_submicrosec;
					rx_header.tsf32 = peer->last_pkt_tsf;
					rx_header.ext_tid = peer->last_pkt_tid;

					cdf_nbuf_push_head(msdu,
							   sizeof(rx_header));
					cdf_mem_copy(cdf_nbuf_data(msdu),
						     &rx_header,
						     sizeof(rx_header));

					/*
					 * Construct the ethernet header with
					 * type 0x8152 and push that to the
					 * front of the packet to indicate the
					 * RX stats header.
					 */
					eth_header.ether_type = CDF_SWAP_U16(
						ETHERTYPE_OCB_RX);
					cdf_nbuf_push_head(msdu,
							   sizeof(eth_header));
					cdf_mem_copy(cdf_nbuf_data(msdu),
						     &eth_header,
						     sizeof(eth_header));
				}
			}
			OL_RX_PEER_STATS_UPDATE(peer, msdu);
			OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc,
					       OL_RX_ERR_NONE);
			TXRX_STATS_MSDU_INCR(vdev->pdev, rx.delivered, msdu);

			OL_TXRX_LIST_APPEND(deliver_list_head,
					    deliver_list_tail, msdu);
		}
		msdu = next;
	}
	/* sanity check - are there any frames left to give to the OS shim? */
	if (!deliver_list_head)
		return;

#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
	if (pdev->host_80211_enable)
		for (msdu = deliver_list_head; msdu;
		     msdu = cdf_nbuf_next(msdu))
			transcap_nwifi_to_8023(msdu);
#endif

	ol_txrx_frms_dump("rx delivering:",
			  pdev, deliver_list_head,
			  ol_txrx_frm_dump_tcp_seq | ol_txrx_frm_dump_contents,
			  0 /* don't print contents */);
	ol_rx_data_process(peer, deliver_list_head);
}
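
/**
 * @brief Drop an MSDU list that arrived for a partly-deleted peer, freeing
 *	each frame's rx descriptor and buffer.
 */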
void
ol_rx_discard(struct ol_txrx_vdev_t *vdev,
	      struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
{
	ol_txrx_pdev_handle pdev = vdev->pdev;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	while (msdu_list) {
		cdf_nbuf_t msdu = msdu_list;

		msdu_list = cdf_nbuf_next(msdu_list);
		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
			   "discard rx %p from partly-deleted peer %p "
			   "(%02x:%02x:%02x:%02x:%02x:%02x)\n",
			   msdu, peer,
			   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			   peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
		htt_rx_desc_frame_free(htt_pdev, msdu);
	}
}
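
/**
 * @brief Initialize a peer's rx state: reset the reorder state for every
 *	extended TID and set the security defaults (no PN check, no security).
 */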
void ol_rx_peer_init(struct ol_txrx_pdev_t *pdev, struct ol_txrx_peer_t *peer)
{
	uint8_t tid;

	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
		ol_rx_reorder_init(&peer->tids_rx_reorder[tid], tid);

		/* invalid sequence number */
		peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;
	}
	/*
	 * Set security defaults: no PN check, no security.
	 * The target may send a HTT SEC_IND message to overwrite
	 * these defaults.
	 */
	peer->security[txrx_sec_ucast].sec_type =
		peer->security[txrx_sec_mcast].sec_type = htt_sec_type_none;
	peer->keyinstalled = 0;
	cdf_atomic_init(&peer->fw_pn_check);
}

void
ol_rx_peer_cleanup(struct ol_txrx_vdev_t *vdev, struct ol_txrx_peer_t *peer)
{
	peer->keyinstalled = 0;
	ol_rx_reorder_peer_cleanup(vdev, peer);
}

/*
 * Free frames including both rx descriptors and buffers
 */
void ol_rx_frames_free(htt_pdev_handle htt_pdev, cdf_nbuf_t frames)
{
	cdf_nbuf_t next, frag = frames;

	while (frag) {
		next = cdf_nbuf_next(frag);
		htt_rx_desc_frame_free(htt_pdev, frag);
		frag = next;
	}
}
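
/**
 * @brief Handle an in-order rx indication: pop the full MSDU list from HTT,
 *	replenish the rx buffer ring, and pass the NULL-terminated list to the
 *	peer's rx_opt_proc handler (or free it if the peer is unknown).
 */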
void
ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
				  cdf_nbuf_t rx_ind_msg,
				  uint16_t peer_id,
				  uint8_t tid, uint8_t is_offload)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	struct ol_txrx_peer_t *peer = NULL;
	htt_pdev_handle htt_pdev = NULL;
	int status;
	cdf_nbuf_t head_msdu, tail_msdu = NULL;

	if (pdev) {
		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
		htt_pdev = pdev->htt_pdev;
	} else {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: Invalid pdev passed!\n", __func__);
		cdf_assert_always(pdev);
		return;
	}

#if defined(HELIUMPLUS_DEBUG)
	cdf_print("%s %d: rx_ind_msg 0x%p peer_id %d tid %d is_offload %d\n",
		  __func__, __LINE__, rx_ind_msg, peer_id, tid, is_offload);
#endif

	/*
	 * Get a linked list of the MSDUs in the rx in order indication.
	 * This also attaches each rx MSDU descriptor to the
	 * corresponding rx MSDU network buffer.
	 */
	status = htt_rx_amsdu_pop(htt_pdev, rx_ind_msg, &head_msdu, &tail_msdu);
	if (cdf_unlikely(0 == status)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "%s: Pop status is 0, returning here\n", __func__);
		return;
	}

	/*
	 * Replenish the rx buffer ring first to provide buffers to the target
	 * rather than waiting for the indeterminate time taken by the OS
	 * to consume the rx frames
	 */
	htt_rx_msdu_buff_replenish(htt_pdev);

	/* Send the chain of MSDUs to the OS */
	/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
	cdf_nbuf_set_next(tail_msdu, NULL);

	/* Pktlog */
#ifdef WDI_EVENT_ENABLE
	wdi_event_handler(WDI_EVENT_RX_DESC_REMOTE, pdev, head_msdu);
#endif

	/*
	 * if this is an offload indication, peer id is carried in the
	 * rx buffer
	 */
	if (peer) {
		vdev = peer->vdev;
	} else {
		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
			   "%s: Couldn't find peer from ID 0x%x\n",
			   __func__, peer_id);
		while (head_msdu) {
			cdf_nbuf_t msdu = head_msdu;

			head_msdu = cdf_nbuf_next(head_msdu);
			htt_rx_desc_frame_free(htt_pdev, msdu);
		}
		return;
	}

	peer->rx_opt_proc(vdev, peer, tid, head_msdu);
}

/* the msdu_list passed here must be NULL terminated */
void
ol_rx_in_order_deliver(struct ol_txrx_vdev_t *vdev,
		       struct ol_txrx_peer_t *peer,
		       unsigned tid, cdf_nbuf_t msdu_list)
{
	cdf_nbuf_t msdu;

	msdu = msdu_list;
	/*
	 * Currently, this does not check each MSDU to see whether it requires
	 * special handling. MSDUs that need special handling (example: IGMP
	 * frames) should be sent via a separate HTT message. Also, this does
	 * not do rx->tx forwarding or filtering.
	 */
	while (msdu) {
		cdf_nbuf_t next = cdf_nbuf_next(msdu);

		OL_RX_PEER_STATS_UPDATE(peer, msdu);
		OL_RX_ERR_STATISTICS_1(vdev->pdev, vdev, peer, rx_desc,
				       OL_RX_ERR_NONE);
		TXRX_STATS_MSDU_INCR(vdev->pdev, rx.delivered, msdu);

		msdu = next;
	}

	ol_txrx_frms_dump("rx delivering:",
			  pdev, deliver_list_head,
			  ol_txrx_frm_dump_tcp_seq | ol_txrx_frm_dump_contents,
			  0 /* don't print contents */);
	ol_rx_data_process(peer, msdu_list);
}
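
/**
 * @brief Handle an offload-deliver indication whose MSDUs are referenced by
 *	physical address: pop each MSDU list, deliver it to the peer or free
 *	it if the peer is unknown, then replenish the rx buffer ring.
 */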
void
ol_rx_offload_paddr_deliver_ind_handler(htt_pdev_handle htt_pdev,
					uint32_t msdu_count,
					uint32_t *msg_word)
{
	int vdev_id, peer_id, tid;
	cdf_nbuf_t head_buf, tail_buf, buf;
	struct ol_txrx_peer_t *peer;
	uint8_t fw_desc;
	int msdu_iter = 0;

	while (msdu_count) {
		htt_rx_offload_paddr_msdu_pop_ll(htt_pdev, msg_word, msdu_iter,
						 &vdev_id, &peer_id, &tid,
						 &fw_desc, &head_buf,
						 &tail_buf);
		peer = ol_txrx_peer_find_by_id(htt_pdev->txrx_pdev, peer_id);
		if (peer) {
			ol_rx_data_process(peer, head_buf);
		} else {
			buf = head_buf;
			while (1) {
				cdf_nbuf_t next;

				next = cdf_nbuf_next(buf);
				htt_rx_desc_frame_free(htt_pdev, buf);
				if (buf == tail_buf)
					break;
				buf = next;
			}
		}
		msdu_iter++;
		msdu_count--;
	}
	htt_rx_msdu_buff_replenish(htt_pdev);
}

#ifdef NEVERDEFINED
/**
 * @brief populates vow ext stats in given network buffer.
 * @param msdu - network buffer handle
 * @param pdev - handle to htt dev.
 */
void ol_ath_add_vow_extstats(htt_pdev_handle pdev, cdf_nbuf_t msdu)
{
	/*
	 * FIX THIS:
	 * txrx should not be directly using data types (scn)
	 * that are internal to other modules.
	 */
	struct ol_ath_softc_net80211 *scn =
		(struct ol_ath_softc_net80211 *)pdev->ctrl_pdev;

	if (scn->vow_extstats == 0) {
		return;
	} else {
		uint8_t *data, *l3_hdr, *bp;
		uint16_t ethertype;
		int offset;
		struct vow_extstats vowstats;

		data = cdf_nbuf_data(msdu);

		offset = ETHERNET_ADDR_LEN * 2;
		l3_hdr = data + ETHERNET_HDR_LEN;
		ethertype = (data[offset] << 8) | data[offset + 1];
		if (ethertype == ETHERTYPE_IPV4) {
			offset = IPV4_HDR_OFFSET_PROTOCOL;
			if ((l3_hdr[offset] == IP_PROTOCOL_UDP) &&
			    (l3_hdr[0] == IP_VER4_N_NO_EXTRA_HEADERS)) {
				bp = data + EXT_HDR_OFFSET;

				if ((data[RTP_HDR_OFFSET] == UDP_PDU_RTP_EXT) &&
				    (bp[0] == 0x12) &&
				    (bp[1] == 0x34) &&
				    (bp[2] == 0x00) && (bp[3] == 0x08)) {
					/*
					 * Clear UDP checksum so we do not have
					 * to recalculate it
					 * after filling in status fields.
					 */
					data[UDP_CKSUM_OFFSET] = 0;
					data[(UDP_CKSUM_OFFSET + 1)] = 0;

					bp += IPERF3_DATA_OFFSET;

					htt_rx_get_vowext_stats(msdu,
								&vowstats);

					/* control channel RSSI */
					*bp++ = vowstats.rx_rssi_ctl0;
					*bp++ = vowstats.rx_rssi_ctl1;
					*bp++ = vowstats.rx_rssi_ctl2;

					/* rx rate info */
					*bp++ = vowstats.rx_bw;
					*bp++ = vowstats.rx_sgi;
					*bp++ = vowstats.rx_nss;

					*bp++ = vowstats.rx_rssi_comb;
					/* rsflags */
					*bp++ = vowstats.rx_rs_flags;

					/* Time stamp Lo */
					*bp++ = (uint8_t)
						((vowstats.
						  rx_macTs & 0x0000ff00) >> 8);
					*bp++ = (uint8_t)
						(vowstats.rx_macTs & 0x0000ff);
					/* rx phy errors */
					*bp++ = (uint8_t)
						((scn->chan_stats.
						  phy_err_cnt >> 8) & 0xff);
					*bp++ =
						(uint8_t) (scn->chan_stats.
							   phy_err_cnt & 0xff);
					/* rx clear count */
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  rx_clear_count >> 24) & 0xff);
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  rx_clear_count >> 16) & 0xff);
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  rx_clear_count >> 8) & 0xff);
					*bp++ = (uint8_t)
						(scn->mib_cycle_cnts.
						 rx_clear_count & 0xff);
					/* rx cycle count */
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  cycle_count >> 24) & 0xff);
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  cycle_count >> 16) & 0xff);
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  cycle_count >> 8) & 0xff);
					*bp++ = (uint8_t)
						(scn->mib_cycle_cnts.
						 cycle_count & 0xff);

					*bp++ = vowstats.rx_ratecode;
					*bp++ = vowstats.rx_moreaggr;

					/* sequence number */
					*bp++ = (uint8_t)
						((vowstats.rx_seqno >> 8) &
						 0xff);
					*bp++ = (uint8_t)
						(vowstats.rx_seqno & 0xff);
				}
			}
		}
	}
}
#endif