/*
 * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*-
 * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <ol_htt_api.h>
#include <ol_txrx_api.h>
#include <ol_txrx_htt_api.h>
#include <ol_htt_rx_api.h>
#include <ol_txrx_types.h>
#include <ol_rx_reorder.h>
#include <ol_rx_pn.h>
#include <ol_rx_fwd.h>
#include <ol_rx.h>
#include <ol_txrx_internal.h>
#include <ol_ctrl_txrx_api.h>
#include <ol_txrx_peer_find.h>
#include <cdf_nbuf.h>
#include <ieee80211.h>
#include <cdf_util.h>
#include <athdefs.h>
#include <cdf_memory.h>
#include <ol_rx_defrag.h>
#include <enet.h>
#include <cdf_time.h>		/* cdf_system_time */

#define DEFRAG_IEEE80211_ADDR_EQ(a1, a2) \
	(cdf_mem_compare(a1, a2, IEEE80211_ADDR_LEN) == 0)

#define DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	cdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)

#define DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh)->i_fc[0] & \
	  (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
	 (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))

#define DEFRAG_IEEE80211_QOS_GET_TID(_x) \
	((_x)->i_qos[0] & IEEE80211_QOS_TID)

const struct ol_rx_defrag_cipher f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct ol_rx_defrag_cipher f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct ol_rx_defrag_cipher f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};
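
/*
 * Illustrative sketch, not part of the driver (kept under #if 0): how the
 * ic_header/ic_trailer fields of the cipher tables above drive
 * decapsulation. To stay self-contained it operates on a flat byte buffer
 * with memmove() instead of a cdf_nbuf; example_decap(), "frame", and
 * "frame_len" are hypothetical names.
 */
#if 0
#include <string.h>

static size_t example_decap(uint8_t *frame, size_t frame_len, size_t hdrlen,
			    const struct ol_rx_defrag_cipher *cipher)
{
	/* Slide the 802.11 header forward over the IV/keyID bytes; the
	 * caller treats frame + cipher->ic_header as the new frame start. */
	memmove(frame + cipher->ic_header, frame, hdrlen);
	/* Account for the dropped crypto header and per-fragment trailer. */
	return frame_len - cipher->ic_header - cipher->ic_trailer;
}
#endif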

inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
	htt_pdev_handle htt_pdev, cdf_nbuf_t frag)
{
	return (struct ieee80211_frame *)cdf_nbuf_data(frag);
}

#define ol_rx_frag_pull_hdr(pdev, frag, hdrsize) \
	cdf_nbuf_pull_head(frag, hdrsize);

#define OL_RX_FRAG_CLONE(frag) NULL	/* no-op */

static inline void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
		       cdf_nbuf_t msdu,
		       void **rx_desc_old_position,
		       void **ind_old_position, int *rx_desc_len)
{
	*rx_desc_old_position = NULL;
	*ind_old_position = NULL;
	*rx_desc_len = 0;
}

/*
 * Process incoming fragments
 */
void
ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
			      cdf_nbuf_t rx_frag_ind_msg,
			      uint16_t peer_id, uint8_t tid)
{
	uint16_t seq_num;
	int seq_num_start, seq_num_end;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	cdf_nbuf_t head_msdu, tail_msdu;
	void *rx_mpdu_desc;

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
	    htt_rx_ind_flush(pdev->htt_pdev, rx_frag_ind_msg) && peer) {
		htt_rx_frag_ind_flush_seq_num_range(pdev->htt_pdev,
						    rx_frag_ind_msg,
						    &seq_num_start,
						    &seq_num_end);
		/*
		 * Assuming flush indication for frags sent from target is
		 * separate from normal frames
		 */
		ol_rx_reorder_flush_frag(htt_pdev, peer, tid, seq_num_start);
	}
	if (peer) {
		htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
				&tail_msdu);
		cdf_assert(head_msdu == tail_msdu);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev,
							   head_msdu);
		} else {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev,
							   rx_frag_ind_msg);
		}
		seq_num = htt_rx_mpdu_desc_seq_num(htt_pdev, rx_mpdu_desc);
		OL_RX_ERR_STATISTICS_1(pdev, peer->vdev, peer, rx_mpdu_desc,
				       OL_RX_ERR_NONE_FRAG);
		ol_rx_reorder_store_frag(pdev, peer, tid, seq_num, head_msdu);
	} else {
		/* invalid frame - discard it */
		htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
				&tail_msdu);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu);
		else
			htt_rx_mpdu_desc_list_next(htt_pdev, rx_frag_ind_msg);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
	}
	/* request HTT to provide new rx MSDU buffers for the target to fill. */
	htt_rx_msdu_buff_replenish(htt_pdev);
}

/*
 * Flushing fragments
 */
void
ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev,
			 struct ol_txrx_peer_t *peer, unsigned tid,
			 int seq_num)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	int seq;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];
	if (rx_reorder_array_elem->head) {
		ol_rx_frames_free(htt_pdev, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
	}
}

/*
 * Reorder and store fragments
 */
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned tid, uint16_t seq_num, cdf_nbuf_t frag)
{
	struct ieee80211_frame *fmac_hdr, *mac_hdr;
	uint8_t fragno, more_frag, all_frag_present = 0;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	uint16_t frxseq, rxseq, seq;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	cdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];

	mac_hdr = (struct ieee80211_frame *)
		  ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	rxseq = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
	fragno = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		 IEEE80211_SEQ_FRAG_MASK;
	more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;

	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		/* unfragmented frame: deliver it immediately */
		rx_reorder_array_elem->head = frag;
		rx_reorder_array_elem->tail = frag;
		cdf_nbuf_set_next(frag, NULL);
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		return;
	}
	if (rx_reorder_array_elem->head) {
		fmac_hdr = (struct ieee80211_frame *)
			   ol_rx_frag_get_mac_hdr(htt_pdev,
						  rx_reorder_array_elem->head);
		frxseq = cdf_le16_to_cpu(*(uint16_t *) fmac_hdr->i_seq) >>
			 IEEE80211_SEQ_SEQ_SHIFT;
		if (rxseq != frxseq
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr1,
						 fmac_hdr->i_addr1)
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr2,
						 fmac_hdr->i_addr2)) {
			/* the new fragment doesn't belong to the stored
			 * partial MPDU: drop the stored fragments */
			ol_rx_frames_free(htt_pdev,
					  rx_reorder_array_elem->head);
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "\n ol_rx_reorder_store: %s mismatch \n",
				   (rxseq == frxseq)
				   ? "address"
				   : "seq number");
		}
	}
	ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head,
			      &rx_reorder_array_elem->tail, frag,
			      &all_frag_present);
	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_remove(peer, tid);

	if (all_frag_present) {
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		peer->tids_rx_reorder[tid].defrag_timeout_ms = 0;
		peer->tids_last_seq[tid] = seq_num;
	} else if (pdev->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			cdf_system_ticks_to_msecs(cdf_system_ticks());

		peer->tids_rx_reorder[tid].defrag_timeout_ms =
			now_ms + pdev->rx.defrag.timeout_ms;
		ol_rx_defrag_waitlist_add(peer, tid);
	}
}
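
/*
 * Illustrative sketch, not part of the driver (kept under #if 0): the
 * sequence-control parsing used by ol_rx_reorder_store_frag() above. The
 * 16-bit little-endian field carries the fragment number in bits 0-3 and
 * the sequence number in bits 4-15, which is what IEEE80211_SEQ_FRAG_MASK
 * and IEEE80211_SEQ_SEQ_SHIFT encode. example_parse_seq_ctrl() is a
 * hypothetical name.
 */
#if 0
static void example_parse_seq_ctrl(uint16_t seq_ctrl_le,
				   uint16_t *seq_num, uint8_t *frag_num)
{
	uint16_t v = cdf_le16_to_cpu(seq_ctrl_le);

	*frag_num = v & IEEE80211_SEQ_FRAG_MASK;	/* bits 0-3 */
	*seq_num = v >> IEEE80211_SEQ_SEQ_SHIFT;	/* bits 4-15 */
}
#endif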

/*
 * Insert and store fragments
 */
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
		      cdf_nbuf_t *head_addr,
		      cdf_nbuf_t *tail_addr,
		      cdf_nbuf_t frag, uint8_t *all_frag_present)
{
	cdf_nbuf_t next, prev = NULL, cur = *head_addr;
	struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
	uint8_t fragno, cur_fragno, lfragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;
	cdf_nbuf_t frag_clone;

	cdf_assert(frag);
	frag_clone = OL_RX_FRAG_CLONE(frag);
	frag = frag_clone ? frag_clone : frag;

	mac_hdr = (struct ieee80211_frame *)
		  ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	fragno = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		 IEEE80211_SEQ_FRAG_MASK;

	if (!(*head_addr)) {
		*head_addr = frag;
		*tail_addr = frag;
		cdf_nbuf_set_next(*tail_addr, NULL);
		return;
	}
	/* For efficiency, compare with tail first */
	lmac_hdr = (struct ieee80211_frame *)
		   ol_rx_frag_get_mac_hdr(htt_pdev, *tail_addr);
	lfragno = cdf_le16_to_cpu(*(uint16_t *) lmac_hdr->i_seq) &
		  IEEE80211_SEQ_FRAG_MASK;
	if (fragno > lfragno) {
		cdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		cdf_nbuf_set_next(*tail_addr, NULL);
	} else {
		/* find the insertion point; the loop terminates because
		 * fragno <= lfragno, the tail's fragment number */
		do {
			cmac_hdr = (struct ieee80211_frame *)
				   ol_rx_frag_get_mac_hdr(htt_pdev, cur);
			cur_fragno =
				cdf_le16_to_cpu(*(uint16_t *) cmac_hdr->i_seq)
				& IEEE80211_SEQ_FRAG_MASK;
			prev = cur;
			cur = cdf_nbuf_next(cur);
		} while (fragno > cur_fragno);

		if (fragno == cur_fragno) {
			/* duplicate fragment: drop it */
			htt_rx_desc_frame_free(htt_pdev, frag);
			*all_frag_present = 0;
			return;
		} else {
			cdf_nbuf_set_next(prev, frag);
			cdf_nbuf_set_next(frag, cur);
		}
	}
	next = cdf_nbuf_next(*head_addr);
	lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
								    *tail_addr);
	last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
	if (!last_morefrag) {
		/* the final fragment has arrived; verify that fragments
		 * 1..N are all present, in order, with no gaps */
		do {
			next_hdr =
				(struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, next);
			next_fragno =
				cdf_le16_to_cpu(*(uint16_t *) next_hdr->i_seq)
				& IEEE80211_SEQ_FRAG_MASK;
			count++;
			if (next_fragno != count)
				break;
			next = cdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return;
		}
	}
	*all_frag_present = 0;
}
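
/*
 * Illustrative sketch, not part of the driver (kept under #if 0): the
 * ordering invariant ol_rx_fraglist_insert() maintains, restated on a
 * plain singly linked list keyed by fragment number. struct frag_node and
 * example_sorted_insert() are hypothetical.
 */
#if 0
struct frag_node {
	uint8_t fragno;
	struct frag_node *next;
};

static void example_sorted_insert(struct frag_node **head,
				  struct frag_node *frag)
{
	struct frag_node *prev = NULL, *cur = *head;

	/* walk to the first node whose fragno is >= the new one */
	while (cur && cur->fragno < frag->fragno) {
		prev = cur;
		cur = cur->next;
	}
	if (cur && cur->fragno == frag->fragno)
		return;			/* duplicate fragment: drop it */
	frag->next = cur;
	if (prev)
		prev->next = frag;
	else
		*head = frag;
}
#endif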

/*
 * add tid to pending fragment wait list
 */
void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	TAILQ_INSERT_TAIL(&pdev->rx.defrag.waitlist, rx_reorder,
			  defrag_waitlist_elem);
}

/*
 * remove tid from pending fragment wait list
 */
void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) {
		TAILQ_REMOVE(&pdev->rx.defrag.waitlist, rx_reorder,
			     defrag_waitlist_elem);
		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
		rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
	} else if (rx_reorder->defrag_waitlist_elem.tqe_prev != NULL) {
		/* inconsistent list state: tqe_next is NULL but tqe_prev
		 * is not */
		TXRX_PRINT(TXRX_PRINT_LEVEL_FATAL_ERR,
			   "waitlist->tqe_next = NULL but tqe_prev != NULL\n");
		CDF_ASSERT(0);
		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	}
}

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
#endif
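
/*
 * Illustrative sketch, not part of the driver (kept under #if 0): how
 * ol_rx_defrag_waitlist_flush() below recovers the owning peer from a
 * waitlist element. tids_rx_reorder is an array embedded in
 * ol_txrx_peer_t, so stepping back "tid" elements yields
 * &peer->tids_rx_reorder[0], and container_of() subtracts that member's
 * offset to reach the enclosing peer. example_peer_from_reorder() is a
 * hypothetical name.
 */
#if 0
static struct ol_txrx_peer_t *
example_peer_from_reorder(struct ol_rx_reorder_t *rx_reorder, unsigned tid)
{
	struct ol_rx_reorder_t *base = rx_reorder - tid;

	return container_of(base, struct ol_txrx_peer_t,
			    tids_rx_reorder[0]);
}
#endif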

/*
 * flush stale fragments from the waitlist
 */
void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
{
	struct ol_rx_reorder_t *rx_reorder, *tmp;
	uint32_t now_ms = cdf_system_ticks_to_msecs(cdf_system_ticks());

	TAILQ_FOREACH_SAFE(rx_reorder, &pdev->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct ol_txrx_peer_t *peer;
		struct ol_rx_reorder_t *rx_reorder_base;
		unsigned tid;

		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		/* get index 0 of the rx_reorder array */
		rx_reorder_base = rx_reorder - tid;
		peer = container_of(rx_reorder_base, struct ol_txrx_peer_t,
				    tids_rx_reorder[0]);

		ol_rx_defrag_waitlist_remove(peer, tid);
		ol_rx_reorder_flush_frag(pdev->htt_pdev, peer, tid,
					 0 /* frags always stored at seq 0 */);
	}
}

/*
 * Handling security checking and processing fragments
 */
void
ol_rx_defrag(ol_txrx_pdev_handle pdev,
	     struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t frag_list)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	cdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list;
	uint8_t index, tkip_demic = 0;
	uint16_t hdr_space;
	void *rx_desc;
	struct ieee80211_frame *wh;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	vdev = peer->vdev;

	/* bypass defrag for safe mode */
	if (vdev->safemode) {
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			ol_rx_in_order_deliver(vdev, peer, tid, frag_list);
		else
			ol_rx_deliver(vdev, peer, tid, frag_list);
		return;
	}

	while (cur) {
		tmp_next = cdf_nbuf_next(cur);
		cdf_nbuf_set_next(cur, NULL);
		if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) {
			/* PN check failed, discard frags */
			if (prev) {
				cdf_nbuf_set_next(prev, NULL);
				ol_rx_frames_free(htt_pdev, frag_list);
			}
			ol_rx_frames_free(htt_pdev, tmp_next);
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "ol_rx_defrag: PN check failed\n");
			return;
		}
		/* remove FCS from each fragment */
		cdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		prev = cur;
		cdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list;
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, cur);
	hdr_space = ol_rx_frag_hdrsize(wh);
	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag_list);
	cdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;

	switch (peer->security[index].sec_type) {
	case htt_sec_type_tkip:
		tkip_demic = 1;
		/* fall-through to rest of tkip ops */
	case htt_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = cdf_nbuf_next(cur);
			if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) {
				/* TKIP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					   "\n ol_rx_defrag: TKIP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = cdf_nbuf_next(cur);
			if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) {
				/* CCMP demic failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					   "\n ol_rx_defrag: CCMP demic failed\n");
				return;
			}
			if (!ol_rx_frag_ccmp_decap(pdev, cur, hdr_space)) {
				/* CCMP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					   "\n ol_rx_defrag: CCMP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_wep40:
	case htt_sec_type_wep104:
	case htt_sec_type_wep128:
		while (cur) {
			tmp_next = cdf_nbuf_next(cur);
			if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) {
				/* WEP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					   "\n ol_rx_defrag: WEP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	default:
		break;
	}

	msdu = ol_rx_defrag_decap_recombine(htt_pdev, frag_list, hdr_space);
	if (!msdu)
		return;

	if (tkip_demic) {
		cdf_mem_copy(key,
			     peer->security[index].michael_key,
			     sizeof(peer->security[index].michael_key));
		if (!ol_rx_frag_tkip_demic(pdev, key, msdu, hdr_space)) {
			htt_rx_desc_frame_free(htt_pdev, msdu);
			ol_rx_err(pdev->ctrl_pdev,
				  vdev->vdev_id, peer->mac_addr.raw, tid, 0,
				  OL_RX_DEFRAG_ERR, msdu, NULL, 0);
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "\n ol_rx_defrag: TKIP demic failed\n");
			return;
		}
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, msdu);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh))
		ol_rx_defrag_qos_decap(pdev, msdu, hdr_space);
	if (ol_cfg_frame_type(pdev->ctrl_pdev) == wlan_frm_fmt_802_3)
		ol_rx_defrag_nwifi_to_8023(pdev, msdu);

	ol_rx_fwd_check(vdev, peer, tid, msdu);
}

/*
 * Handling TKIP processing for defragmentation
 */
int
ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
		      cdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	/* Header should have extended IV */
	origHdr = (uint8_t *) (cdf_nbuf_data(msdu) + rx_desc_len);

	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	cdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
	cdf_nbuf_pull_head(msdu, f_tkip.ic_header);
	cdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
	return OL_RX_DEFRAG_OK;
}

/*
 * Handling WEP processing for defragmentation
 */
int
ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu,
		     uint16_t hdrlen)
{
	uint8_t *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	origHdr = (uint8_t *) (cdf_nbuf_data(msdu) + rx_desc_len);
	cdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen);
	cdf_nbuf_pull_head(msdu, f_wep.ic_header);
	cdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
	return OL_RX_DEFRAG_OK;
}

/*
 * Verify and strip MIC from the frame.
 */
int
ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key,
		      cdf_nbuf_t msdu, uint16_t hdrlen)
{
	int status;
	uint32_t pktlen;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	pktlen = ol_rx_defrag_len(msdu) - rx_desc_len;

	status = ol_rx_defrag_mic(pdev, key, msdu, hdrlen,
				  pktlen - (hdrlen + f_tkip.ic_miclen), mic);
	if (status != OL_RX_DEFRAG_OK)
		return OL_RX_DEFRAG_ERR;

	ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len,
			      f_tkip.ic_miclen, (caddr_t) mic0);
	if (cdf_mem_compare(mic, mic0, f_tkip.ic_miclen))
		return OL_RX_DEFRAG_ERR;

	cdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen);
	return OL_RX_DEFRAG_OK;
}

/*
 * Handling CCMP processing for defragmentation
 */
int
ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
		      cdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	origHdr = (uint8_t *) (cdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	cdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen);
	cdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);
	return OL_RX_DEFRAG_OK;
}

/*
 * Verify and strip MIC from the frame.
 */
int
ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
		      cdf_nbuf_t wbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	origHdr = (uint8_t *) (cdf_nbuf_data(wbuf) + rx_desc_len);
	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	cdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);
	return OL_RX_DEFRAG_OK;
}

/*
 * Craft pseudo header used to calculate the MIC.
 */
void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);	/* SA */
		break;
	case IEEE80211_FC1_DIR_TODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);	/* SA */
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr3);	/* SA */
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr4);	/* SA */
		break;
	}
	/*
	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for a data frame, but it
	 * could also be set for deauth, disassoc, action, etc. for a
	 * management-type frame. It comes into the picture for MFP.
	 */
	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
		const struct ieee80211_qosframe *qwh =
			(const struct ieee80211_qosframe *)wh;
		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
	} else {
		hdr[12] = 0;
	}
	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
}

/*
 * Michael MIC for defragmentation
 */
int
ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
		 const uint8_t *key,
		 cdf_nbuf_t wbuf,
		 uint16_t off, uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	ol_rx_defrag_michdr((struct ieee80211_frame *)(cdf_nbuf_data(wbuf) +
							rx_desc_len), hdr);
	l = get_le32(key);
	r = get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	l ^= get_le32(hdr);
	michael_block(l, r);
	l ^= get_le32(&hdr[4]);
	michael_block(l, r);
	l ^= get_le32(&hdr[8]);
	michael_block(l, r);
	l ^= get_le32(&hdr[12]);
	michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len + off;
	space = ol_rx_defrag_len(wbuf) - rx_desc_len - off;
	for (;;) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= get_le32(data);
			michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = cdf_nbuf_next(wbuf);
		if (wbuf == NULL)
			return OL_RX_DEFRAG_ERR;

		rx_desc_len = 0;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
			if ((ol_rx_defrag_len(wbuf) - rx_desc_len) <
			    sizeof(uint32_t) - space) {
				return OL_RX_DEFRAG_ERR;
			}
			switch (space) {
			case 1:
				l ^= get_le32_split(data[0], data_next[0],
						    data_next[1],
						    data_next[2]);
				data = data_next + 3;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 3;
				break;
			case 2:
				l ^= get_le32_split(data[0], data[1],
						    data_next[0],
						    data_next[1]);
				data = data_next + 2;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 2;
				break;
			case 3:
				l ^= get_le32_split(data[0], data[1], data[2],
						    data_next[0]);
				data = data_next + 1;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 1;
				break;
			}
			michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
			space = ol_rx_defrag_len(wbuf) - rx_desc_len;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	michael_block(l, r);
	michael_block(l, r);
	put_le32(mic, l);
	put_le32(mic + 4, r);
	return OL_RX_DEFRAG_OK;
}
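
/*
 * Illustrative sketch, not part of the driver (kept under #if 0): the
 * standard Michael block function assumed to be behind the michael_block()
 * macro used above (the driver's real definition lives elsewhere, e.g. in
 * ol_rx_defrag.h). EX_ROL32/EX_ROR32 and example_michael_block() are
 * hypothetical names; the middle XOR is the byte-swap ("XSWAP") step of
 * the Michael algorithm.
 */
#if 0
#define EX_ROL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
#define EX_ROR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

static void example_michael_block(uint32_t *l, uint32_t *r)
{
	*r ^= EX_ROL32(*l, 17);
	*l += *r;
	*r ^= ((*l & 0xff00ff00) >> 8) | ((*l & 0x00ff00ff) << 8);
	*l += *r;
	*r ^= EX_ROL32(*l, 3);
	*l += *r;
	*r ^= EX_ROR32(*l, 2);
	*l += *r;
}
#endif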

/*
 * Calculate the header size
 */
uint16_t ol_rx_frag_hdrsize(const void *data)
{
	const struct ieee80211_frame *wh =
		(const struct ieee80211_frame *)data;
	uint16_t size = sizeof(struct ieee80211_frame);

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
		size += IEEE80211_ADDR_LEN;

	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		size += sizeof(uint16_t);
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			size += sizeof(struct ieee80211_htc);
	}
	return size;
}
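
/*
 * Illustrative sketch, not part of the driver (kept under #if 0): the same
 * sizing logic as ol_rx_frag_hdrsize() above, written against raw
 * frame-control bytes. A 3-address data frame header is 24 bytes; a
 * 4-address (DS-to-DS) frame adds the 6-byte fourth address, QoS data adds
 * the 2-byte QoS control field, and the Order bit on a QoS frame adds the
 * HT Control field. example_hdrsize() is a hypothetical name.
 */
#if 0
static uint16_t example_hdrsize(uint8_t fc0, uint8_t fc1)
{
	uint16_t size = sizeof(struct ieee80211_frame);	/* 24 bytes */

	if ((fc1 & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
		size += IEEE80211_ADDR_LEN;		/* addr4: +6 */
	if ((fc0 & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) ==
	    (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS)) {
		size += sizeof(uint16_t);		/* QoS control: +2 */
		if (fc1 & IEEE80211_FC1_ORDER)
			size += sizeof(struct ieee80211_htc); /* HTC: +4 */
	}
	return size;
}
#endif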

/*
 * Recombine and decap fragments
 */
cdf_nbuf_t
ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
			     cdf_nbuf_t frag_list, uint16_t hdrsize)
{
	cdf_nbuf_t tmp;
	cdf_nbuf_t msdu = frag_list;
	cdf_nbuf_t rx_nbuf = frag_list;
	struct ieee80211_frame *wh;

	msdu = cdf_nbuf_next(msdu);
	cdf_nbuf_set_next(rx_nbuf, NULL);
	while (msdu) {
		htt_rx_msdu_desc_free(htt_pdev, msdu);
		tmp = cdf_nbuf_next(msdu);
		cdf_nbuf_set_next(msdu, NULL);
		ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize);
		if (!ol_rx_defrag_concat(rx_nbuf, msdu)) {
			ol_rx_frames_free(htt_pdev, tmp);
			htt_rx_desc_frame_free(htt_pdev, rx_nbuf);
			cdf_nbuf_free(msdu);
			/* msdu rx desc already freed above */
			return NULL;
		}
		msdu = tmp;
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
							      rx_nbuf);
	wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
	*(uint16_t *) wh->i_seq &= ~IEEE80211_SEQ_FRAG_MASK;
	return rx_nbuf;
}

void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu)
{
	struct ieee80211_frame wh;
	uint32_t hdrsize;
	struct llc_snap_hdr_t llchdr;
	struct ethernet_hdr_t *eth_hdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;
	struct ieee80211_frame *wh_ptr;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh_ptr = (struct ieee80211_frame *)(cdf_nbuf_data(msdu) +
					    rx_desc_len);
	cdf_mem_copy(&wh, wh_ptr, sizeof(wh));
	hdrsize = sizeof(struct ieee80211_frame);
	cdf_mem_copy(&llchdr, ((uint8_t *) (cdf_nbuf_data(msdu) +
					    rx_desc_len)) + hdrsize,
		     sizeof(struct llc_snap_hdr_t));

	/*
	 * Now move the data pointer to the beginning of the MAC header:
	 * new header = old header + (wifi hdr size + llc hdr size -
	 * eth hdr size)
	 */
	cdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));
	eth_hdr = (struct ethernet_hdr_t *)(cdf_nbuf_data(msdu));
	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr2,
			     IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_TODS:
		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
			     IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr2,
			     IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr3,
			     IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		break;
	}
	cdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
		     sizeof(llchdr.ethertype));
}
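
/*
 * Illustrative sketch, not part of the driver (kept under #if 0): the
 * pointer arithmetic behind ol_rx_defrag_nwifi_to_8023() above. Assuming
 * the conventional sizes (24-byte 802.11 header, 8-byte LLC/SNAP header,
 * 14-byte Ethernet header), the buffer start advances by 24 + 8 - 14 = 18
 * bytes before DA/SA are rewritten from the 802.11 addresses.
 * example_nwifi_to_8023_shift() is a hypothetical name.
 */
#if 0
static size_t example_nwifi_to_8023_shift(void)
{
	return sizeof(struct ieee80211_frame)	/* typically 24 */
	       + sizeof(struct llc_snap_hdr_t)	/* typically 8 */
	       - sizeof(struct ethernet_hdr_t);	/* typically 14 */
}
#endif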

/*
 * Handling QoS for defragmentation
 */
void
ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
		       cdf_nbuf_t nbuf, uint16_t hdrlen)
{
	struct ieee80211_frame *wh;
	uint16_t qoslen;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh = (struct ieee80211_frame *)(cdf_nbuf_data(nbuf) + rx_desc_len);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		qoslen = sizeof(struct ieee80211_qoscntl);
		/* A QoS frame with the Order bit set indicates an HTC frame */
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			qoslen += sizeof(struct ieee80211_htc);

		/* remove the QoS field from the header */
		hdrlen -= qoslen;
		cdf_mem_move((uint8_t *) wh + qoslen, wh, hdrlen);
		wh = (struct ieee80211_frame *)cdf_nbuf_pull_head(nbuf,
								  rx_desc_len +
								  qoslen);
		/* clear the QoS bit */
		/*
		 * KW# 6154: 'cdf_nbuf_pull_head' in turn calls
		 * __cdf_nbuf_pull_head, which returns NULL if there is not
		 * sufficient data to pull.
		 * It's guaranteed that cdf_nbuf_pull_head will succeed
		 * rather than returning NULL, since the entire rx frame is
		 * already present in the rx buffer.
		 * However, to make it obvious to static analyzers that this
		 * code is safe, add an explicit check that
		 * cdf_nbuf_pull_head returns a non-NULL value.
		 * Since this part of the code is not performance-critical,
		 * adding this explicit check is okay.
		 */
		if (wh)
			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;
	}
}
  956. }