dp_rx_defrag.c
  1. /*
  2. * Copyright (c) 2017 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "dp_types.h"
  19. #include "dp_rx.h"
  20. #include "dp_peer.h"
  21. #include "hal_api.h"
  22. #include "qdf_trace.h"
  23. #include "qdf_nbuf.h"
  24. #include "dp_rx_defrag.h"
  25. #include <enet.h> /* LLC_SNAP_HDR_LEN */
  26. #include "dp_rx_defrag.h"
/*
 * Per-cipher decap parameters: name, header bytes stripped from the
 * front of the payload (ic_header), trailer bytes trimmed from the
 * tail (ic_trailer) and MIC length (ic_miclen) — as consumed by the
 * decap/demic helpers below.
 */
const struct dp_rx_defrag_cipher dp_f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

/* TKIP: IV header, ICV trailer and Michael MIC length */
const struct dp_rx_defrag_cipher dp_f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

/* WEP: IV header and CRC trailer, no MIC */
const struct dp_rx_defrag_cipher dp_f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};
  45. /*
  46. * dp_rx_defrag_frames_free(): Free fragment chain
  47. * @frames: Fragment chain
  48. *
  49. * Iterates through the fragment chain and frees them
  50. * Returns: None
  51. */
  52. static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
  53. {
  54. qdf_nbuf_t next, frag = frames;
  55. while (frag) {
  56. next = qdf_nbuf_next(frag);
  57. qdf_nbuf_free(frag);
  58. frag = next;
  59. }
  60. }
  61. /*
  62. * dp_rx_clear_saved_desc_info(): Clears descriptor info
  63. * @peer: Pointer to the peer data structure
  64. * @tid: Transmit ID (TID)
  65. *
  66. * Saves MPDU descriptor info and MSDU link pointer from REO
  67. * ring descriptor. The cache is created per peer, per TID
  68. *
  69. * Returns: None
  70. */
  71. static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid)
  72. {
  73. hal_rx_clear_mpdu_desc_info(
  74. &peer->rx_tid[tid].transcap_rx_mpdu_desc_info);
  75. hal_rx_clear_msdu_link_ptr(
  76. &peer->rx_tid[tid].transcap_msdu_link_ptr[0],
  77. HAL_RX_MAX_SAVED_RING_DESC);
  78. }
  79. /*
  80. * dp_rx_defrag_waitlist_add(): Update per-PDEV defrag wait list
  81. * @peer: Pointer to the peer data structure
  82. * @tid: Transmit ID (TID)
  83. *
  84. * Appends per-tid fragments to global fragment wait list
  85. *
  86. * Returns: None
  87. */
  88. static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid)
  89. {
  90. struct dp_soc *psoc = peer->vdev->pdev->soc;
  91. struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid];
  92. /* TODO: use LIST macros instead of TAIL macros */
  93. TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder,
  94. defrag_waitlist_elem);
  95. }
  96. /*
  97. * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist
  98. * @peer: Pointer to the peer data structure
  99. * @tid: Transmit ID (TID)
  100. *
  101. * Remove fragments from waitlist
  102. *
  103. * Returns: None
  104. */
  105. static void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
  106. {
  107. struct dp_pdev *pdev = peer->vdev->pdev;
  108. struct dp_soc *soc = pdev->soc;
  109. struct dp_rx_tid *rx_reorder;
  110. if (tid > DP_MAX_TIDS) {
  111. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  112. "TID out of bounds: %d", tid);
  113. qdf_assert(0);
  114. return;
  115. }
  116. rx_reorder = &peer->rx_tid[tid];
  117. if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) {
  118. TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder,
  119. defrag_waitlist_elem);
  120. rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
  121. rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
  122. } else if (rx_reorder->defrag_waitlist_elem.tqe_prev == NULL) {
  123. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  124. "waitlist->tqe_prev is NULL");
  125. rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
  126. qdf_assert(0);
  127. }
  128. }
  129. /*
  130. * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list
  131. * @peer: Pointer to the peer data structure
  132. * @tid: Transmit ID (TID)
  133. * @head_addr: Pointer to head list
  134. * @tail_addr: Pointer to tail list
  135. * @frag: Incoming fragment
  136. * @all_frag_present: Flag to indicate whether all fragments are received
  137. *
  138. * Build a per-tid, per-sequence fragment list.
  139. *
  140. * Returns: None
  141. */
  142. static void dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid,
  143. qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag,
  144. uint8_t *all_frag_present)
  145. {
  146. qdf_nbuf_t next;
  147. qdf_nbuf_t prev = NULL;
  148. qdf_nbuf_t cur;
  149. uint16_t head_fragno, cur_fragno, next_fragno;
  150. uint8_t last_morefrag = 1, count = 0;
  151. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  152. uint8_t *rx_desc_info;
  153. qdf_assert(frag);
  154. qdf_assert(head_addr);
  155. qdf_assert(tail_addr);
  156. rx_desc_info = qdf_nbuf_data(frag);
  157. cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
  158. /* If this is the first fragment */
  159. if (!(*head_addr)) {
  160. *head_addr = *tail_addr = frag;
  161. qdf_nbuf_set_next(*tail_addr, NULL);
  162. rx_tid->curr_frag_num = cur_fragno;
  163. goto end;
  164. }
  165. /* In sequence fragment */
  166. if (cur_fragno > rx_tid->curr_frag_num) {
  167. qdf_nbuf_set_next(*tail_addr, frag);
  168. *tail_addr = frag;
  169. qdf_nbuf_set_next(*tail_addr, NULL);
  170. rx_tid->curr_frag_num = cur_fragno;
  171. } else {
  172. /* Out of sequence fragment */
  173. cur = *head_addr;
  174. rx_desc_info = qdf_nbuf_data(cur);
  175. head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
  176. if (cur_fragno == head_fragno) {
  177. qdf_nbuf_free(frag);
  178. *all_frag_present = 0;
  179. } else if (head_fragno > cur_fragno) {
  180. qdf_nbuf_set_next(frag, cur);
  181. cur = frag;
  182. *head_addr = frag; /* head pointer to be updated */
  183. } else {
  184. while ((cur_fragno > head_fragno) && cur != NULL) {
  185. prev = cur;
  186. cur = qdf_nbuf_next(cur);
  187. rx_desc_info = qdf_nbuf_data(cur);
  188. head_fragno =
  189. dp_rx_frag_get_mpdu_frag_number(
  190. rx_desc_info);
  191. }
  192. qdf_nbuf_set_next(prev, frag);
  193. qdf_nbuf_set_next(frag, cur);
  194. }
  195. }
  196. next = qdf_nbuf_next(*head_addr);
  197. rx_desc_info = qdf_nbuf_data(*tail_addr);
  198. last_morefrag = hal_rx_get_rx_more_frag_bit(rx_desc_info);
  199. /* TODO: optimize the loop */
  200. if (!last_morefrag) {
  201. /* Check if all fragments are present */
  202. do {
  203. rx_desc_info = qdf_nbuf_data(next);
  204. next_fragno =
  205. dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
  206. count++;
  207. if (next_fragno != count)
  208. break;
  209. next = qdf_nbuf_next(next);
  210. } while (next);
  211. if (!next) {
  212. *all_frag_present = 1;
  213. return;
  214. }
  215. }
  216. end:
  217. *all_frag_present = 0;
  218. }
  219. /*
  220. * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment
  221. * @msdu: Pointer to the fragment
  222. * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
  223. *
  224. * decap tkip encrypted fragment
  225. *
  226. * Returns: QDF_STATUS
  227. */
  228. static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
  229. {
  230. uint8_t *ivp, *orig_hdr;
  231. int rx_desc_len = sizeof(struct rx_pkt_tlvs);
  232. /* start of 802.11 header info */
  233. orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
  234. /* TKIP header is located post 802.11 header */
  235. ivp = orig_hdr + hdrlen;
  236. if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
  237. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  238. "IEEE80211_WEP_EXTIV is missing in TKIP fragment");
  239. return QDF_STATUS_E_DEFRAG_ERROR;
  240. }
  241. qdf_mem_move(orig_hdr + dp_f_tkip.ic_header, orig_hdr, hdrlen);
  242. qdf_nbuf_pull_head(msdu, dp_f_tkip.ic_header);
  243. qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);
  244. return QDF_STATUS_SUCCESS;
  245. }
  246. /*
  247. * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment
  248. * @nbuf: Pointer to the fragment buffer
  249. * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
  250. *
  251. * Remove MIC information from CCMP fragment
  252. *
  253. * Returns: QDF_STATUS
  254. */
  255. static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen)
  256. {
  257. uint8_t *ivp, *orig_hdr;
  258. int rx_desc_len = sizeof(struct rx_pkt_tlvs);
  259. /* start of the 802.11 header */
  260. orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
  261. /* CCMP header is located after 802.11 header */
  262. ivp = orig_hdr + hdrlen;
  263. if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
  264. return QDF_STATUS_E_DEFRAG_ERROR;
  265. qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);
  266. return QDF_STATUS_SUCCESS;
  267. }
  268. /*
  269. * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment
  270. * @nbuf: Pointer to the fragment
  271. * @hdrlen: length of the header information
  272. *
  273. * decap CCMP encrypted fragment
  274. *
  275. * Returns: QDF_STATUS
  276. */
  277. static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen)
  278. {
  279. uint8_t *ivp, *origHdr;
  280. int rx_desc_len = sizeof(struct rx_pkt_tlvs);
  281. origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
  282. ivp = origHdr + hdrlen;
  283. if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
  284. return QDF_STATUS_E_DEFRAG_ERROR;
  285. qdf_mem_move(origHdr + dp_f_ccmp.ic_header, origHdr, hdrlen);
  286. qdf_nbuf_pull_head(nbuf, dp_f_ccmp.ic_header);
  287. return QDF_STATUS_SUCCESS;
  288. }
  289. /*
  290. * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment
  291. * @msdu: Pointer to the fragment
  292. * @hdrlen: length of the header information
  293. *
  294. * decap WEP encrypted fragment
  295. *
  296. * Returns: QDF_STATUS
  297. */
  298. static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
  299. {
  300. uint8_t *origHdr;
  301. int rx_desc_len = sizeof(struct rx_pkt_tlvs);
  302. origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
  303. qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);
  304. qdf_nbuf_pull_head(msdu, dp_f_wep.ic_header);
  305. qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);
  306. return QDF_STATUS_SUCCESS;
  307. }
  308. /*
  309. * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment
  310. * @nbuf: Pointer to the fragment
  311. *
  312. * Calculate the header size of the received fragment
  313. *
  314. * Returns: header size (uint16_t)
  315. */
  316. static uint16_t dp_rx_defrag_hdrsize(qdf_nbuf_t nbuf)
  317. {
  318. uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
  319. uint16_t size = sizeof(struct ieee80211_frame);
  320. uint16_t fc = 0;
  321. uint32_t to_ds, fr_ds;
  322. uint8_t frm_ctrl_valid;
  323. uint16_t frm_ctrl_field;
  324. to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
  325. fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
  326. frm_ctrl_valid = hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr);
  327. frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr);
  328. if (to_ds && fr_ds)
  329. size += IEEE80211_ADDR_LEN;
  330. if (frm_ctrl_valid) {
  331. fc = frm_ctrl_field;
  332. /* use 1-st byte for validation */
  333. if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
  334. size += sizeof(uint16_t);
  335. /* use 2-nd byte for validation */
  336. if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
  337. size += sizeof(struct ieee80211_htc);
  338. }
  339. }
  340. return size;
  341. }
  342. /*
  343. * dp_rx_defrag_michdr(): Calculate a psuedo MIC header
  344. * @wh0: Pointer to the wireless header of the fragment
  345. * @hdr: Array to hold the psuedo header
  346. *
  347. * Calculate a psuedo MIC header
  348. *
  349. * Returns: None
  350. */
  351. static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
  352. uint8_t hdr[])
  353. {
  354. const struct ieee80211_frame_addr4 *wh =
  355. (const struct ieee80211_frame_addr4 *)wh0;
  356. switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
  357. case IEEE80211_FC1_DIR_NODS:
  358. DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
  359. DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
  360. wh->i_addr2);
  361. break;
  362. case IEEE80211_FC1_DIR_TODS:
  363. DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
  364. DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
  365. wh->i_addr2);
  366. break;
  367. case IEEE80211_FC1_DIR_FROMDS:
  368. DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
  369. DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
  370. wh->i_addr3);
  371. break;
  372. case IEEE80211_FC1_DIR_DSTODS:
  373. DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
  374. DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
  375. wh->i_addr4);
  376. break;
  377. }
  378. /*
  379. * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but
  380. * it could also be set for deauth, disassoc, action, etc. for
  381. * a mgt type frame. It comes into picture for MFP.
  382. */
  383. if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
  384. const struct ieee80211_qosframe *qwh =
  385. (const struct ieee80211_qosframe *)wh;
  386. hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
  387. } else {
  388. hdr[12] = 0;
  389. }
  390. hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
  391. }
/*
 * dp_rx_defrag_mic(): Calculate the Michael MIC
 * @key: Pointer to the 8-byte Michael key
 * @wbuf: fragment buffer (possibly a chain of nbufs)
 * @off: Offset of the MIC-covered data within the first buffer
 * @data_len: Data length covered by the MIC
 * @mic: 8-byte array to hold the computed MIC
 *
 * Computes the TKIP Michael MIC over the pseudo header (DA, SA,
 * priority) and the payload, walking 32-bit blocks across nbuf
 * boundaries. Each nbuf is assumed to start with rx_pkt_tlvs.
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf,
	uint16_t off, uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);

	/* Build the pseudo header from the 802.11 header past the TLVs */
	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
		+ rx_desc_len), hdr);

	/* Initialize the Michael (l, r) state from the key */
	l = dp_rx_get_le32(key);
	r = dp_rx_get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	l ^= dp_rx_get_le32(hdr);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[4]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[8]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[12]);
	dp_rx_michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len + off;
	space = qdf_nbuf_len(wbuf) - rx_desc_len - off;

	for (;; ) {
		/* Never consume more than the MIC-covered length */
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= dp_rx_get_le32(data);
			dp_rx_michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		/* Fewer than 4 bytes remain overall: finalize below */
		if (data_len < sizeof(uint32_t))
			break;

		/* More data expected: it must be in the next fragment */
		wbuf = qdf_nbuf_next(wbuf);
		if (wbuf == NULL)
			return QDF_STATUS_E_DEFRAG_ERROR;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
			if ((qdf_nbuf_len(wbuf) - rx_desc_len) <
				sizeof(uint32_t) - space) {
				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			/* Assemble one 32-bit block from the leftover
			 * 'space' bytes of the old buffer plus the first
			 * bytes of the new one.
			 */
			switch (space) {
			case 1:
				l ^= dp_rx_get_le32_split(data[0],
					data_next[0], data_next[1],
					data_next[2]);
				data = data_next + 3;
				space = (qdf_nbuf_len(wbuf) - rx_desc_len)
					- 3;
				break;
			case 2:
				l ^= dp_rx_get_le32_split(data[0], data[1],
					data_next[0], data_next[1]);
				data = data_next + 2;
				space = (qdf_nbuf_len(wbuf) - rx_desc_len)
					- 2;
				break;
			case 3:
				l ^= dp_rx_get_le32_split(data[0], data[1],
					data[2], data_next[0]);
				data = data_next + 1;
				space = (qdf_nbuf_len(wbuf) - rx_desc_len)
					- 1;
				break;
			}
			dp_rx_michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
			space = qdf_nbuf_len(wbuf) - rx_desc_len;
		}
	}

	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	/* Two final mixing rounds, then emit the 8-byte MIC */
	dp_rx_michael_block(l, r);
	dp_rx_michael_block(l, r);
	dp_rx_put_le32(mic, l);
	dp_rx_put_le32(mic + 4, r);

	return QDF_STATUS_SUCCESS;
}
  510. /*
  511. * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame
  512. * @key: Pointer to the key
  513. * @msdu: fragment buffer
  514. * @hdrlen: Length of the header information
  515. *
  516. * Remove MIC information from the TKIP frame
  517. *
  518. * Returns: QDF_STATUS
  519. */
  520. static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key,
  521. qdf_nbuf_t msdu, uint16_t hdrlen)
  522. {
  523. QDF_STATUS status;
  524. uint32_t pktlen;
  525. uint8_t mic[IEEE80211_WEP_MICLEN];
  526. uint8_t mic0[IEEE80211_WEP_MICLEN];
  527. int rx_desc_len = sizeof(struct rx_pkt_tlvs);
  528. pktlen = qdf_nbuf_len(msdu) - rx_desc_len;
  529. status = dp_rx_defrag_mic(key, msdu, hdrlen,
  530. pktlen - (hdrlen + dp_f_tkip.ic_miclen), mic);
  531. if (QDF_IS_STATUS_ERROR(status))
  532. return status;
  533. qdf_nbuf_copy_bits(msdu, pktlen - dp_f_tkip.ic_miclen + rx_desc_len,
  534. dp_f_tkip.ic_miclen, (caddr_t)mic0);
  535. if (!qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
  536. return QDF_STATUS_E_DEFRAG_ERROR;
  537. qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_miclen);
  538. return QDF_STATUS_SUCCESS;
  539. }
  540. /*
  541. * dp_rx_defrag_decap_recombine(): Recombine the fragments
  542. * @peer: Pointer to the peer
  543. * @frag_list: list of fragments
  544. * @tid: Transmit identifier
  545. * @hdrsize: Header size
  546. *
  547. * Recombine fragments
  548. *
  549. * Returns: QDF_STATUS
  550. */
  551. static QDF_STATUS dp_rx_defrag_decap_recombine(struct dp_peer *peer,
  552. qdf_nbuf_t head_msdu, unsigned tid, uint16_t hdrsize)
  553. {
  554. qdf_nbuf_t msdu = head_msdu;
  555. uint8_t i;
  556. uint8_t num_ring_desc_saved = peer->rx_tid[tid].curr_ring_desc_idx;
  557. uint8_t num_msdus;
  558. /* Stitch fragments together */
  559. for (i = 0; (i < num_ring_desc_saved) && msdu; i++) {
  560. struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info =
  561. &peer->rx_tid[tid].transcap_msdu_link_ptr[i];
  562. struct hal_rx_mpdu_desc_info *mpdu_desc_info =
  563. &peer->rx_tid[tid].transcap_rx_mpdu_desc_info;
  564. num_msdus = hal_rx_chain_msdu_links(msdu, msdu_link_ptr_info,
  565. mpdu_desc_info);
  566. msdu = qdf_nbuf_next(msdu);
  567. }
  568. return QDF_STATUS_SUCCESS;
  569. }
  570. /**
  571. * dp_rx_defrag_err() - rx err handler
  572. * @pdev: handle to pdev object
  573. * @vdev_id: vdev id
  574. * @peer_mac_addr: peer mac address
  575. * @tid: TID
  576. * @tsf32: TSF
  577. * @err_type: error type
  578. * @rx_frame: rx frame
  579. * @pn: PN Number
  580. * @key_id: key id
  581. *
  582. * This function handles rx error and send MIC error notification
  583. *
  584. * Return: None
  585. */
  586. static void dp_rx_defrag_err(uint8_t vdev_id, uint8_t *peer_mac_addr,
  587. int tid, uint32_t tsf32, uint32_t err_type, qdf_nbuf_t rx_frame,
  588. uint64_t *pn, uint8_t key_id)
  589. {
  590. /* TODO: Who needs to know about the TKIP MIC error */
  591. }
  592. /*
  593. * dp_rx_defrag_qos_decap(): Remove QOS header from the frame
  594. * @nbuf: Pointer to the frame buffer
  595. * @hdrlen: Length of the header information
  596. *
  597. * Recombine fragments
  598. *
  599. * Returns: None
  600. */
  601. static void dp_rx_defrag_qos_decap(qdf_nbuf_t nbuf, uint16_t hdrlen)
  602. {
  603. struct ieee80211_frame *wh;
  604. uint16_t qoslen;
  605. int pkt_tlv_size = sizeof(struct rx_pkt_tlvs); /* pkt TLV hdr size */
  606. uint16_t fc = 0;
  607. uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
  608. /* Get the frame control field if it is valid */
  609. if (hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr))
  610. fc = hal_rx_get_frame_ctrl_field(rx_tlv_hdr);
  611. wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) + pkt_tlv_size);
  612. if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
  613. qoslen = sizeof(struct ieee80211_qoscntl);
  614. /* Qos frame with Order bit set indicates a HTC frame */
  615. if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
  616. qoslen += sizeof(struct ieee80211_htc);
  617. /* remove QoS field from header */
  618. hdrlen -= qoslen;
  619. qdf_mem_move((uint8_t *)wh + qoslen, wh, hdrlen);
  620. wh = (struct ieee80211_frame *)qdf_nbuf_pull_head(nbuf,
  621. pkt_tlv_size +
  622. qoslen);
  623. /* clear QoS bit */
  624. if (wh)
  625. wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;
  626. }
  627. }
  628. /*
  629. * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
  630. * @msdu: Pointer to the fragment buffer
  631. *
  632. * Transcap the fragment from 802.11 to 802.3
  633. *
  634. * Returns: None
  635. */
  636. static void dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t msdu)
  637. {
  638. struct ieee80211_frame wh;
  639. uint32_t hdrsize;
  640. struct llc_snap_hdr_t llchdr;
  641. struct ethernet_hdr_t *eth_hdr;
  642. int rx_desc_len = sizeof(struct rx_pkt_tlvs);
  643. struct ieee80211_frame *wh_ptr;
  644. wh_ptr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu) +
  645. rx_desc_len);
  646. qdf_mem_copy(&wh, wh_ptr, sizeof(wh));
  647. hdrsize = sizeof(struct ieee80211_frame);
  648. qdf_mem_copy(&llchdr, ((uint8_t *) (qdf_nbuf_data(msdu) +
  649. rx_desc_len)) + hdrsize,
  650. sizeof(struct llc_snap_hdr_t));
  651. /*
  652. * Now move the data pointer to the beginning of the mac header :
  653. * new-header = old-hdr + (wifihdrsize + llchdrsize - ethhdrsize)
  654. */
  655. qdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
  656. sizeof(struct llc_snap_hdr_t) -
  657. sizeof(struct ethernet_hdr_t)));
  658. eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(msdu));
  659. switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
  660. case IEEE80211_FC1_DIR_NODS:
  661. qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
  662. IEEE80211_ADDR_LEN);
  663. qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2,
  664. IEEE80211_ADDR_LEN);
  665. break;
  666. case IEEE80211_FC1_DIR_TODS:
  667. qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
  668. IEEE80211_ADDR_LEN);
  669. qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2,
  670. IEEE80211_ADDR_LEN);
  671. break;
  672. case IEEE80211_FC1_DIR_FROMDS:
  673. qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
  674. IEEE80211_ADDR_LEN);
  675. qdf_mem_copy(eth_hdr->src_addr, wh.i_addr3,
  676. IEEE80211_ADDR_LEN);
  677. break;
  678. case IEEE80211_FC1_DIR_DSTODS:
  679. break;
  680. }
  681. /* TODO: Is it requried to copy rx_pkt_tlvs
  682. * to the start of data buffer?
  683. */
  684. qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
  685. sizeof(llchdr.ethertype));
  686. }
  687. /*
  688. * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
  689. * @peer: Pointer to the peer
  690. * @tid: Transmit Identifier
  691. *
  692. * Reinject the fragment chain back into REO
  693. *
  694. * Returns: QDF_STATUS
  695. */
  696. static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
  697. unsigned tid)
  698. {
  699. struct dp_pdev *pdev = peer->vdev->pdev;
  700. struct dp_soc *soc = pdev->soc;
  701. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  702. void *ring_desc;
  703. enum hal_reo_error_status error;
  704. struct hal_rx_mpdu_desc_info *saved_mpdu_desc_info;
  705. void *hal_srng = soc->reo_reinject_ring.hal_srng;
  706. struct hal_rx_msdu_link_ptr_info *saved_msdu_link_ptr;
  707. if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
  708. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  709. "HAL RING Access For WBM Release SRNG Failed: %p",
  710. hal_srng);
  711. goto done;
  712. }
  713. ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
  714. qdf_assert(ring_desc);
  715. error = HAL_RX_ERROR_STATUS_GET(ring_desc);
  716. if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
  717. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  718. "HAL RING 0x%p:error %d", hal_srng, error);
  719. /* Don't know how to deal with this condition -- assert */
  720. qdf_assert(0);
  721. goto done;
  722. }
  723. saved_mpdu_desc_info =
  724. &peer->rx_tid[tid].transcap_rx_mpdu_desc_info;
  725. /* first msdu link pointer */
  726. saved_msdu_link_ptr =
  727. &peer->rx_tid[tid].transcap_msdu_link_ptr[0];
  728. hal_rx_defrag_update_src_ring_desc(ring_desc,
  729. saved_mpdu_desc_info, saved_msdu_link_ptr);
  730. status = QDF_STATUS_SUCCESS;
  731. done:
  732. hal_srng_access_end(soc->hal_soc, hal_srng);
  733. return status;
  734. }
  735. /*
  736. * dp_rx_defrag(): Defragment the fragment chain
  737. * @peer: Pointer to the peer
  738. * @tid: Transmit Identifier
  739. * @frag_list: Pointer to head list
  740. * @frag_list_tail: Pointer to tail list
  741. *
  742. * Defragment the fragment chain
  743. *
  744. * Returns: QDF_STATUS
  745. */
  746. static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
  747. qdf_nbuf_t frag_list, qdf_nbuf_t frag_list_tail)
  748. {
  749. qdf_nbuf_t tmp_next;
  750. qdf_nbuf_t cur = frag_list, msdu;
  751. uint32_t index, tkip_demic = 0;
  752. uint16_t hdr_space;
  753. QDF_STATUS status;
  754. uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
  755. struct dp_vdev *vdev = peer->vdev;
  756. cur = frag_list;
  757. hdr_space = dp_rx_defrag_hdrsize(cur);
  758. index = hal_rx_msdu_is_wlan_mcast(cur) ?
  759. dp_sec_mcast : dp_sec_ucast;
  760. switch (peer->security[index].sec_type) {
  761. case htt_sec_type_tkip:
  762. tkip_demic = 1;
  763. case htt_sec_type_tkip_nomic:
  764. while (cur) {
  765. tmp_next = qdf_nbuf_next(cur);
  766. if (dp_rx_defrag_tkip_decap(cur, hdr_space)) {
  767. /* TKIP decap failed, discard frags */
  768. dp_rx_defrag_frames_free(frag_list);
  769. QDF_TRACE(QDF_MODULE_ID_TXRX,
  770. QDF_TRACE_LEVEL_ERROR,
  771. "dp_rx_defrag: TKIP decap failed");
  772. return QDF_STATUS_E_DEFRAG_ERROR;
  773. }
  774. cur = tmp_next;
  775. }
  776. break;
  777. case htt_sec_type_aes_ccmp:
  778. while (cur) {
  779. tmp_next = qdf_nbuf_next(cur);
  780. if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) {
  781. /* CCMP demic failed, discard frags */
  782. dp_rx_defrag_frames_free(frag_list);
  783. QDF_TRACE(QDF_MODULE_ID_TXRX,
  784. QDF_TRACE_LEVEL_ERROR,
  785. "dp_rx_defrag: CCMP demic failed");
  786. return QDF_STATUS_E_DEFRAG_ERROR;
  787. }
  788. if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) {
  789. /* CCMP decap failed, discard frags */
  790. dp_rx_defrag_frames_free(frag_list);
  791. QDF_TRACE(QDF_MODULE_ID_TXRX,
  792. QDF_TRACE_LEVEL_ERROR,
  793. "dp_rx_defrag: CCMP decap failed");
  794. return QDF_STATUS_E_DEFRAG_ERROR;
  795. }
  796. cur = tmp_next;
  797. }
  798. break;
  799. case htt_sec_type_wep40:
  800. case htt_sec_type_wep104:
  801. case htt_sec_type_wep128:
  802. while (cur) {
  803. tmp_next = qdf_nbuf_next(cur);
  804. if (dp_rx_defrag_wep_decap(cur, hdr_space)) {
  805. /* WEP decap failed, discard frags */
  806. dp_rx_defrag_frames_free(frag_list);
  807. QDF_TRACE(QDF_MODULE_ID_TXRX,
  808. QDF_TRACE_LEVEL_ERROR,
  809. "dp_rx_defrag: WEP decap failed");
  810. return QDF_STATUS_E_DEFRAG_ERROR;
  811. }
  812. cur = tmp_next;
  813. }
  814. break;
  815. default:
  816. QDF_TRACE(QDF_MODULE_ID_TXRX,
  817. QDF_TRACE_LEVEL_ERROR,
  818. "dp_rx_defrag: Did not match any security type");
  819. break;
  820. }
  821. if (tkip_demic) {
  822. msdu = frag_list_tail; /* Only last fragment has the MIC */
  823. qdf_mem_copy(key,
  824. peer->security[index].michael_key,
  825. sizeof(peer->security[index].michael_key));
  826. if (dp_rx_defrag_tkip_demic(key, msdu, hdr_space)) {
  827. qdf_nbuf_free(msdu);
  828. dp_rx_defrag_err(vdev->vdev_id, peer->mac_addr.raw,
  829. tid, 0, QDF_STATUS_E_DEFRAG_ERROR, msdu,
  830. NULL, 0);
  831. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  832. "dp_rx_defrag: TKIP demic failed");
  833. return QDF_STATUS_E_DEFRAG_ERROR;
  834. }
  835. }
  836. dp_rx_defrag_qos_decap(cur, hdr_space);
  837. /* Convert the header to 802.3 header */
  838. dp_rx_defrag_nwifi_to_8023(cur);
  839. status = dp_rx_defrag_decap_recombine(peer, cur, tid, hdr_space);
  840. if (QDF_IS_STATUS_ERROR(status)) {
  841. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  842. "dp_rx_defrag_decap_recombine failed");
  843. qdf_assert(0);
  844. }
  845. return status;
  846. }
/*
 * dp_rx_defrag_cleanup(): Clean up activities
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @seq: Sequence number
 *
 * Returns: None
 */
  855. static void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid,
  856. uint16_t seq)
  857. {
  858. struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
  859. &peer->rx_tid[tid].array[seq];
  860. /* Free up nbufs */
  861. dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
  862. /* Free up saved ring descriptors */
  863. dp_rx_clear_saved_desc_info(peer, tid);
  864. rx_reorder_array_elem->head = NULL;
  865. rx_reorder_array_elem->tail = NULL;
  866. peer->rx_tid[tid].defrag_timeout_ms = 0;
  867. peer->rx_tid[tid].curr_frag_num = 0;
  868. peer->rx_tid[tid].curr_seq_num = 0;
  869. peer->rx_tid[tid].curr_ring_desc_idx = 0;
  870. }
  871. /*
  872. * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor
  873. * @ring_desc: Pointer to the ring descriptor
  874. * @peer: Pointer to the peer
  875. * @tid: Transmit Identifier
  876. * @mpdu_desc_info: MPDU descriptor info
  877. *
  878. * Returns: None
  879. */
  880. static void dp_rx_defrag_save_info_from_ring_desc(void *ring_desc,
  881. struct dp_peer *peer, unsigned tid,
  882. struct hal_rx_mpdu_desc_info *mpdu_desc_info)
  883. {
  884. struct dp_pdev *pdev = peer->vdev->pdev;
  885. void *msdu_link_desc_va = NULL;
  886. uint8_t idx = peer->rx_tid[tid].curr_ring_desc_idx;
  887. uint8_t rbm;
  888. struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info =
  889. &peer->rx_tid[tid].transcap_msdu_link_ptr[++idx];
  890. struct hal_rx_mpdu_desc_info *tmp_mpdu_desc_info =
  891. &peer->rx_tid[tid].transcap_rx_mpdu_desc_info;
  892. struct hal_buf_info hbi;
  893. rbm = hal_rx_ret_buf_manager_get(ring_desc);
  894. if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
  895. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  896. "Invalid RBM while chaining frag MSDUs");
  897. return;
  898. }
  899. hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
  900. msdu_link_desc_va =
  901. dp_rx_cookie_2_link_desc_va(pdev->soc, &hbi);
  902. hal_rx_defrag_save_info_from_ring_desc(msdu_link_desc_va,
  903. msdu_link_ptr_info, &hbi);
  904. qdf_mem_copy(tmp_mpdu_desc_info, mpdu_desc_info,
  905. sizeof(*tmp_mpdu_desc_info));
  906. }
/*
 * dp_rx_defrag_store_fragment(): Store incoming fragments
 * @soc: Pointer to the SOC data structure
 * @ring_desc: Pointer to the ring descriptor
 * @head: Pointer to the head of the local descriptor free-list
 * @tail: Pointer to the tail of the local descriptor free-list
 * @mpdu_desc_info: MPDU descriptor info
 * @msdu_info: Pointer to MSDU descriptor info
 * @tid: Traffic Identifier
 * @rx_desc: Pointer to rx descriptor
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc,
	void *ring_desc,
	union dp_rx_desc_list_elem_t **head,
	union dp_rx_desc_list_elem_t **tail,
	struct hal_rx_mpdu_desc_info *mpdu_desc_info,
	struct hal_rx_msdu_desc_info *msdu_info,
	unsigned tid, struct dp_rx_desc *rx_desc)
{
	uint8_t idx;
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	uint16_t peer_id;
	uint16_t rxseq, seq;
	uint8_t fragno, more_frag, all_frag_present = 0;
	uint16_t seq_num = mpdu_desc_info->mpdu_seq;
	QDF_STATUS status;
	struct dp_rx_tid *rx_tid;
	uint8_t mpdu_sequence_control_valid;
	uint8_t mpdu_frame_control_valid;
	qdf_nbuf_t frag = rx_desc->nbuf;
	uint8_t *rx_desc_info;

	/* Check if the packet is from a valid peer */
	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			mpdu_desc_info->peer_meta_data);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		/* We should not receive anything from unknown peer
		 * however, that might happen while we are in the monitor mode.
		 * We don't need to handle that here
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Unknown peer, dropping the fragment");

		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);

		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	pdev = peer->vdev->pdev;
	rx_tid = &peer->rx_tid[tid];

	/* NOTE(review): seq is asserted to be 0 below, so all fragments
	 * of a TID apparently land in reorder slot 0 — presumably the
	 * BA window is bypassed for fragments; confirm against the REO
	 * exception-ring programming.
	 */
	seq = seq_num & (peer->rx_tid[tid].ba_win_size - 1);
	qdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->rx_tid[tid].array[seq];

	/* The HW rx descriptor (TLVs) sits at the start of the nbuf data */
	rx_desc_info = qdf_nbuf_data(frag);
	mpdu_sequence_control_valid =
		hal_rx_get_mpdu_sequence_control_valid(rx_desc_info);

	/* Invalid MPDU sequence control field, MPDU is of no use */
	if (!mpdu_sequence_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Invalid MPDU seq control field, dropping MPDU");
		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		qdf_assert(0);
		goto end;
	}

	mpdu_frame_control_valid =
		hal_rx_get_mpdu_frame_control_valid(rx_desc_info);

	/* Invalid frame control field */
	if (!mpdu_frame_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Invalid frame control field, dropping MPDU");
		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		qdf_assert(0);
		goto end;
	}

	/* Current mpdu sequence */
	rxseq = hal_rx_get_rx_sequence(rx_desc_info);
	more_frag = hal_rx_get_rx_more_frag_bit(rx_desc_info);

	/* HW does not populate the fragment number as of now
	 * need to get from the 802.11 header
	 */
	fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);

	/*
	 * !more_frag: no more fragments to be delivered
	 * !frag_no: packet is not fragmented
	 * !rx_reorder_array_elem->head: no saved fragments so far
	 */
	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		/* We should not get into this situation here.
		 * It means an unfragmented packet with fragment flag
		 * is delivered over the REO exception ring.
		 * Typically it follows normal rx path.
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Rcvd unfragmented pkt on REO Err srng, dropping");
		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		qdf_assert(0);
		goto end;
	}

	/* Check if the fragment is for the same sequence or a different one */
	if (rx_reorder_array_elem->head) {
		if (rxseq != rx_tid->curr_seq_num) {
			/* Drop stored fragments if out of sequence
			 * fragment is received
			 */
			dp_rx_defrag_frames_free(rx_reorder_array_elem->head);

			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;

			/*
			 * The sequence number for this fragment becomes the
			 * new sequence number to be processed
			 */
			rx_tid->curr_seq_num = rxseq;

			/* NOTE(review): curr_seq_num was just assigned rxseq
			 * above, so this ternary always prints "address";
			 * verify the intended comparison.
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s mismatch, dropping earlier sequence ",
				(rxseq == rx_tid->curr_seq_num)
				? "address"
				: "seq number");
		}
	} else {
		/* Start of a new sequence */
		rx_tid->curr_seq_num = rxseq;
	}

	/*
	 * If the earlier sequence was dropped, this will be the fresh start.
	 * Else, continue with next fragment in a given sequence
	 */
	dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head,
		&rx_reorder_array_elem->tail, frag,
		&all_frag_present);

	/*
	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
	 * packet sequence has more than 6 MSDUs for some reason, we will
	 * have to use the next MSDU link descriptor and chain them together
	 * before reinjection
	 */
	if (more_frag == 0 || fragno == HAL_RX_NUM_MSDU_DESC) {
		/*
		 * Deep copy of MSDU link pointer and msdu descriptor structs
		 */
		idx = peer->rx_tid[tid].curr_ring_desc_idx;
		if (idx < HAL_RX_MAX_SAVED_RING_DESC) {
			dp_rx_defrag_save_info_from_ring_desc(ring_desc,
				peer, tid, mpdu_desc_info);

			peer->rx_tid[tid].curr_ring_desc_idx++;
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"Max ring descr saved, dropping fragment");
			/*
			 * Free up saved fragments and ring descriptors if any
			 */
			goto end;
		}
	}

	/* TODO: handle fragment timeout gracefully */
	/* NOTE(review): when defrag_timeout_check is set, this drops the
	 * accumulated fragments via the cleanup at "end" instead of
	 * processing them — looks like a placeholder; confirm intent.
	 */
	if (pdev->soc->rx.flags.defrag_timeout_check) {
		dp_rx_defrag_waitlist_remove(peer, tid);
		goto end;
	}

	/* Yet to receive more fragments for this sequence number */
	if (!all_frag_present) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		/* Arm the defrag timeout and park this TID on the waitlist */
		peer->rx_tid[tid].defrag_timeout_ms =
			now_ms + pdev->soc->rx.defrag.timeout_ms;

		dp_rx_defrag_waitlist_add(peer, tid);
		goto end;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"All fragments received for sequence: %d", rxseq);

	/* Process the fragments */
	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
		rx_reorder_array_elem->tail);
	if (QDF_IS_STATUS_ERROR(status)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Fragment processing failed");
		goto end;
	}

	/* Re-inject the fragments back to REO for further processing */
	status = dp_rx_defrag_reo_reinject(peer, tid);
	if (QDF_IS_STATUS_SUCCESS(status))
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			"Fragmented sequence successfully reinjected");
	else
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Fragmented sequence reinjection failed");

end:
	/* NOTE(review): E_DEFRAG_ERROR is returned even after a successful
	 * reinjection, so the caller's rx_bufs_used counter never sees a
	 * success from this path — confirm whether this is intentional.
	 */
	dp_rx_defrag_cleanup(peer, tid, seq);
	return QDF_STATUS_E_DEFRAG_ERROR;
}
  1099. /**
  1100. * dp_rx_frag_handle() - Handles fragmented Rx frames
  1101. *
  1102. * @soc: core txrx main context
  1103. * @ring_desc: opaque pointer to the REO error ring descriptor
  1104. * @mpdu_desc_info: MPDU descriptor information from ring descriptor
  1105. * @head: head of the local descriptor free-list
  1106. * @tail: tail of the local descriptor free-list
  1107. * @quota: No. of units (packets) that can be serviced in one shot.
  1108. *
 * This function implements RX 802.11 fragmentation handling.
 * The handling is mostly the same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to
 * REO ring (with proper settings to by-pass fragmentation check
 * but use duplicate detection / re-ordering) and route these frames
 * to a different core.
  1115. *
  1116. * Return: uint32_t: No. of elements processed
  1117. */
  1118. uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
  1119. struct hal_rx_mpdu_desc_info *mpdu_desc_info,
  1120. union dp_rx_desc_list_elem_t **head,
  1121. union dp_rx_desc_list_elem_t **tail,
  1122. uint32_t quota)
  1123. {
  1124. uint32_t rx_bufs_used = 0;
  1125. void *link_desc_va;
  1126. struct hal_buf_info buf_info;
  1127. struct hal_rx_msdu_list msdu_list; /* per MPDU list of MSDUs */
  1128. uint32_t tid;
  1129. int idx;
  1130. QDF_STATUS status;
  1131. qdf_assert(soc);
  1132. qdf_assert(mpdu_desc_info);
  1133. /* Fragment from a valid peer */
  1134. hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);
  1135. link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
  1136. qdf_assert(link_desc_va);
  1137. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  1138. "Number of MSDUs to process, num_msdus: %d",
  1139. mpdu_desc_info->msdu_count);
  1140. if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
  1141. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1142. "Not sufficient MSDUs to process");
  1143. return rx_bufs_used;
  1144. }
  1145. /* Get msdu_list for the given MPDU */
  1146. hal_rx_msdu_list_get(link_desc_va, &msdu_list,
  1147. mpdu_desc_info->msdu_count);
  1148. /* Process all MSDUs in the current MPDU */
  1149. for (idx = 0; (idx < mpdu_desc_info->msdu_count) && quota--; idx++) {
  1150. struct dp_rx_desc *rx_desc =
  1151. dp_rx_cookie_2_va_rxdma_buf(soc,
  1152. msdu_list.sw_cookie[idx]);
  1153. qdf_assert(rx_desc);
  1154. tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);
  1155. /* Process fragment-by-fragment */
  1156. status = dp_rx_defrag_store_fragment(soc, ring_desc,
  1157. head, tail, mpdu_desc_info,
  1158. &msdu_list.msdu_info[idx], tid,
  1159. rx_desc);
  1160. if (QDF_IS_STATUS_SUCCESS(status))
  1161. rx_bufs_used++;
  1162. else
  1163. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1164. "Rx Defragmentation error. mpdu_seq: 0x%x msdu_count: %d mpdu_flags: %d",
  1165. mpdu_desc_info->mpdu_seq, mpdu_desc_info->msdu_count,
  1166. mpdu_desc_info->mpdu_flags);
  1167. }
  1168. return rx_bufs_used;
  1169. }