htt_rx.c 70 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489
  1. /*
  2. * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. /**
  27. * @file htt_rx.c
  28. * @brief Implement receive aspects of HTT.
  29. * @details
  30. * This file contains three categories of HTT rx code:
  31. * 1. An abstraction of the rx descriptor, to hide the
  32. * differences between the HL vs. LL rx descriptor.
  33. * 2. Functions for providing access to the (series of)
  34. * rx descriptor(s) and rx frame(s) associated with
  35. * an rx indication message.
  36. * 3. Functions for setting up and using the MAC DMA
  37. * rx ring (applies to LL only).
  38. */
  39. #include <cdf_memory.h> /* cdf_mem_malloc,free, etc. */
  40. #include <cdf_types.h> /* cdf_print, bool */
  41. #include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
  42. #include <cdf_softirq_timer.h> /* cdf_softirq_timer_free */
  43. #include <htt.h> /* HTT_HL_RX_DESC_SIZE */
  44. #include <ol_cfg.h>
  45. #include <ol_rx.h>
  46. #include <ol_htt_rx_api.h>
  47. #include <htt_internal.h> /* HTT_ASSERT, htt_pdev_t, HTT_RX_BUF_SIZE */
  48. #include "regtable.h"
  49. #include <cds_ieee80211_common.h> /* ieee80211_frame, ieee80211_qoscntl */
  50. #include <cds_ieee80211_defines.h> /* ieee80211_rx_status */
  51. #ifdef DEBUG_DMA_DONE
  52. #include <asm/barrier.h>
  53. #include <wma_api.h>
  54. #endif
  55. /* AR9888v1 WORKAROUND for EV#112367 */
  56. /* FIX THIS - remove this WAR when the bug is fixed */
  57. #define PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR
  58. /*--- setup / tear-down functions -------------------------------------------*/
  59. #ifndef HTT_RX_RING_SIZE_MIN
  60. #define HTT_RX_RING_SIZE_MIN 128 /* slightly > than one large A-MPDU */
  61. #endif
  62. #ifndef HTT_RX_RING_SIZE_MAX
  63. #define HTT_RX_RING_SIZE_MAX 2048 /* ~20 ms @ 1 Gbps of 1500B MSDUs */
  64. #endif
  65. #ifndef HTT_RX_AVG_FRM_BYTES
  66. #define HTT_RX_AVG_FRM_BYTES 1000
  67. #endif
  68. #ifndef HTT_RX_HOST_LATENCY_MAX_MS
  69. #define HTT_RX_HOST_LATENCY_MAX_MS 20 /* ms */ /* very conservative */
  70. #endif
  71. #ifndef HTT_RX_HOST_LATENCY_WORST_LIKELY_MS
  72. #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10 /* ms */ /* conservative */
  73. #endif
  74. #ifndef HTT_RX_RING_REFILL_RETRY_TIME_MS
  75. #define HTT_RX_RING_REFILL_RETRY_TIME_MS 50
  76. #endif
  77. /*--- RX In Order Definitions ------------------------------------------------*/
  78. /* Number of buckets in the hash table */
  79. #define RX_NUM_HASH_BUCKETS 1024 /* This should always be a power of 2 */
  80. #define RX_NUM_HASH_BUCKETS_MASK (RX_NUM_HASH_BUCKETS - 1)
  81. /* Number of hash entries allocated per bucket */
  82. #define RX_ENTRIES_SIZE 10
  83. #define RX_HASH_FUNCTION(a) (((a >> 14) ^ (a >> 4)) & RX_NUM_HASH_BUCKETS_MASK)
  84. #ifdef RX_HASH_DEBUG_LOG
  85. #define RX_HASH_LOG(x) x
  86. #else
  87. #define RX_HASH_LOG(x) /* no-op */
  88. #endif
/**
 * htt_rx_hash_deinit() - de-initialize the rx buffer hash table
 * @pdev: HTT pdev handle
 *
 * Frees the hash table, including unmapping and freeing any rx netbufs
 * still pending in the hash buckets.  No-op if the hash table was never
 * allocated.
 */
void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
{
	uint32_t i;
	struct htt_rx_hash_entry *hash_entry;
	struct htt_list_node *list_iter = NULL;

	if (NULL == pdev->rx_ring.hash_table)
		return;

	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
		/* Free the hash entries in hash bucket i */
		list_iter = pdev->rx_ring.hash_table[i].listhead.next;
		while (list_iter != &pdev->rx_ring.hash_table[i].listhead) {
			/* recover the entry from its embedded list node */
			hash_entry =
				(struct htt_rx_hash_entry *)((char *)list_iter -
							     pdev->rx_ring.
							     listnode_offset);
			if (hash_entry->netbuf) {
#ifdef DEBUG_DMA_DONE
				cdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
					       CDF_DMA_BIDIRECTIONAL);
#else
				cdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
					       CDF_DMA_FROM_DEVICE);
#endif
				cdf_nbuf_free(hash_entry->netbuf);
				hash_entry->paddr = 0;
			}
			/* advance BEFORE the entry may be freed below */
			list_iter = list_iter->next;
			/* entries not taken from the per-bucket pool were
			 * individually allocated and must be freed here;
			 * pool entries are released with the bucket below */
			if (!hash_entry->fromlist)
				cdf_mem_free(hash_entry);
		}
		/* free the pre-allocated entry pool for bucket i */
		cdf_mem_free(pdev->rx_ring.hash_table[i].entries);
	}
	cdf_mem_free(pdev->rx_ring.hash_table);
	pdev->rx_ring.hash_table = NULL;
}
  126. static bool
  127. htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
  128. {
  129. struct htt_host_rx_desc_base *rx_desc =
  130. (struct htt_host_rx_desc_base *)msdu_desc;
  131. return (bool)
  132. (((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
  133. RX_MSDU_END_4_FIRST_MSDU_MASK) >>
  134. RX_MSDU_END_4_FIRST_MSDU_LSB);
  135. }
  136. static int htt_rx_ring_size(struct htt_pdev_t *pdev)
  137. {
  138. int size;
  139. /*
  140. * It is expected that the host CPU will typically be able to service
  141. * the rx indication from one A-MPDU before the rx indication from
  142. * the subsequent A-MPDU happens, roughly 1-2 ms later.
  143. * However, the rx ring should be sized very conservatively, to
  144. * accomodate the worst reasonable delay before the host CPU services
  145. * a rx indication interrupt.
  146. * The rx ring need not be kept full of empty buffers. In theory,
  147. * the htt host SW can dynamically track the low-water mark in the
  148. * rx ring, and dynamically adjust the level to which the rx ring
  149. * is filled with empty buffers, to dynamically meet the desired
  150. * low-water mark.
  151. * In contrast, it's difficult to resize the rx ring itself, once
  152. * it's in use.
  153. * Thus, the ring itself should be sized very conservatively, while
  154. * the degree to which the ring is filled with empty buffers should
  155. * be sized moderately conservatively.
  156. */
  157. size =
  158. ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
  159. 1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
  160. (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
  161. if (size < HTT_RX_RING_SIZE_MIN)
  162. size = HTT_RX_RING_SIZE_MIN;
  163. else if (size > HTT_RX_RING_SIZE_MAX)
  164. size = HTT_RX_RING_SIZE_MAX;
  165. size = cdf_get_pwr2(size);
  166. return size;
  167. }
  168. static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
  169. {
  170. int size;
  171. size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
  172. 1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
  173. 8 * HTT_RX_AVG_FRM_BYTES * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
  174. /*
  175. * Make sure the fill level is at least 1 less than the ring size.
  176. * Leaving 1 element empty allows the SW to easily distinguish
  177. * between a full ring vs. an empty ring.
  178. */
  179. if (size >= pdev->rx_ring.size)
  180. size = pdev->rx_ring.size - 1;
  181. return size;
  182. }
  183. static void htt_rx_ring_refill_retry(void *arg)
  184. {
  185. htt_pdev_handle pdev = (htt_pdev_handle) arg;
  186. htt_rx_msdu_buff_replenish(pdev);
  187. }
/**
 * htt_rx_ring_fill_n() - allocate, DMA-map, and post @num rx buffers
 * @pdev: HTT pdev handle
 * @num: number of empty rx buffers to post into the rx ring
 *
 * On allocation failure the refill-retry timer is (re)armed so the fill
 * is attempted again later.  Whatever the outcome, the shared alloc index
 * is updated to publish however many buffers were successfully posted.
 */
void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
{
	int idx;
	CDF_STATUS status;
	struct htt_host_rx_desc_base *rx_desc;

	idx = *(pdev->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		uint32_t paddr;
		cdf_nbuf_t rx_netbuf;
		int headroom;

		rx_netbuf =
			cdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
				       0, 4, false);
		if (!rx_netbuf) {
			cdf_softirq_timer_cancel(&pdev->rx_ring.
						 refill_retry_timer);
			/*
			 * Failed to fill it to the desired level -
			 * we'll start a timer and try again next time.
			 * As long as enough buffers are left in the ring for
			 * another A-MPDU rx, no special recovery is needed.
			 */
#ifdef DEBUG_DMA_DONE
			pdev->rx_ring.dbg_refill_cnt++;
#endif
			cdf_softirq_timer_start(
				&pdev->rx_ring.refill_retry_timer,
				HTT_RX_RING_REFILL_RETRY_TIME_MS);
			goto fail;
		}
		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = htt_rx_desc(rx_netbuf);
		*(uint32_t *) &rx_desc->attention = 0;
#ifdef DEBUG_DMA_DONE
		/* seed known patterns so DMA-done debugging can detect
		 * whether the HW actually wrote the descriptor */
		*(uint32_t *) &rx_desc->msdu_end = 1;
#define MAGIC_PATTERN 0xDEADBEEF
		*(uint32_t *) &rx_desc->msdu_start = MAGIC_PATTERN;
		/* To ensure that attention bit is reset and msdu_end is set
		   before calling dma_map */
		smp_mb();
#endif
		/*
		 * Adjust cdf_nbuf_data to point to the location in the buffer
		 * where the rx descriptor will be filled in.
		 */
		headroom = cdf_nbuf_data(rx_netbuf) - (uint8_t *) rx_desc;
		cdf_nbuf_push_head(rx_netbuf, headroom);
#ifdef DEBUG_DMA_DONE
		status =
			cdf_nbuf_map(pdev->osdev, rx_netbuf,
				     CDF_DMA_BIDIRECTIONAL);
#else
		status =
			cdf_nbuf_map(pdev->osdev, rx_netbuf,
				     CDF_DMA_FROM_DEVICE);
#endif
		if (status != CDF_STATUS_SUCCESS) {
			cdf_nbuf_free(rx_netbuf);
			goto fail;
		}
		paddr = cdf_nbuf_get_frag_paddr_lo(rx_netbuf, 0);
		if (pdev->cfg.is_full_reorder_offload) {
			/* full reorder offload tracks buffers by physical
			 * address in a hash table, not by ring position */
			if (cdf_unlikely
				    (htt_rx_hash_list_insert(pdev, paddr,
							     rx_netbuf))) {
				cdf_print("%s: hash insert failed!\n",
					  __func__);
				/* undo the mapping before dropping the buf */
#ifdef DEBUG_DMA_DONE
				cdf_nbuf_unmap(pdev->osdev, rx_netbuf,
					       CDF_DMA_BIDIRECTIONAL);
#else
				cdf_nbuf_unmap(pdev->osdev, rx_netbuf,
					       CDF_DMA_FROM_DEVICE);
#endif
				cdf_nbuf_free(rx_netbuf);
				goto fail;
			}
			htt_rx_dbg_rxbuf_set(pdev, paddr, rx_netbuf);
		} else {
			pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
		}
#if HTT_PADDR64
		/* NOTE(review): only the low 32 bits of the physical address
		 * are posted into the 64-bit ring slot; assumes rx buffers
		 * lie below 4 GB -- confirm against target addressing */
		pdev->rx_ring.buf.paddrs_ring[idx] = 0;
		pdev->rx_ring.buf.paddrs_ring[idx] = (uint32_t)paddr;
#else
		pdev->rx_ring.buf.paddrs_ring[idx] = paddr;
#endif /* HTT_PADDR64 */
		pdev->rx_ring.fill_cnt++;
		num--;
		idx++;
		idx &= pdev->rx_ring.size_mask;
	}

fail:
	/* publish the new alloc index so the target sees the posted bufs */
	*(pdev->rx_ring.alloc_idx.vaddr) = idx;
	return;
}
  284. unsigned htt_rx_ring_elems(struct htt_pdev_t *pdev)
  285. {
  286. return
  287. (*pdev->rx_ring.alloc_idx.vaddr -
  288. pdev->rx_ring.sw_rd_idx.msdu_payld) & pdev->rx_ring.size_mask;
  289. }
  290. unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
  291. {
  292. return
  293. (*pdev->rx_ring.alloc_idx.vaddr -
  294. *pdev->rx_ring.target_idx.vaddr) &
  295. pdev->rx_ring.size_mask;
  296. }
/**
 * htt_rx_detach() - tear down the HTT rx ring state for this pdev
 * @pdev: HTT pdev handle
 *
 * Cancels and frees the refill-retry timer, releases every rx buffer still
 * posted (via the hash table in full-reorder-offload mode, or by walking
 * the netbuf ring otherwise), then frees the consistent-memory index words
 * and the physical-address ring itself.
 */
void htt_rx_detach(struct htt_pdev_t *pdev)
{
	cdf_softirq_timer_cancel(&pdev->rx_ring.refill_retry_timer);
	cdf_softirq_timer_free(&pdev->rx_ring.refill_retry_timer);

	if (pdev->cfg.is_full_reorder_offload) {
		cdf_os_mem_free_consistent(pdev->osdev,
					   sizeof(uint32_t),
					   pdev->rx_ring.target_idx.vaddr,
					   pdev->rx_ring.target_idx.paddr,
					   cdf_get_dma_mem_context((&pdev->
								    rx_ring.
								    target_idx),
								   memctx));
		/* frees all netbufs still tracked in the hash table */
		htt_rx_hash_deinit(pdev);
	} else {
		/* drain every buffer between the SW read index and the
		 * alloc index: unmap from DMA, then free */
		int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;

		while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
#ifdef DEBUG_DMA_DONE
			cdf_nbuf_unmap(pdev->osdev,
				       pdev->rx_ring.buf.
				       netbufs_ring[sw_rd_idx],
				       CDF_DMA_BIDIRECTIONAL);
#else
			cdf_nbuf_unmap(pdev->osdev,
				       pdev->rx_ring.buf.
				       netbufs_ring[sw_rd_idx],
				       CDF_DMA_FROM_DEVICE);
#endif
			cdf_nbuf_free(pdev->rx_ring.buf.
				      netbufs_ring[sw_rd_idx]);
			sw_rd_idx++;
			sw_rd_idx &= pdev->rx_ring.size_mask;
		}
		cdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
	}

	cdf_os_mem_free_consistent(pdev->osdev,
				   sizeof(uint32_t),
				   pdev->rx_ring.alloc_idx.vaddr,
				   pdev->rx_ring.alloc_idx.paddr,
				   cdf_get_dma_mem_context((&pdev->rx_ring.
							    alloc_idx),
							   memctx));
	cdf_os_mem_free_consistent(pdev->osdev,
				   pdev->rx_ring.size * sizeof(uint32_t),
				   pdev->rx_ring.buf.paddrs_ring,
				   pdev->rx_ring.base_paddr,
				   cdf_get_dma_mem_context((&pdev->rx_ring.buf),
							   memctx));
}
  346. /*--- rx descriptor field access functions ----------------------------------*/
  347. /*
  348. * These functions need to use bit masks and shifts to extract fields
  349. * from the rx descriptors, rather than directly using the bitfields.
  350. * For example, use
  351. * (desc & FIELD_MASK) >> FIELD_LSB
  352. * rather than
  353. * desc.field
  354. * This allows the functions to work correctly on either little-endian
  355. * machines (no endianness conversion needed) or big-endian machines
  356. * (endianness conversion provided automatically by the HW DMA's
  357. * byte-swizzling).
  358. */
  359. /* FIX THIS: APPLIES TO LL ONLY */
  360. /**
  361. * htt_rx_mpdu_desc_retry_ll() - Returns the retry bit from the Rx descriptor
  362. * for the Low Latency driver
  363. * @pdev: Handle (pointer) to HTT pdev.
  364. * @mpdu_desc: Void pointer to the Rx descriptor for MPDU
  365. * before the beginning of the payload.
  366. *
  367. * This function returns the retry bit of the 802.11 header for the
  368. * provided rx MPDU descriptor.
  369. *
  370. * Return: boolean -- true if retry is set, false otherwise
  371. */
  372. bool
  373. htt_rx_mpdu_desc_retry_ll(htt_pdev_handle pdev, void *mpdu_desc)
  374. {
  375. struct htt_host_rx_desc_base *rx_desc =
  376. (struct htt_host_rx_desc_base *) mpdu_desc;
  377. return
  378. (bool)(((*((uint32_t *) &rx_desc->mpdu_start)) &
  379. RX_MPDU_START_0_RETRY_MASK) >>
  380. RX_MPDU_START_0_RETRY_LSB);
  381. }
  382. uint16_t htt_rx_mpdu_desc_seq_num_ll(htt_pdev_handle pdev, void *mpdu_desc)
  383. {
  384. struct htt_host_rx_desc_base *rx_desc =
  385. (struct htt_host_rx_desc_base *)mpdu_desc;
  386. return
  387. (uint16_t) (((*((uint32_t *) &rx_desc->mpdu_start)) &
  388. RX_MPDU_START_0_SEQ_NUM_MASK) >>
  389. RX_MPDU_START_0_SEQ_NUM_LSB);
  390. }
/* FIX THIS: APPLIES TO LL ONLY */
/**
 * htt_rx_mpdu_desc_pn_ll() - extract the packet number (PN) from an LL
 *                            rx MPDU descriptor
 * @pdev: HTT pdev handle
 * @mpdu_desc: rx descriptor for the MPDU
 * @pn: output union, filled according to @pn_len_bits
 * @pn_len_bits: PN width for the cipher in use (24, 48, or 128 bits)
 *
 * The PN is assembled from several descriptor words: mpdu_start holds
 * bits 47:0 and, for 128-bit (WAPI) PNs, msdu_end supplies bits 127:48.
 * An invalid @pn_len_bits only logs an error; @pn is left untouched.
 */
void
htt_rx_mpdu_desc_pn_ll(htt_pdev_handle pdev,
		       void *mpdu_desc, union htt_rx_pn_t *pn, int pn_len_bits)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;

	switch (pn_len_bits) {
	case 24:
		/* bits 23:0 */
		pn->pn24 = rx_desc->mpdu_start.pn_31_0 & 0xffffff;
		break;
	case 48:
		/* bits 31:0 */
		pn->pn48 = rx_desc->mpdu_start.pn_31_0;
		/* bits 47:32 -- in word 2 of mpdu_start, shifted up past
		 * the low 32 bits already placed */
		pn->pn48 |= ((uint64_t)
			     ((*(((uint32_t *) &rx_desc->mpdu_start) + 2))
			      & RX_MPDU_START_2_PN_47_32_MASK))
			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
		break;
	case 128:
		/* bits 31:0 */
		pn->pn128[0] = rx_desc->mpdu_start.pn_31_0;
		/* bits 47:32 */
		pn->pn128[0] |=
			((uint64_t) ((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
				     & RX_MPDU_START_2_PN_47_32_MASK))
			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
		/* bits 63:48 -- extended WAPI PN lives in msdu_end */
		pn->pn128[0] |=
			((uint64_t) ((*(((uint32_t *) &rx_desc->msdu_end) + 2))
				     & RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK))
			<< (48 - RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB);
		/* bits 95:64 */
		pn->pn128[1] = rx_desc->msdu_end.ext_wapi_pn_95_64;
		/* bits 127:96 */
		pn->pn128[1] |=
			((uint64_t) rx_desc->msdu_end.ext_wapi_pn_127_96) << 32;
		break;
	default:
		cdf_print("Error: invalid length spec (%d bits) for PN\n",
			  pn_len_bits);
	};
}
  436. /**
  437. * htt_rx_mpdu_desc_tid_ll() - Returns the TID value from the Rx descriptor
  438. * for Low Latency driver
  439. * @pdev: Handle (pointer) to HTT pdev.
  440. * @mpdu_desc: Void pointer to the Rx descriptor for the MPDU
  441. * before the beginning of the payload.
  442. *
  443. * This function returns the TID set in the 802.11 QoS Control for the MPDU
  444. * in the packet header, by looking at the mpdu_start of the Rx descriptor.
  445. * Rx descriptor gets a copy of the TID from the MAC.
  446. *
  447. * Return: Actual TID set in the packet header.
  448. */
  449. uint8_t
  450. htt_rx_mpdu_desc_tid_ll(htt_pdev_handle pdev, void *mpdu_desc)
  451. {
  452. struct htt_host_rx_desc_base *rx_desc =
  453. (struct htt_host_rx_desc_base *) mpdu_desc;
  454. return
  455. (uint8_t)(((*(((uint32_t *) &rx_desc->mpdu_start) + 2)) &
  456. RX_MPDU_START_2_TID_MASK) >>
  457. RX_MPDU_START_2_TID_LSB);
  458. }
  459. uint32_t htt_rx_mpdu_desc_tsf32(htt_pdev_handle pdev, void *mpdu_desc)
  460. {
  461. /* FIX THIS */
  462. return 0;
  463. }
  464. /* FIX THIS: APPLIES TO LL ONLY */
  465. char *htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev_handle pdev, void *mpdu_desc)
  466. {
  467. struct htt_host_rx_desc_base *rx_desc =
  468. (struct htt_host_rx_desc_base *)mpdu_desc;
  469. return rx_desc->rx_hdr_status;
  470. }
  471. /* FIX THIS: APPLIES TO LL ONLY */
  472. bool htt_rx_msdu_desc_completes_mpdu_ll(htt_pdev_handle pdev, void *msdu_desc)
  473. {
  474. struct htt_host_rx_desc_base *rx_desc =
  475. (struct htt_host_rx_desc_base *)msdu_desc;
  476. return (bool)
  477. (((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
  478. RX_MSDU_END_4_LAST_MSDU_MASK) >> RX_MSDU_END_4_LAST_MSDU_LSB);
  479. }
  480. /* FIX THIS: APPLIES TO LL ONLY */
  481. int htt_rx_msdu_has_wlan_mcast_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
  482. {
  483. struct htt_host_rx_desc_base *rx_desc =
  484. (struct htt_host_rx_desc_base *)msdu_desc;
  485. /* HW rx desc: the mcast_bcast flag is only valid
  486. if first_msdu is set */
  487. return
  488. ((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
  489. RX_MSDU_END_4_FIRST_MSDU_MASK) >> RX_MSDU_END_4_FIRST_MSDU_LSB;
  490. }
  491. /* FIX THIS: APPLIES TO LL ONLY */
  492. bool htt_rx_msdu_is_wlan_mcast_ll(htt_pdev_handle pdev, void *msdu_desc)
  493. {
  494. struct htt_host_rx_desc_base *rx_desc =
  495. (struct htt_host_rx_desc_base *)msdu_desc;
  496. return
  497. ((*((uint32_t *) &rx_desc->attention)) &
  498. RX_ATTENTION_0_MCAST_BCAST_MASK)
  499. >> RX_ATTENTION_0_MCAST_BCAST_LSB;
  500. }
  501. /* FIX THIS: APPLIES TO LL ONLY */
  502. int htt_rx_msdu_is_frag_ll(htt_pdev_handle pdev, void *msdu_desc)
  503. {
  504. struct htt_host_rx_desc_base *rx_desc =
  505. (struct htt_host_rx_desc_base *)msdu_desc;
  506. return
  507. ((*((uint32_t *) &rx_desc->attention)) &
  508. RX_ATTENTION_0_FRAGMENT_MASK) >> RX_ATTENTION_0_FRAGMENT_LSB;
  509. }
/**
 * htt_rx_msdu_fw_desc_get() - fetch the one-byte FW rx descriptor for an MSDU
 * @pdev: HTT pdev handle (provides rx_fw_desc_offset)
 * @msdu_desc: rx descriptor for the MSDU
 *
 * Return: the FW rx descriptor byte (FW_RX_DESC_* flag bits).
 */
static inline
uint8_t htt_rx_msdu_fw_desc_get(htt_pdev_handle pdev, void *msdu_desc)
{
	/*
	 * HL and LL use the same format for FW rx desc, but have the FW rx desc
	 * in different locations.
	 * In LL, the FW rx descriptor has been copied into the same
	 * htt_host_rx_desc_base struct that holds the HW rx desc.
	 * In HL, the FW rx descriptor, along with the MSDU payload,
	 * is in the same buffer as the rx indication message.
	 *
	 * Use the FW rx desc offset configured during startup to account for
	 * this difference between HL vs. LL.
	 *
	 * An optimization would be to define the LL and HL msdu_desc pointer
	 * in such a way that they both use the same offset to the FW rx desc.
	 * Then the following functions could be converted to macros, without
	 * needing to expose the htt_pdev_t definition outside HTT.
	 */
	return *(((uint8_t *) msdu_desc) + pdev->rx_fw_desc_offset);
}
  531. int htt_rx_msdu_discard(htt_pdev_handle pdev, void *msdu_desc)
  532. {
  533. return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_DISCARD_M;
  534. }
  535. int htt_rx_msdu_forward(htt_pdev_handle pdev, void *msdu_desc)
  536. {
  537. return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_FORWARD_M;
  538. }
  539. int htt_rx_msdu_inspect(htt_pdev_handle pdev, void *msdu_desc)
  540. {
  541. return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_INSPECT_M;
  542. }
  543. void
  544. htt_rx_msdu_actions(htt_pdev_handle pdev,
  545. void *msdu_desc, int *discard, int *forward, int *inspect)
  546. {
  547. uint8_t rx_msdu_fw_desc = htt_rx_msdu_fw_desc_get(pdev, msdu_desc);
  548. #ifdef HTT_DEBUG_DATA
  549. HTT_PRINT("act:0x%x ", rx_msdu_fw_desc);
  550. #endif
  551. *discard = rx_msdu_fw_desc & FW_RX_DESC_DISCARD_M;
  552. *forward = rx_msdu_fw_desc & FW_RX_DESC_FORWARD_M;
  553. *inspect = rx_msdu_fw_desc & FW_RX_DESC_INSPECT_M;
  554. }
  555. static inline cdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
  556. {
  557. int idx;
  558. cdf_nbuf_t msdu;
  559. HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
  560. #ifdef DEBUG_DMA_DONE
  561. pdev->rx_ring.dbg_ring_idx++;
  562. pdev->rx_ring.dbg_ring_idx &= pdev->rx_ring.size_mask;
  563. #endif
  564. idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
  565. msdu = pdev->rx_ring.buf.netbufs_ring[idx];
  566. idx++;
  567. idx &= pdev->rx_ring.size_mask;
  568. pdev->rx_ring.sw_rd_idx.msdu_payld = idx;
  569. pdev->rx_ring.fill_cnt--;
  570. return msdu;
  571. }
  572. static inline cdf_nbuf_t
  573. htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, uint32_t paddr)
  574. {
  575. HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
  576. pdev->rx_ring.fill_cnt--;
  577. return htt_rx_hash_list_lookup(pdev, paddr);
  578. }
/* FIX ME: this function applies only to LL rx descs.
   An equivalent for HL rx descs is needed. */
#ifdef CHECKSUM_OFFLOAD
/**
 * htt_set_checksum_result_ll() - record HW L4 checksum offload results
 * @pdev: HTT pdev handle
 * @msdu: rx netbuf to tag with the checksum result
 * @rx_desc: LL rx descriptor for the MSDU
 *
 * Maps the descriptor's {ip_frag, tcp/udp proto, ipv6} bits through a
 * lookup table to a cksum type, then reports the HW pass/fail status
 * via cdf_nbuf_set_rx_cksum().  Fragmented IP packets always get
 * CDF_NBUF_RX_CKSUM_NONE (HW does not checksum fragments).
 */
static inline
void
htt_set_checksum_result_ll(htt_pdev_handle pdev, cdf_nbuf_t msdu,
			   struct htt_host_rx_desc_base *rx_desc)
{
#define MAX_IP_VER 2
#define MAX_PROTO_VAL 4
	struct rx_msdu_start *rx_msdu = &rx_desc->msdu_start;
	/* proto index: 0 = neither, 1 = TCP, 2 = UDP, 3 = both (invalid) */
	unsigned int proto = (rx_msdu->tcp_proto) | (rx_msdu->udp_proto << 1);
	/*
	 * HW supports TCP & UDP checksum offload for ipv4 and ipv6
	 */
	/* indexed as [ip_frag][proto][ipv6_proto] */
	static const cdf_nbuf_l4_rx_cksum_type_t
		cksum_table[][MAX_PROTO_VAL][MAX_IP_VER] = {
		{
			/* non-fragmented IP packet */
			/* non TCP/UDP packet */
			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
			/* TCP packet */
			{CDF_NBUF_RX_CKSUM_TCP, CDF_NBUF_RX_CKSUM_TCPIPV6},
			/* UDP packet */
			{CDF_NBUF_RX_CKSUM_UDP, CDF_NBUF_RX_CKSUM_UDPIPV6},
			/* invalid packet type */
			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
		},
		{
			/* fragmented IP packet */
			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
		}
	};
	cdf_nbuf_rx_cksum_t cksum = {
		cksum_table[rx_msdu->ip_frag][proto][rx_msdu->ipv6_proto],
		CDF_NBUF_RX_CKSUM_NONE,
		0
	};

	/* only fill in the result when HW actually checksummed this frame */
	if (cksum.l4_type !=
	    (cdf_nbuf_l4_rx_cksum_type_t) CDF_NBUF_RX_CKSUM_NONE) {
		cksum.l4_result =
			((*(uint32_t *) &rx_desc->attention) &
			 RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) ?
			CDF_NBUF_RX_CKSUM_NONE :
			CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
	}
	cdf_nbuf_set_rx_cksum(msdu, &cksum);
#undef MAX_IP_VER
#undef MAX_PROTO_VAL
}
#else
#define htt_set_checksum_result_ll(pdev, msdu, rx_desc) /* no-op */
#endif
  635. #ifdef DEBUG_DMA_DONE
  636. void htt_rx_print_rx_indication(cdf_nbuf_t rx_ind_msg, htt_pdev_handle pdev)
  637. {
  638. uint32_t *msg_word;
  639. int byte_offset;
  640. int mpdu_range, num_mpdu_range;
  641. msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
  642. cdf_print
  643. ("------------------HTT RX IND-----------------------------\n");
  644. cdf_print("alloc idx paddr %x (*vaddr) %d\n",
  645. pdev->rx_ring.alloc_idx.paddr,
  646. *pdev->rx_ring.alloc_idx.vaddr);
  647. cdf_print("sw_rd_idx msdu_payld %d msdu_desc %d\n",
  648. pdev->rx_ring.sw_rd_idx.msdu_payld,
  649. pdev->rx_ring.sw_rd_idx.msdu_desc);
  650. cdf_print("dbg_ring_idx %d\n", pdev->rx_ring.dbg_ring_idx);
  651. cdf_print("fill_level %d fill_cnt %d\n", pdev->rx_ring.fill_level,
  652. pdev->rx_ring.fill_cnt);
  653. cdf_print("initial msdu_payld %d curr mpdu range %d curr mpdu cnt %d\n",
  654. pdev->rx_ring.dbg_initial_msdu_payld,
  655. pdev->rx_ring.dbg_mpdu_range, pdev->rx_ring.dbg_mpdu_count);
  656. /* Print the RX_IND contents */
  657. cdf_print("peer id %x RV %x FV %x ext_tid %x msg_type %x\n",
  658. HTT_RX_IND_PEER_ID_GET(*msg_word),
  659. HTT_RX_IND_REL_VALID_GET(*msg_word),
  660. HTT_RX_IND_FLUSH_VALID_GET(*msg_word),
  661. HTT_RX_IND_EXT_TID_GET(*msg_word),
  662. HTT_T2H_MSG_TYPE_GET(*msg_word));
  663. cdf_print("num_mpdu_ranges %x rel_seq_num_end %x rel_seq_num_start %x\n"
  664. " flush_seq_num_end %x flush_seq_num_start %x\n",
  665. HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1)),
  666. HTT_RX_IND_REL_SEQ_NUM_END_GET(*(msg_word + 1)),
  667. HTT_RX_IND_REL_SEQ_NUM_START_GET(*(msg_word + 1)),
  668. HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1)),
  669. HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1)));
  670. cdf_print("fw_rx_desc_bytes %x\n",
  671. HTT_RX_IND_FW_RX_DESC_BYTES_GET(*
  672. (msg_word + 2 +
  673. HTT_RX_PPDU_DESC_SIZE32)));
  674. /* receive MSDU desc for current frame */
  675. byte_offset =
  676. HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
  677. pdev->rx_ind_msdu_byte_idx);
  678. cdf_print("msdu byte idx %x msdu desc %x\n", pdev->rx_ind_msdu_byte_idx,
  679. HTT_RX_IND_FW_RX_DESC_BYTES_GET(*
  680. (msg_word + 2 +
  681. HTT_RX_PPDU_DESC_SIZE32)));
  682. num_mpdu_range = HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
  683. for (mpdu_range = 0; mpdu_range < num_mpdu_range; mpdu_range++) {
  684. enum htt_rx_status status;
  685. int num_mpdus;
  686. htt_rx_ind_mpdu_range_info(pdev, rx_ind_msg, mpdu_range,
  687. &status, &num_mpdus);
  688. cdf_print("mpdu_range %x status %x num_mpdus %x\n",
  689. pdev->rx_ind_msdu_byte_idx, status, num_mpdus);
  690. }
  691. cdf_print
  692. ("---------------------------------------------------------\n");
  693. }
  694. #endif
#ifdef DEBUG_DMA_DONE
/* how many 1 ms retries to allow the MSDU "done" bit to settle */
#define MAX_DONE_BIT_CHECK_ITER 5
#endif
/**
 * htt_rx_amsdu_pop_ll() - pop the MSDUs of one MPDU off the LL rx ring
 * @pdev: the HTT instance the rx data came on
 * @rx_ind_msg: the HTT rx indication (or rx frag indication) message
 * @head_msdu: output - first netbuf of the popped MSDU list
 * @tail_msdu: output - last netbuf of the popped MSDU list
 *
 * Pops buffers off the rx ring until a descriptor marks its buffer as
 * the last MSDU of the MPDU, linking them through their "next" pointers.
 * An MSDU whose HW-reported length spans multiple rx buffers is chained
 * through additional ring entries (msdu_chained).
 *
 * Return: 1 if any MSDU was spread across multiple rx buffers, else 0.
 */
int
htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
		    cdf_nbuf_t rx_ind_msg,
		    cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	cdf_nbuf_t msdu;
	struct htt_host_rx_desc_base *rx_desc;
	uint8_t *rx_ind_data;
	uint32_t *msg_word, num_msdu_bytes;
	enum htt_t2h_msg_type msg_type;
	uint8_t pad_bytes = 0;

	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
	rx_ind_data = cdf_nbuf_data(rx_ind_msg);
	msg_word = (uint32_t *) rx_ind_data;
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	/* frag indications carry the FW rx desc byte count at a different
	 * offset than regular rx indications
	 */
	if (cdf_unlikely(HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type)) {
		num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
			*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
	} else {
		num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
			*(msg_word
			  + HTT_RX_IND_HDR_PREFIX_SIZE32
			  + HTT_RX_PPDU_DESC_SIZE32));
	}
	msdu = *head_msdu = htt_rx_netbuf_pop(pdev);
	while (1) {
		int last_msdu, msdu_len_invalid, msdu_chained;
		int byte_offset;

		/*
		 * Set the netbuf length to be the entire buffer length
		 * initially, so the unmap will unmap the entire buffer.
		 */
		cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_BIDIRECTIONAL);
#else
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
#endif
		/* cache consistency has been taken care of by cdf_nbuf_unmap */

		/*
		 * Now read the rx descriptor.
		 * Set the length to the appropriate value.
		 * Check if this MSDU completes a MPDU.
		 */
		rx_desc = htt_rx_desc(msdu);
#if defined(HELIUMPLUS_PADDR64)
		/* WIFI 2.0+ HW inserts L3 header padding; skip it below */
		if (HTT_WIFI_IP(pdev, 2, 0))
			pad_bytes = rx_desc->msdu_end.l3_header_padding;
#endif /* defined(HELIUMPLUS_PADDR64) */
		/*
		 * Make the netbuf's data pointer point to the payload rather
		 * than the descriptor.
		 */
		cdf_nbuf_pull_head(msdu,
				   HTT_RX_STD_DESC_RESERVATION + pad_bytes);
		/*
		 * Sanity check - confirm the HW is finished filling in
		 * the rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
#ifdef DEBUG_DMA_DONE
		if (cdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
				   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
			int dbg_iter = MAX_DONE_BIT_CHECK_ITER;

			cdf_print("malformed frame\n");
			/* re-invalidate and re-read, giving DMA time to land */
			while (dbg_iter &&
			       (!((*(uint32_t *) &rx_desc->attention) &
				  RX_ATTENTION_0_MSDU_DONE_MASK))) {
				cdf_mdelay(1);
				cdf_invalidate_range((void *)rx_desc,
						     (void *)((char *)rx_desc +
						HTT_RX_STD_DESC_RESERVATION));
				cdf_print("debug iter %d success %d\n",
					  dbg_iter,
					  pdev->rx_ring.dbg_sync_success);
				dbg_iter--;
			}
			if (cdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
					   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
#ifdef HTT_RX_RESTORE
				/* flag the ring for reset instead of crashing */
				cdf_print("RX done bit error detected!\n");
				cdf_nbuf_set_next(msdu, NULL);
				*tail_msdu = msdu;
				pdev->rx_ring.rx_reset = 1;
				return msdu_chaining;
#else
				wma_cli_set_command(0, GEN_PARAM_CRASH_INJECT,
						    0, GEN_CMD);
				HTT_ASSERT_ALWAYS(0);
#endif
			}
			pdev->rx_ring.dbg_sync_success++;
			cdf_print("debug iter %d success %d\n", dbg_iter,
				  pdev->rx_ring.dbg_sync_success);
		}
#else
		HTT_ASSERT_ALWAYS((*(uint32_t *) &rx_desc->attention) &
				  RX_ATTENTION_0_MSDU_DONE_MASK);
#endif
		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf.
		 * HL uses the same rx indication message definition as LL, and
		 * simply appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself).
		 * So, the offset into the rx indication message only has to
		 * account for the standard offset of the per-MSDU FW rx
		 * desc info within the message, and how many bytes of the
		 * per-MSDU FW rx desc info have already been consumed.
		 * (And the endianness of the host,
		 * since for a big-endian host, the rx ind message contents,
		 * including the per-MSDU rx desc bytes, were byteswapped during
		 * upload.)
		 */
		if (pdev->rx_ind_msdu_byte_idx < num_msdu_bytes) {
			if (cdf_unlikely
				    (HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type))
				byte_offset =
					HTT_ENDIAN_BYTE_IDX_SWAP
					(HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET);
			else
				byte_offset =
					HTT_ENDIAN_BYTE_IDX_SWAP
					(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
						pdev->rx_ind_msdu_byte_idx);
			*((uint8_t *) &rx_desc->fw_desc.u.val) =
				rx_ind_data[byte_offset];
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure,
			 * verify that the target has not attached
			 * extension data (e.g. LRO flow ID).
			 */
			/*
			 * The assertion below currently doesn't work for
			 * RX_FRAG_IND messages, since their format differs
			 * from the RX_IND format (no FW rx PPDU desc in
			 * the current RX_FRAG_IND message).
			 * If the RX_FRAG_IND message format is updated to match
			 * the RX_IND message format, then the following
			 * assertion can be restored.
			 */
			/* cdf_assert((rx_ind_data[byte_offset] &
			   FW_RX_DESC_EXT_M) == 0); */
			pdev->rx_ind_msdu_byte_idx += 1;
			/* or more, if there's ext data */
		} else {
			/*
			 * When an oversized AMSDU happened, FW will lost some
			 * of MSDU status - in this case, the FW descriptors
			 * provided will be less than the actual MSDUs
			 * inside this MPDU.
			 * Mark the FW descriptors so that it will still
			 * deliver to upper stack, if no CRC error for the MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for MSDUs
			 * in the end of this A-MSDU instead of the beginning.
			 */
			*((uint8_t *) &rx_desc->fw_desc.u.val) = 0;
		}
		/*
		 * TCP/UDP checksum offload support
		 */
		htt_set_checksum_result_ll(pdev, msdu, rx_desc);
		msdu_len_invalid = (*(uint32_t *) &rx_desc->attention) &
				   RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK;
		/* number of additional ring buffers this MSDU spills into */
		msdu_chained = (((*(uint32_t *) &rx_desc->frag_info) &
				 RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) >>
				RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB);
		msdu_len =
			((*((uint32_t *) &rx_desc->msdu_start)) &
			 RX_MSDU_START_0_MSDU_LENGTH_MASK) >>
			RX_MSDU_START_0_MSDU_LENGTH_LSB;
		do {
			if (!msdu_len_invalid && !msdu_chained) {
#if defined(PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR)
				/* WAR: implausibly long MSDU - leave untrimmed */
				if (msdu_len > 0x3000)
					break;
#endif
				cdf_nbuf_trim_tail(msdu,
						   HTT_RX_BUF_SIZE -
						   (RX_STD_DESC_SIZE +
						    msdu_len));
			}
		} while (0);
		/* pull the extra ring buffers of a multi-buffer MSDU */
		while (msdu_chained--) {
			cdf_nbuf_t next = htt_rx_netbuf_pop(pdev);

			cdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
			msdu_len -= HTT_RX_BUF_SIZE;
			cdf_nbuf_set_next(msdu, next);
			msdu = next;
			msdu_chaining = 1;
			if (msdu_chained == 0) {
				/* Trim the last one to the correct size -
				 * accounting for inconsistent HW lengths
				 * causing length overflows and underflows
				 */
				if (((unsigned)msdu_len) >
				    ((unsigned)
				     (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE))) {
					msdu_len =
						(HTT_RX_BUF_SIZE -
						 RX_STD_DESC_SIZE);
				}
				cdf_nbuf_trim_tail(next,
						   HTT_RX_BUF_SIZE -
						   (RX_STD_DESC_SIZE +
						    msdu_len));
			}
		}
		last_msdu =
			((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
			 RX_MSDU_END_4_LAST_MSDU_MASK) >>
			RX_MSDU_END_4_LAST_MSDU_LSB;
		if (last_msdu) {
			cdf_nbuf_set_next(msdu, NULL);
			break;
		} else {
			cdf_nbuf_t next = htt_rx_netbuf_pop(pdev);

			cdf_nbuf_set_next(msdu, next);
			msdu = next;
		}
	}
	*tail_msdu = msdu;
	/*
	 * Don't refill the ring yet.
	 * First, the elements popped here are still in use - it is
	 * not safe to overwrite them until the matching call to
	 * mpdu_desc_list_next.
	 * Second, for efficiency it is preferable to refill the rx ring
	 * with 1 PPDU's worth of rx buffers (something like 32 x 3 buffers),
	 * rather than one MPDU's worth of rx buffers (sth like 3 buffers).
	 * Consequently, we'll rely on the txrx SW to tell us when it is done
	 * pulling all the PPDU's rx buffers out of the rx ring, and then
	 * refill it just once.
	 */
	return msdu_chaining;
}
  940. int
  941. htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
  942. cdf_nbuf_t offload_deliver_msg,
  943. int *vdev_id,
  944. int *peer_id,
  945. int *tid,
  946. uint8_t *fw_desc,
  947. cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf)
  948. {
  949. cdf_nbuf_t buf;
  950. uint32_t *msdu_hdr, msdu_len;
  951. *head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);
  952. /* Fake read mpdu_desc to keep desc ptr in sync */
  953. htt_rx_mpdu_desc_list_next(pdev, NULL);
  954. cdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
  955. #ifdef DEBUG_DMA_DONE
  956. cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_BIDIRECTIONAL);
  957. #else
  958. cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_FROM_DEVICE);
  959. #endif
  960. msdu_hdr = (uint32_t *) cdf_nbuf_data(buf);
  961. /* First dword */
  962. msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
  963. *peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
  964. /* Second dword */
  965. msdu_hdr++;
  966. *vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
  967. *tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
  968. *fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
  969. cdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
  970. cdf_nbuf_set_pktlen(buf, msdu_len);
  971. return 0;
  972. }
  973. int
  974. htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
  975. uint32_t *msg_word,
  976. int msdu_iter,
  977. int *vdev_id,
  978. int *peer_id,
  979. int *tid,
  980. uint8_t *fw_desc,
  981. cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf)
  982. {
  983. cdf_nbuf_t buf;
  984. uint32_t *msdu_hdr, msdu_len;
  985. uint32_t *curr_msdu;
  986. uint32_t paddr;
  987. curr_msdu =
  988. msg_word + (msdu_iter * HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS);
  989. paddr = HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*curr_msdu);
  990. *head_buf = *tail_buf = buf = htt_rx_in_order_netbuf_pop(pdev, paddr);
  991. if (cdf_unlikely(NULL == buf)) {
  992. cdf_print("%s: netbuf pop failed!\n", __func__);
  993. return 0;
  994. }
  995. cdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
  996. #ifdef DEBUG_DMA_DONE
  997. cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_BIDIRECTIONAL);
  998. #else
  999. cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_FROM_DEVICE);
  1000. #endif
  1001. msdu_hdr = (uint32_t *) cdf_nbuf_data(buf);
  1002. /* First dword */
  1003. msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
  1004. *peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
  1005. /* Second dword */
  1006. msdu_hdr++;
  1007. *vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
  1008. *tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
  1009. *fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
  1010. cdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
  1011. cdf_nbuf_set_pktlen(buf, msdu_len);
  1012. return 0;
  1013. }
  1014. extern void
  1015. dump_pkt(cdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);
  1016. #ifdef RX_HASH_DEBUG
  1017. #define HTT_RX_CHECK_MSDU_COUNT(msdu_count) HTT_ASSERT_ALWAYS(msdu_count)
  1018. #else
  1019. #define HTT_RX_CHECK_MSDU_COUNT(msdu_count) /* no-op */
  1020. #endif
/* Return values: 1 - success, 0 - failure */
/**
 * htt_rx_amsdu_rx_in_order_pop_ll() - pop the MSDUs of an in-order indication
 * @pdev: the HTT instance the rx data came on
 * @rx_ind_msg: the HTT_T2H in-order paddr indication message
 * @head_msdu: output - first netbuf of the popped MSDU list (NULL on failure
 *	or when the indication is an offload indication)
 * @tail_msdu: output - last netbuf of the popped MSDU list
 *
 * Pops each MSDU listed in the indication by its physical address, trims it
 * to the indicated length, and links the netbufs into a list.  Offload
 * indications are handed to ol_rx_offload_paddr_deliver_ind_handler()
 * instead.  MSDUs flagged with a MIC error are reported via
 * ol_rx_mic_error_handler() and removed from the list.
 *
 * Return: 1 on success, 0 on failure (netbuf lookup failed, offload case,
 * or every MSDU was consumed by MIC-error handling).
 */
int
htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
				cdf_nbuf_t rx_ind_msg,
				cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu)
{
	cdf_nbuf_t msdu, next, prev = NULL;
	uint8_t *rx_ind_data;
	uint32_t *msg_word;
	unsigned int msdu_count = 0;
	uint8_t offload_ind;
	struct htt_host_rx_desc_base *rx_desc;

	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
	rx_ind_data = cdf_nbuf_data(rx_ind_msg);
	msg_word = (uint32_t *) rx_ind_data;
	offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
	/* Get the total number of MSDUs */
	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
	HTT_RX_CHECK_MSDU_COUNT(msdu_count);
	/* advance past the fixed header to the per-MSDU entries */
	msg_word =
		(uint32_t *) (rx_ind_data + HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
	if (offload_ind) {
		ol_rx_offload_paddr_deliver_ind_handler(pdev, msdu_count,
							msg_word);
		*head_msdu = *tail_msdu = NULL;
		return 0;
	}
	(*head_msdu) = msdu = htt_rx_in_order_netbuf_pop(
		pdev,
		HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*msg_word));
	if (cdf_unlikely(NULL == msdu)) {
		cdf_print("%s: netbuf pop failed!\n", __func__);
		*tail_msdu = NULL;
		return 0;
	}
	while (msdu_count > 0) {
		/*
		 * Set the netbuf length to be the entire buffer length
		 * initially, so the unmap will unmap the entire buffer.
		 */
		cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_BIDIRECTIONAL);
#else
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
#endif
		/* cache consistency has been taken care of by cdf_nbuf_unmap */
		rx_desc = htt_rx_desc(msdu);
		htt_rx_extract_lro_info(msdu, rx_desc);
		/*
		 * Make the netbuf's data pointer point to the payload rather
		 * than the descriptor.
		 */
		cdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
		/* offset of the len/FW-desc dword within an MSDU entry
		 * depends on the paddr width
		 */
#if HTT_PADDR64
#define NEXT_FIELD_OFFSET_IN32 2
#else /* ! HTT_PADDR64 */
#define NEXT_FIELD_OFFSET_IN32 1
#endif /* HTT_PADDR64 */
#
		cdf_nbuf_trim_tail(msdu,
				   HTT_RX_BUF_SIZE -
				   (RX_STD_DESC_SIZE +
				    HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(
				    *(msg_word + NEXT_FIELD_OFFSET_IN32))));
#if defined(HELIUMPLUS_DEBUG)
		dump_pkt(msdu, 0, 64);
#endif
		*((uint8_t *) &rx_desc->fw_desc.u.val) =
			HTT_RX_IN_ORD_PADDR_IND_FW_DESC_GET(*(msg_word +
						NEXT_FIELD_OFFSET_IN32));
#undef NEXT_FIELD_OFFSET_IN32
		msdu_count--;
		if (cdf_unlikely((*((u_int8_t *) &rx_desc->fw_desc.u.val)) &
				 FW_RX_DESC_MIC_ERR_M)) {
			u_int8_t tid =
				HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
					*(u_int32_t *)rx_ind_data);
			u_int16_t peer_id =
				HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
					*(u_int32_t *)rx_ind_data);
			/* report the MIC failure and drop this MSDU */
			ol_rx_mic_error_handler(pdev->txrx_pdev, tid, peer_id,
						rx_desc, msdu);
			htt_rx_desc_frame_free(pdev, msdu);
			/* if this is the last msdu */
			if (!msdu_count) {
				/* if this is the only msdu */
				if (!prev) {
					*head_msdu = *tail_msdu = NULL;
					return 0;
				} else {
					*tail_msdu = prev;
					cdf_nbuf_set_next(prev, NULL);
					return 1;
				}
			} else { /* if this is not the last msdu */
				/* get the next msdu */
				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
				next = htt_rx_in_order_netbuf_pop(
					pdev,
					HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(
						*msg_word));
				if (cdf_unlikely(NULL == next)) {
					cdf_print("%s: netbuf pop failed!\n",
						  __func__);
					*tail_msdu = NULL;
					return 0;
				}
				/* if this is not the first msdu, update the
				 * next pointer of the preceding msdu
				 */
				if (prev) {
					cdf_nbuf_set_next(prev, next);
				} else {
					/* if this is the first msdu, update the
					 * head pointer
					 */
					*head_msdu = next;
				}
				msdu = next;
				continue;
			}
		}
		/* Update checksum result */
		htt_set_checksum_result_ll(pdev, msdu, rx_desc);
		/* check if this is the last msdu */
		if (msdu_count) {
			msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
			next = htt_rx_in_order_netbuf_pop(
				pdev,
				HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*msg_word));
			if (cdf_unlikely(NULL == next)) {
				cdf_print("%s: netbuf pop failed!\n",
					  __func__);
				*tail_msdu = NULL;
				return 0;
			}
			cdf_nbuf_set_next(msdu, next);
			prev = msdu;
			msdu = next;
		} else {
			*tail_msdu = msdu;
			cdf_nbuf_set_next(msdu, NULL);
		}
	}
	return 1;
}
  1167. /* Util fake function that has same prototype as cdf_nbuf_clone that just
  1168. * retures the same nbuf
  1169. */
  1170. cdf_nbuf_t htt_rx_cdf_noclone_buf(cdf_nbuf_t buf)
  1171. {
  1172. return buf;
  1173. }
/* FIXME: This is a HW definition not provided by HW, where does it go ? */
/*
 * HW rx decapsulation modes, as reported in the rx descriptor's
 * msdu_start decap_format field.
 */
enum {
	HW_RX_DECAP_FORMAT_RAW = 0,	/* not decapped: raw 802.11 + FCS */
	HW_RX_DECAP_FORMAT_NWIFI,	/* native-wifi decap - TODO confirm */
	HW_RX_DECAP_FORMAT_8023,	/* 802.3 decap - TODO confirm */
	HW_RX_DECAP_FORMAT_ETH2,	/* Ethernet II decap - TODO confirm */
};
/* length (bytes) of the 802.11 frame check sequence trailing a RAW frame */
#define HTT_FCS_LEN (4)
  1182. static void
  1183. htt_rx_parse_ppdu_start_status(struct htt_host_rx_desc_base *rx_desc,
  1184. struct ieee80211_rx_status *rs)
  1185. {
  1186. struct rx_ppdu_start *ppdu_start = &rx_desc->ppdu_start;
  1187. /* RSSI */
  1188. rs->rs_rssi = ppdu_start->rssi_comb;
  1189. /* PHY rate */
  1190. /* rs_ratephy coding
  1191. [b3 - b0]
  1192. 0 -> OFDM
  1193. 1 -> CCK
  1194. 2 -> HT
  1195. 3 -> VHT
  1196. OFDM / CCK
  1197. [b7 - b4 ] => LSIG rate
  1198. [b23 - b8 ] => service field
  1199. (b'12 static/dynamic,
  1200. b'14..b'13 BW for VHT)
  1201. [b31 - b24 ] => Reserved
  1202. HT / VHT
  1203. [b15 - b4 ] => SIG A_2 12 LSBs
  1204. [b31 - b16] => SIG A_1 16 LSBs
  1205. */
  1206. if (ppdu_start->preamble_type == 0x4) {
  1207. rs->rs_ratephy = ppdu_start->l_sig_rate_select;
  1208. rs->rs_ratephy |= ppdu_start->l_sig_rate << 4;
  1209. rs->rs_ratephy |= ppdu_start->service << 8;
  1210. } else {
  1211. rs->rs_ratephy = (ppdu_start->preamble_type & 0x4) ? 3 : 2;
  1212. #ifdef HELIUMPLUS
  1213. rs->rs_ratephy |=
  1214. (ppdu_start->ht_sig_vht_sig_ah_sig_a_2 & 0xFFF) << 4;
  1215. rs->rs_ratephy |=
  1216. (ppdu_start->ht_sig_vht_sig_ah_sig_a_1 & 0xFFFF) << 16;
  1217. #else
  1218. rs->rs_ratephy |= (ppdu_start->ht_sig_vht_sig_a_2 & 0xFFF) << 4;
  1219. rs->rs_ratephy |=
  1220. (ppdu_start->ht_sig_vht_sig_a_1 & 0xFFFF) << 16;
  1221. #endif
  1222. }
  1223. return;
  1224. }
/* This function is used by monitor mode code to restitch an MSDU list
 * corresponding to an MPDU back into an MPDU by linking up the skbs.
 *
 * For RAW decap, the MSDUs are (optionally) cloned and chained via their
 * frag-ext list, with the FCS trimmed from the last one.  For decapped
 * frames, a new head buffer is allocated and the original 802.11 +
 * LLC/A-MSDU subframe headers are rebuilt from the rx_hdr_status bytes.
 * Returns the restitched MPDU head nbuf, or NULL on failure (all buffers
 * this function allocated/cloned are freed on the failure path).
 */
cdf_nbuf_t
htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
				cdf_nbuf_t head_msdu,
				struct ieee80211_rx_status *rx_status,
				unsigned clone_not_reqd)
{
	cdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list_cloned;
	cdf_nbuf_t (*clone_nbuf_fn)(cdf_nbuf_t buf);
	unsigned decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
		is_amsdu, is_first_frag, amsdu_pad, msdu_len;
	struct htt_host_rx_desc_base *rx_desc;
	char *hdr_desc;
	unsigned char *dest;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;

	/* If this packet does not go up the normal stack path we dont need to
	 * waste cycles cloning the packets
	 */
	clone_nbuf_fn =
		clone_not_reqd ? htt_rx_cdf_noclone_buf : cdf_nbuf_clone;
	/* The nbuf has been pulled just beyond the status and points to the
	 * payload
	 */
	msdu_orig = head_msdu;
	rx_desc = htt_rx_desc(msdu_orig);
	/* Fill out the rx_status from the PPDU start and end fields */
	if (rx_desc->attention.first_mpdu) {
		htt_rx_parse_ppdu_start_status(rx_desc, rx_status);
		/* The timestamp is no longer valid - It will be valid only for
		 * the last MPDU
		 */
		rx_status->rs_tstamp.tsf = ~0;
	}
	decap_format =
		GET_FIELD(&rx_desc->msdu_start, RX_MSDU_START_2_DECAP_FORMAT);
	head_frag_list_cloned = NULL;
	/* Easy case - The MSDU status indicates that this is a non-decapped
	 * packet in RAW mode.
	 * return
	 */
	if (decap_format == HW_RX_DECAP_FORMAT_RAW) {
		/* Note that this path might suffer from headroom unavailabilty,
		 * but the RX status is usually enough
		 */
		mpdu_buf = clone_nbuf_fn(head_msdu);
		prev_buf = mpdu_buf;
		frag_list_sum_len = 0;
		is_first_frag = 1;
		msdu_len = cdf_nbuf_len(mpdu_buf);
		/* Drop the zero-length msdu */
		if (!msdu_len)
			goto mpdu_stitch_fail;
		msdu_orig = cdf_nbuf_next(head_msdu);
		while (msdu_orig) {
			/* TODO: intra AMSDU padding - do we need it ??? */
			msdu = clone_nbuf_fn(msdu_orig);
			if (!msdu)
				goto mpdu_stitch_fail;
			if (is_first_frag) {
				is_first_frag = 0;
				head_frag_list_cloned = msdu;
			}
			msdu_len = cdf_nbuf_len(msdu);
			/* Drop the zero-length msdu */
			if (!msdu_len)
				goto mpdu_stitch_fail;
			frag_list_sum_len += msdu_len;
			/* Maintain the linking of the cloned MSDUS */
			cdf_nbuf_set_next_ext(prev_buf, msdu);
			/* Move to the next */
			prev_buf = msdu;
			msdu_orig = cdf_nbuf_next(msdu_orig);
		}
		/* The last msdu length need be larger than HTT_FCS_LEN */
		if (msdu_len < HTT_FCS_LEN)
			goto mpdu_stitch_fail;
		cdf_nbuf_trim_tail(prev_buf, HTT_FCS_LEN);
		/* If there were more fragments to this RAW frame */
		if (head_frag_list_cloned) {
			cdf_nbuf_append_ext_list(mpdu_buf,
						 head_frag_list_cloned,
						 frag_list_sum_len);
		}
		goto mpdu_stitch_done;
	}
	/* Decap mode:
	 * Calculate the amount of header in decapped packet to knock off based
	 * on the decap type and the corresponding number of raw bytes to copy
	 * status header
	 */
	hdr_desc = &rx_desc->rx_hdr_status[0];
	/* Base size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;
	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
	/* 4-address (WDS) frames carry an extra 6-byte address */
	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;
	is_amsdu = 0;
	/* NOTE(review): this tests only the QoS subtype *bit* of i_fc[0];
	 * it does not mask the full subtype field nor check the frame type -
	 * confirm this is the intended way to match QoS data frames.
	 */
	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
		      (hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;
		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}
	/* TODO: Any security headers associated with MPDU */
	sec_hdr_len = 0;
	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (14 + 8) : 8;
	mpdu_buf_len = wifi_hdr_len + sec_hdr_len + msdu_llc_len;
	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = 14;
	/* Allocate a new nbuf for holding the 802.11 header retrieved from the
	 * status of the now decapped first msdu. Leave enough headroom for
	 * accomodating any radio-tap /prism like PHY header
	 */
#define HTT_MAX_MONITOR_HEADER (512)
	mpdu_buf = cdf_nbuf_alloc(pdev->osdev,
				  HTT_MAX_MONITOR_HEADER + mpdu_buf_len,
				  HTT_MAX_MONITOR_HEADER, 4, false);
	if (!mpdu_buf)
		goto mpdu_stitch_fail;
	/* Copy the MPDU related header and enc headers into the first buffer
	 * - Note that there can be a 2 byte pad between heaader and enc header
	 */
	prev_buf = mpdu_buf;
	dest = cdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
	if (!dest)
		goto mpdu_stitch_fail;
	cdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
	hdr_desc += wifi_hdr_len;
	/* NOTE - This padding is present only in the RAW header status - not
	 * when the MSDU data payload is in RAW format.
	 */
	/* Skip the "IV pad" */
	if (wifi_hdr_len & 0x3)
		hdr_desc += 2;
	/* The first LLC len is copied into the MPDU buffer */
	frag_list_sum_len = 0;
	frag_list_sum_len -= msdu_llc_len;
	msdu_orig = head_msdu;
	is_first_frag = 1;
	amsdu_pad = 0;
	while (msdu_orig) {
		/* TODO: intra AMSDU padding - do we need it ??? */
		msdu = clone_nbuf_fn(msdu_orig);
		if (!msdu)
			goto mpdu_stitch_fail;
		if (is_first_frag) {
			is_first_frag = 0;
			head_frag_list_cloned = msdu;
		} else {
			/* Maintain the linking of the cloned MSDUS */
			cdf_nbuf_set_next_ext(prev_buf, msdu);
			/* Reload the hdr ptr only on non-first MSDUs */
			rx_desc = htt_rx_desc(msdu_orig);
			hdr_desc = &rx_desc->rx_hdr_status[0];
		}
		/* Copy this buffers MSDU related status into the prev buffer */
		dest = cdf_nbuf_put_tail(prev_buf, msdu_llc_len + amsdu_pad);
		dest += amsdu_pad;
		cdf_mem_copy(dest, hdr_desc, msdu_llc_len);
		/* Push the MSDU buffer beyond the decap header */
		cdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
		frag_list_sum_len +=
			msdu_llc_len + cdf_nbuf_len(msdu) + amsdu_pad;
		/* Set up intra-AMSDU pad to be added to start of next buffer -
		 * AMSDU pad is 4 byte pad on AMSDU subframe */
		amsdu_pad = (msdu_llc_len + cdf_nbuf_len(msdu)) & 0x3;
		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
		/* TODO FIXME How do we handle MSDUs that have fraglist - Should
		 * probably iterate all the frags cloning them along the way and
		 * and also updating the prev_buf pointer
		 */
		/* Move to the next */
		prev_buf = msdu;
		msdu_orig = cdf_nbuf_next(msdu_orig);
	}
	/* TODO: Convert this to suitable cdf routines */
	cdf_nbuf_append_ext_list(mpdu_buf, head_frag_list_cloned,
				 frag_list_sum_len);

mpdu_stitch_done:
	/* Check if this buffer contains the PPDU end status for TSF */
	if (rx_desc->attention.last_mpdu)
#ifdef HELIUMPLUS
		rx_status->rs_tstamp.tsf =
			rx_desc->ppdu_end.rx_pkt_end.phy_timestamp_1_lower_32;
#else
		rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
#endif
	/* All the nbufs have been linked into the ext list and
	   then unlink the nbuf list */
	if (clone_not_reqd) {
		msdu = head_msdu;
		while (msdu) {
			msdu_orig = msdu;
			msdu = cdf_nbuf_next(msdu);
			cdf_nbuf_set_next(msdu_orig, NULL);
		}
	}
	return mpdu_buf;

mpdu_stitch_fail:
	/* Free these alloced buffers and the orig buffers in non-clone case */
	if (!clone_not_reqd) {
		/* Free the head buffer */
		if (mpdu_buf)
			cdf_nbuf_free(mpdu_buf);
		/* Free the partial list */
		while (head_frag_list_cloned) {
			msdu = head_frag_list_cloned;
			head_frag_list_cloned =
				cdf_nbuf_next_ext(head_frag_list_cloned);
			cdf_nbuf_free(msdu);
		}
	} else {
		/* Free the alloced head buffer */
		if (decap_format != HW_RX_DECAP_FORMAT_RAW)
			if (mpdu_buf)
				cdf_nbuf_free(mpdu_buf);
		/* Free the orig buffers */
		msdu = head_msdu;
		while (msdu) {
			msdu_orig = msdu;
			msdu = cdf_nbuf_next(msdu);
			cdf_nbuf_free(msdu_orig);
		}
	}
	return NULL;
}
  1457. int16_t htt_rx_mpdu_desc_rssi_dbm(htt_pdev_handle pdev, void *mpdu_desc)
  1458. {
  1459. /*
  1460. * Currently the RSSI is provided only as a field in the
  1461. * HTT_T2H_RX_IND message, rather than in each rx descriptor.
  1462. */
  1463. return HTT_RSSI_INVALID;
  1464. }
/*
 * htt_rx_amsdu_pop -
 * global function pointer that is programmed during attach to point
 * to either htt_rx_amsdu_pop_ll or htt_rx_amsdu_rx_in_order_pop_ll.
 */
int (*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
			cdf_nbuf_t rx_ind_msg,
			cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);

/*
 * htt_rx_frag_pop -
 * global function pointer that is programmed during attach to point
 * to either htt_rx_amsdu_pop_ll
 */
int (*htt_rx_frag_pop)(htt_pdev_handle pdev,
		       cdf_nbuf_t rx_ind_msg,
		       cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);

/* Pop an MSDU list delivered through a FW offload-deliver message, also
 * returning its vdev/peer/tid identifiers and FW descriptor
 * (programmed during attach to htt_rx_offload_msdu_pop_ll). */
int
(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
			   cdf_nbuf_t offload_deliver_msg,
			   int *vdev_id,
			   int *peer_id,
			   int *tid,
			   uint8_t *fw_desc,
			   cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);

/* Return the rx descriptor of the next MPDU (programmed during attach to
 * the LL or the in-order variant, depending on full reorder offload). */
void * (*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev,
				     cdf_nbuf_t rx_ind_msg);

/* Per-MPDU rx-descriptor accessors, programmed during attach to their
 * *_ll implementations. */
bool (*htt_rx_mpdu_desc_retry)(
	htt_pdev_handle pdev, void *mpdu_desc);
uint16_t (*htt_rx_mpdu_desc_seq_num)(htt_pdev_handle pdev, void *mpdu_desc);
void (*htt_rx_mpdu_desc_pn)(htt_pdev_handle pdev,
			    void *mpdu_desc,
			    union htt_rx_pn_t *pn, int pn_len_bits);
uint8_t (*htt_rx_mpdu_desc_tid)(
	htt_pdev_handle pdev, void *mpdu_desc);

/* Per-MSDU rx-descriptor accessors, likewise programmed during attach. */
bool (*htt_rx_msdu_desc_completes_mpdu)(htt_pdev_handle pdev, void *msdu_desc);
bool (*htt_rx_msdu_first_msdu_flag)(htt_pdev_handle pdev, void *msdu_desc);
int (*htt_rx_msdu_has_wlan_mcast_flag)(htt_pdev_handle pdev, void *msdu_desc);
bool (*htt_rx_msdu_is_wlan_mcast)(htt_pdev_handle pdev, void *msdu_desc);
int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);
void * (*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, cdf_nbuf_t msdu);
bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);
bool (*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
				void *mpdu_desc, uint8_t *key_id);
  1508. void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
  1509. {
  1510. int idx = pdev->rx_ring.sw_rd_idx.msdu_desc;
  1511. cdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
  1512. pdev->rx_ring.sw_rd_idx.msdu_desc = pdev->rx_ring.sw_rd_idx.msdu_payld;
  1513. return (void *)htt_rx_desc(netbuf);
  1514. }
/* Whether the rx descriptor carries channel info (programmed during
 * attach; the LL implementation always reports "not present"). */
bool (*htt_rx_msdu_chan_info_present)(
	htt_pdev_handle pdev,
	void *mpdu_desc);

/* Retrieve channel / phy-mode info for an MSDU (programmed during
 * attach; the LL implementation zeroes the outputs and returns false). */
bool (*htt_rx_msdu_center_freq)(
	htt_pdev_handle pdev,
	struct ol_txrx_peer_t *peer,
	void *mpdu_desc,
	uint16_t *primary_chan_center_freq_mhz,
	uint16_t *contig_chan1_center_freq_mhz,
	uint16_t *contig_chan2_center_freq_mhz,
	uint8_t *phy_mode);
  1526. void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
  1527. cdf_nbuf_t netbuf)
  1528. {
  1529. return (void *)htt_rx_desc(netbuf);
  1530. }
  1531. void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, cdf_nbuf_t msdu)
  1532. {
  1533. return htt_rx_desc(msdu);
  1534. }
  1535. bool htt_rx_mpdu_is_encrypted_ll(htt_pdev_handle pdev, void *mpdu_desc)
  1536. {
  1537. struct htt_host_rx_desc_base *rx_desc =
  1538. (struct htt_host_rx_desc_base *)mpdu_desc;
  1539. return (((*((uint32_t *) &rx_desc->mpdu_start)) &
  1540. RX_MPDU_START_0_ENCRYPTED_MASK) >>
  1541. RX_MPDU_START_0_ENCRYPTED_LSB) ? true : false;
  1542. }
  1543. bool htt_rx_msdu_chan_info_present_ll(htt_pdev_handle pdev, void *mpdu_desc)
  1544. {
  1545. return false;
  1546. }
  1547. bool htt_rx_msdu_center_freq_ll(htt_pdev_handle pdev,
  1548. struct ol_txrx_peer_t *peer,
  1549. void *mpdu_desc,
  1550. uint16_t *primary_chan_center_freq_mhz,
  1551. uint16_t *contig_chan1_center_freq_mhz,
  1552. uint16_t *contig_chan2_center_freq_mhz,
  1553. uint8_t *phy_mode)
  1554. {
  1555. if (primary_chan_center_freq_mhz)
  1556. *primary_chan_center_freq_mhz = 0;
  1557. if (contig_chan1_center_freq_mhz)
  1558. *contig_chan1_center_freq_mhz = 0;
  1559. if (contig_chan2_center_freq_mhz)
  1560. *contig_chan2_center_freq_mhz = 0;
  1561. if (phy_mode)
  1562. *phy_mode = 0;
  1563. return false;
  1564. }
/*
 * htt_rx_msdu_desc_key_id_ll() - extract the security key-id octet from
 * the rx descriptor of an MPDU's first MSDU.
 * Returns true and fills *key_id only when the descriptor belongs to the
 * first MSDU of the MPDU; otherwise returns false and leaves *key_id
 * untouched.
 */
bool
htt_rx_msdu_desc_key_id_ll(htt_pdev_handle pdev, void *mpdu_desc,
			   uint8_t *key_id)
{
	struct htt_host_rx_desc_base *rx_desc = (struct htt_host_rx_desc_base *)
						mpdu_desc;

	/* the key id field is only valid in the first MSDU's descriptor */
	if (!htt_rx_msdu_first_msdu_flag_ll(pdev, mpdu_desc))
		return false;

	/* NOTE(review): this masks with (MASK >> LSB) instead of the usual
	 * ((word & MASK) >> LSB); the two only agree when the field's LSB
	 * is 0 - confirm against the RX_MSDU_END_1 register layout. */
	*key_id = ((*(((uint32_t *) &rx_desc->msdu_end) + 1)) &
		   (RX_MSDU_END_1_KEY_ID_OCT_MASK >>
		    RX_MSDU_END_1_KEY_ID_OCT_LSB));

	return true;
}
  1578. void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu)
  1579. {
  1580. cdf_nbuf_free(msdu);
  1581. }
  1582. void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu)
  1583. {
  1584. /*
  1585. * The rx descriptor is in the same buffer as the rx MSDU payload,
  1586. * and does not need to be freed separately.
  1587. */
  1588. }
  1589. void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev)
  1590. {
  1591. if (cdf_atomic_dec_and_test(&pdev->rx_ring.refill_ref_cnt)) {
  1592. int num_to_fill;
  1593. num_to_fill = pdev->rx_ring.fill_level -
  1594. pdev->rx_ring.fill_cnt;
  1595. htt_rx_ring_fill_n(pdev,
  1596. num_to_fill /* okay if <= 0 */);
  1597. }
  1598. cdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
  1599. }
/* Pack (preamble, NSS, rate) into the 8-bit AR600P HW rate code:
 * bits [7:6] preamble type, [5:4] NSS, [3:0] rate. */
#define AR600P_ASSEMBLE_HW_RATECODE(_rate, _nss, _pream) \
	(((_pream) << 6) | ((_nss) << 4) | (_rate))

/* Preamble type values used in the AR600P HW rate code (bits [7:6]) */
enum AR600P_HW_RATECODE_PREAM_TYPE {
	AR600P_HW_RATECODE_PREAM_OFDM,
	AR600P_HW_RATECODE_PREAM_CCK,
	AR600P_HW_RATECODE_PREAM_HT,
	AR600P_HW_RATECODE_PREAM_VHT,
};
  1608. /*--- RX In Order Hash Code --------------------------------------------------*/
  1609. /* Initializes the circular linked list */
  1610. static inline void htt_list_init(struct htt_list_node *head)
  1611. {
  1612. head->prev = head;
  1613. head->next = head;
  1614. }
  1615. /* Adds entry to the end of the linked list */
  1616. static inline void htt_list_add_tail(struct htt_list_node *head,
  1617. struct htt_list_node *node)
  1618. {
  1619. head->prev->next = node;
  1620. node->prev = head->prev;
  1621. node->next = head;
  1622. head->prev = node;
  1623. }
  1624. /* Removes the entry corresponding to the input node from the linked list */
  1625. static inline void htt_list_remove(struct htt_list_node *node)
  1626. {
  1627. node->prev->next = node->next;
  1628. node->next->prev = node->prev;
  1629. }
/* Helper macro to iterate through the linked list (forward, excluding
 * the head node itself) */
#define HTT_LIST_ITER_FWD(iter, head) for (iter = (head)->next;	\
					   (iter) != (head);	\
					   (iter) = (iter)->next)

#ifdef RX_HASH_DEBUG
/* Hash cookie related macros: a magic value stamped into every live hash
   entry, checked on lookup to catch corrupted or stale entries */
#define HTT_RX_HASH_COOKIE 0xDEED
#define HTT_RX_HASH_COOKIE_SET(hash_element) \
	((hash_element)->cookie = HTT_RX_HASH_COOKIE)
#define HTT_RX_HASH_COOKIE_CHECK(hash_element) \
	HTT_ASSERT_ALWAYS((hash_element)->cookie == HTT_RX_HASH_COOKIE)
/* Hash count related macros: per-bucket occupancy accounting */
#define HTT_RX_HASH_COUNT_INCR(hash_bucket) \
	((hash_bucket).count++)
#define HTT_RX_HASH_COUNT_DECR(hash_bucket) \
	((hash_bucket).count--)
#define HTT_RX_HASH_COUNT_RESET(hash_bucket) ((hash_bucket).count = 0)
#define HTT_RX_HASH_COUNT_PRINT(hash_bucket) \
	RX_HASH_LOG(cdf_print(" count %d\n", (hash_bucket).count))
#else				/* RX_HASH_DEBUG */
/* Hash cookie related macros */
#define HTT_RX_HASH_COOKIE_SET(hash_element)	/* no-op */
#define HTT_RX_HASH_COOKIE_CHECK(hash_element)	/* no-op */
/* Hash count related macros */
#define HTT_RX_HASH_COUNT_INCR(hash_bucket)	/* no-op */
#define HTT_RX_HASH_COUNT_DECR(hash_bucket)	/* no-op */
#define HTT_RX_HASH_COUNT_PRINT(hash_bucket)	/* no-op */
#define HTT_RX_HASH_COUNT_RESET(hash_bucket)	/* no-op */
#endif /* RX_HASH_DEBUG */
  1659. /* Inserts the given "physical address - network buffer" pair into the
  1660. hash table for the given pdev. This function will do the following:
  1661. 1. Determine which bucket to insert the pair into
  1662. 2. First try to allocate the hash entry for this pair from the pre-allocated
  1663. entries list
  1664. 3. If there are no more entries in the pre-allocated entries list, allocate
  1665. the hash entry from the hash memory pool
  1666. Note: this function is not thread-safe
  1667. Returns 0 - success, 1 - failure */
  1668. int
  1669. htt_rx_hash_list_insert(struct htt_pdev_t *pdev, uint32_t paddr,
  1670. cdf_nbuf_t netbuf)
  1671. {
  1672. int i;
  1673. struct htt_rx_hash_entry *hash_element = NULL;
  1674. i = RX_HASH_FUNCTION(paddr);
  1675. /* Check if there are any entries in the pre-allocated free list */
  1676. if (pdev->rx_ring.hash_table[i].freepool.next !=
  1677. &pdev->rx_ring.hash_table[i].freepool) {
  1678. hash_element =
  1679. (struct htt_rx_hash_entry *)(
  1680. (char *)
  1681. pdev->rx_ring.hash_table[i].freepool.next -
  1682. pdev->rx_ring.listnode_offset);
  1683. if (cdf_unlikely(NULL == hash_element)) {
  1684. HTT_ASSERT_ALWAYS(0);
  1685. return 1;
  1686. }
  1687. htt_list_remove(pdev->rx_ring.hash_table[i].freepool.next);
  1688. } else {
  1689. hash_element = cdf_mem_malloc(sizeof(struct htt_rx_hash_entry));
  1690. if (cdf_unlikely(NULL == hash_element)) {
  1691. HTT_ASSERT_ALWAYS(0);
  1692. return 1;
  1693. }
  1694. hash_element->fromlist = 0;
  1695. }
  1696. hash_element->netbuf = netbuf;
  1697. hash_element->paddr = paddr;
  1698. HTT_RX_HASH_COOKIE_SET(hash_element);
  1699. htt_list_add_tail(&pdev->rx_ring.hash_table[i].listhead,
  1700. &hash_element->listnode);
  1701. RX_HASH_LOG(cdf_print("rx hash: %s: paddr 0x%x netbuf %p bucket %d\n",
  1702. __func__, paddr, netbuf, (int)i));
  1703. HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
  1704. HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
  1705. return 0;
  1706. }
/* Given a physical address this function will find the corresponding network
   buffer from the hash table, unlink it, and return it (consuming the entry).
   Note: this function is not thread-safe */
cdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr)
{
	uint32_t i;
	struct htt_list_node *list_iter = NULL;
	cdf_nbuf_t netbuf = NULL;
	struct htt_rx_hash_entry *hash_entry;

	i = RX_HASH_FUNCTION(paddr);

	HTT_LIST_ITER_FWD(list_iter, &pdev->rx_ring.hash_table[i].listhead) {
		/* recover the entry from its embedded list node */
		hash_entry = (struct htt_rx_hash_entry *)
			     ((char *)list_iter -
			      pdev->rx_ring.listnode_offset);

		HTT_RX_HASH_COOKIE_CHECK(hash_entry);

		if (hash_entry->paddr == paddr) {
			/* Found the entry corresponding to paddr */
			netbuf = hash_entry->netbuf;
			htt_list_remove(&hash_entry->listnode);
			HTT_RX_HASH_COUNT_DECR(pdev->rx_ring.hash_table[i]);
			/* if the rx entry is from the pre-allocated list,
			   return it to the free pool; heap-allocated entries
			   are freed outright */
			if (hash_entry->fromlist)
				htt_list_add_tail(&pdev->rx_ring.hash_table[i].
						  freepool,
						  &hash_entry->listnode);
			else
				cdf_mem_free(hash_entry);

			htt_rx_dbg_rxbuf_reset(pdev, netbuf);
			break;
		}
	}

	RX_HASH_LOG(cdf_print("rx hash: %s: paddr 0x%x, netbuf %p, bucket %d\n",
			      __func__, paddr, netbuf, (int)i));
	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);

	/* a miss means the target indicated a paddr the host never posted */
	if (netbuf == NULL) {
		cdf_print("rx hash: %s: no entry found for 0x%x!!!\n",
			  __func__, paddr);
		HTT_ASSERT_ALWAYS(0);
	}

	return netbuf;
}
/* Initialization function of the rx buffer hash table. This function will
   allocate a hash table of a certain pre-determined size and initialize all
   the elements */
int htt_rx_hash_init(struct htt_pdev_t *pdev)
{
	int i, j;

	/* bucket count must be a power of 2 for the hash function's mask */
	HTT_ASSERT2(CDF_IS_PWR2(RX_NUM_HASH_BUCKETS));

	pdev->rx_ring.hash_table =
		cdf_mem_malloc(RX_NUM_HASH_BUCKETS *
			       sizeof(struct htt_rx_hash_bucket));

	if (NULL == pdev->rx_ring.hash_table) {
		cdf_print("rx hash table allocation failed!\n");
		return 1;
	}

	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
		HTT_RX_HASH_COUNT_RESET(pdev->rx_ring.hash_table[i]);

		/* initialize the hash table buckets */
		htt_list_init(&pdev->rx_ring.hash_table[i].listhead);

		/* initialize the hash table free pool per bucket */
		htt_list_init(&pdev->rx_ring.hash_table[i].freepool);

		/* pre-allocate a pool of entries for this bucket */
		pdev->rx_ring.hash_table[i].entries =
			cdf_mem_malloc(RX_ENTRIES_SIZE *
				       sizeof(struct htt_rx_hash_entry));

		if (NULL == pdev->rx_ring.hash_table[i].entries) {
			cdf_print("rx hash bucket %d entries alloc failed\n",
				  (int)i);
			/* unwind: free the entry pools of all buckets
			   allocated so far, then the table itself */
			while (i) {
				i--;
				cdf_mem_free(pdev->rx_ring.hash_table[i].
					     entries);
			}
			cdf_mem_free(pdev->rx_ring.hash_table);
			pdev->rx_ring.hash_table = NULL;
			return 1;
		}

		/* initialize the free list with pre-allocated entries */
		for (j = 0; j < RX_ENTRIES_SIZE; j++) {
			pdev->rx_ring.hash_table[i].entries[j].fromlist = 1;
			htt_list_add_tail(&pdev->rx_ring.hash_table[i].freepool,
					  &pdev->rx_ring.hash_table[i].
					  entries[j].listnode);
		}
	}

	/* byte offset of the listnode member inside an entry; used to map a
	   list node pointer back to its containing entry (container_of) */
	pdev->rx_ring.listnode_offset =
		cdf_offsetof(struct htt_rx_hash_entry, listnode);

	return 0;
}
  1797. void htt_rx_hash_dump_table(struct htt_pdev_t *pdev)
  1798. {
  1799. uint32_t i;
  1800. struct htt_rx_hash_entry *hash_entry;
  1801. struct htt_list_node *list_iter = NULL;
  1802. for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
  1803. HTT_LIST_ITER_FWD(list_iter,
  1804. &pdev->rx_ring.hash_table[i].listhead) {
  1805. hash_entry =
  1806. (struct htt_rx_hash_entry *)((char *)list_iter -
  1807. pdev->rx_ring.
  1808. listnode_offset);
  1809. cdf_print("hash_table[%d]: netbuf %p paddr 0x%x\n", i,
  1810. hash_entry->netbuf, hash_entry->paddr);
  1811. }
  1812. }
  1813. }
  1814. /*--- RX In Order Hash Code --------------------------------------------------*/
  1815. /* move the function to the end of file
  1816. * to omit ll/hl pre-declaration
  1817. */
  1818. int htt_rx_attach(struct htt_pdev_t *pdev)
  1819. {
  1820. cdf_dma_addr_t paddr;
  1821. #if HTT_PADDR64
  1822. uint32_t ring_elem_size = sizeof(uint64_t);
  1823. #else
  1824. uint32_t ring_elem_size = sizeof(uint32_t);
  1825. #endif /* HTT_PADDR64 */
  1826. pdev->rx_ring.size = htt_rx_ring_size(pdev);
  1827. HTT_ASSERT2(CDF_IS_PWR2(pdev->rx_ring.size));
  1828. pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
  1829. /*
  1830. * Set the initial value for the level to which the rx ring
  1831. * should be filled, based on the max throughput and the worst
  1832. * likely latency for the host to fill the rx ring.
  1833. * In theory, this fill level can be dynamically adjusted from
  1834. * the initial value set here to reflect the actual host latency
  1835. * rather than a conservative assumption.
  1836. */
  1837. pdev->rx_ring.fill_level = htt_rx_ring_fill_level(pdev);
  1838. if (pdev->cfg.is_full_reorder_offload) {
  1839. if (htt_rx_hash_init(pdev))
  1840. goto fail1;
  1841. /* allocate the target index */
  1842. pdev->rx_ring.target_idx.vaddr =
  1843. cdf_os_mem_alloc_consistent(pdev->osdev,
  1844. sizeof(uint32_t),
  1845. &paddr,
  1846. cdf_get_dma_mem_context(
  1847. (&pdev->rx_ring.target_idx),
  1848. memctx));
  1849. if (!pdev->rx_ring.target_idx.vaddr)
  1850. goto fail1;
  1851. pdev->rx_ring.target_idx.paddr = paddr;
  1852. *pdev->rx_ring.target_idx.vaddr = 0;
  1853. } else {
  1854. pdev->rx_ring.buf.netbufs_ring =
  1855. cdf_mem_malloc(pdev->rx_ring.size * sizeof(cdf_nbuf_t));
  1856. if (!pdev->rx_ring.buf.netbufs_ring)
  1857. goto fail1;
  1858. pdev->rx_ring.sw_rd_idx.msdu_payld = 0;
  1859. pdev->rx_ring.sw_rd_idx.msdu_desc = 0;
  1860. }
  1861. pdev->rx_ring.buf.paddrs_ring =
  1862. cdf_os_mem_alloc_consistent(
  1863. pdev->osdev,
  1864. pdev->rx_ring.size * ring_elem_size,
  1865. &paddr,
  1866. cdf_get_dma_mem_context(
  1867. (&pdev->rx_ring.buf),
  1868. memctx));
  1869. if (!pdev->rx_ring.buf.paddrs_ring)
  1870. goto fail2;
  1871. pdev->rx_ring.base_paddr = paddr;
  1872. pdev->rx_ring.alloc_idx.vaddr =
  1873. cdf_os_mem_alloc_consistent(
  1874. pdev->osdev,
  1875. sizeof(uint32_t),
  1876. &paddr,
  1877. cdf_get_dma_mem_context(
  1878. (&pdev->rx_ring.alloc_idx),
  1879. memctx));
  1880. if (!pdev->rx_ring.alloc_idx.vaddr)
  1881. goto fail3;
  1882. pdev->rx_ring.alloc_idx.paddr = paddr;
  1883. *pdev->rx_ring.alloc_idx.vaddr = 0;
  1884. /*
  1885. * Initialize the Rx refill reference counter to be one so that
  1886. * only one thread is allowed to refill the Rx ring.
  1887. */
  1888. cdf_atomic_init(&pdev->rx_ring.refill_ref_cnt);
  1889. cdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
  1890. /* Initialize the Rx refill retry timer */
  1891. cdf_softirq_timer_init(pdev->osdev,
  1892. &pdev->rx_ring.refill_retry_timer,
  1893. htt_rx_ring_refill_retry, (void *)pdev,
  1894. CDF_TIMER_TYPE_SW);
  1895. pdev->rx_ring.fill_cnt = 0;
  1896. #ifdef DEBUG_DMA_DONE
  1897. pdev->rx_ring.dbg_ring_idx = 0;
  1898. pdev->rx_ring.dbg_refill_cnt = 0;
  1899. pdev->rx_ring.dbg_sync_success = 0;
  1900. #endif
  1901. #ifdef HTT_RX_RESTORE
  1902. pdev->rx_ring.rx_reset = 0;
  1903. pdev->rx_ring.htt_rx_restore = 0;
  1904. #endif
  1905. htt_rx_dbg_rxbuf_init(pdev);
  1906. htt_rx_ring_fill_n(pdev, pdev->rx_ring.fill_level);
  1907. if (pdev->cfg.is_full_reorder_offload) {
  1908. cdf_print("HTT: full reorder offload enabled\n");
  1909. htt_rx_amsdu_pop = htt_rx_amsdu_rx_in_order_pop_ll;
  1910. htt_rx_frag_pop = htt_rx_amsdu_rx_in_order_pop_ll;
  1911. htt_rx_mpdu_desc_list_next =
  1912. htt_rx_in_ord_mpdu_desc_list_next_ll;
  1913. } else {
  1914. htt_rx_amsdu_pop = htt_rx_amsdu_pop_ll;
  1915. htt_rx_frag_pop = htt_rx_amsdu_pop_ll;
  1916. htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_ll;
  1917. }
  1918. htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_ll;
  1919. htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
  1920. htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_ll;
  1921. htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_ll;
  1922. htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
  1923. htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_ll;
  1924. htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_ll;
  1925. htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_ll;
  1926. htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_ll;
  1927. htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_ll;
  1928. htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_ll;
  1929. htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_ll;
  1930. htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_ll;
  1931. htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_ll;
  1932. htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_ll;
  1933. return 0; /* success */
  1934. fail3:
  1935. cdf_os_mem_free_consistent(pdev->osdev,
  1936. pdev->rx_ring.size * sizeof(uint32_t),
  1937. pdev->rx_ring.buf.paddrs_ring,
  1938. pdev->rx_ring.base_paddr,
  1939. cdf_get_dma_mem_context((&pdev->rx_ring.buf),
  1940. memctx));
  1941. fail2:
  1942. if (pdev->cfg.is_full_reorder_offload) {
  1943. cdf_os_mem_free_consistent(pdev->osdev,
  1944. sizeof(uint32_t),
  1945. pdev->rx_ring.target_idx.vaddr,
  1946. pdev->rx_ring.target_idx.paddr,
  1947. cdf_get_dma_mem_context((&pdev->
  1948. rx_ring.
  1949. target_idx),
  1950. memctx));
  1951. htt_rx_hash_deinit(pdev);
  1952. } else {
  1953. cdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
  1954. }
  1955. fail1:
  1956. return 1; /* failure */
  1957. }
  1958. #ifdef IPA_OFFLOAD
  1959. #ifdef QCA_WIFI_3_0
/**
 * htt_rx_ipa_uc_alloc_wdi2_rsc() - Allocate WDI2.0 resources
 * @pdev: htt context
 * @rx_ind_ring_elements: rx ring elements
 *
 * Allocates the WDI2.0 (RX2) indication ring and the RX2 process-done
 * index in DMA-coherent memory; the ring is unwound again if the second
 * allocation fails.
 *
 * Return: 0 success, -ENOBUFS on allocation failure
 */
int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
				 unsigned int rx_ind_ring_elements)
{
	/* Allocate RX2 indication ring */
	/* RX2 IND ring element
	 * 4bytes: pointer
	 * 2bytes: VDEV ID
	 * 2bytes: length */
	pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
		cdf_os_mem_alloc_consistent(
			pdev->osdev,
			rx_ind_ring_elements *
			sizeof(struct ipa_uc_rx_ring_elem_t),
			&pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
						 rx2_ind_ring_base),
						memctx));
	if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
		cdf_print("%s: RX IND RING alloc fail", __func__);
		return -ENOBUFS;
	}

	/* RX indication ring size, by bytes */
	pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
		rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
		     pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);

	/* Allocate RX process done index */
	pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
		cdf_os_mem_alloc_consistent(
			pdev->osdev,
			4,
			&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
			/* NOTE(review): the vaddr/paddr are stored in the WDI2
			 * (rx2_*) fields, yet the dma context passed here is
			 * the WDI1 one (rx_ipa_prc_done_idx). The free path
			 * uses the same WDI1 context, so alloc/free pair up,
			 * but confirm this cross-use is intentional. */
			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
						 rx_ipa_prc_done_idx),
						memctx));
	if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
		cdf_print("%s: RX PROC DONE IND alloc fail", __func__);
		/* unwind the RX2 indication ring allocated above */
		cdf_os_mem_free_consistent(
			pdev->osdev,
			pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
						 rx2_ind_ring_base),
						memctx));
		return -ENOBUFS;
	}
	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
	return 0;
}
  2017. #else
  2018. int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
  2019. unsigned int rx_ind_ring_elements)
  2020. {
  2021. return 0;
  2022. }
  2023. #endif
  2024. /**
  2025. * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
  2026. * @pdev: htt context
  2027. * @rx_ind_ring_size: rx ring size
  2028. *
  2029. * Return: 0 success
  2030. */
  2031. int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
  2032. unsigned int rx_ind_ring_elements)
  2033. {
  2034. int ret = 0;
  2035. /* Allocate RX indication ring */
  2036. /* RX IND ring element
  2037. * 4bytes: pointer
  2038. * 2bytes: VDEV ID
  2039. * 2bytes: length */
  2040. pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
  2041. cdf_os_mem_alloc_consistent(
  2042. pdev->osdev,
  2043. rx_ind_ring_elements *
  2044. sizeof(struct ipa_uc_rx_ring_elem_t),
  2045. &pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
  2046. cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
  2047. rx_ind_ring_base),
  2048. memctx));
  2049. if (!pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
  2050. cdf_print("%s: RX IND RING alloc fail", __func__);
  2051. return -ENOBUFS;
  2052. }
  2053. /* RX indication ring size, by bytes */
  2054. pdev->ipa_uc_rx_rsc.rx_ind_ring_size =
  2055. rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
  2056. cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
  2057. pdev->ipa_uc_rx_rsc.rx_ind_ring_size);
  2058. /* Allocate RX process done index */
  2059. pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr =
  2060. cdf_os_mem_alloc_consistent(
  2061. pdev->osdev,
  2062. 4,
  2063. &pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
  2064. cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
  2065. rx_ipa_prc_done_idx),
  2066. memctx));
  2067. if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
  2068. cdf_print("%s: RX PROC DONE IND alloc fail", __func__);
  2069. cdf_os_mem_free_consistent(
  2070. pdev->osdev,
  2071. pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
  2072. pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
  2073. pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
  2074. cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
  2075. rx_ind_ring_base),
  2076. memctx));
  2077. return -ENOBUFS;
  2078. }
  2079. cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr, 4);
  2080. ret = htt_rx_ipa_uc_alloc_wdi2_rsc(pdev, rx_ind_ring_elements);
  2081. return ret;
  2082. }
  2083. #ifdef QCA_WIFI_3_0
  2084. /**
  2085. * htt_rx_ipa_uc_free_wdi2_rsc() - Free WDI2.0 resources
  2086. * @pdev: htt context
  2087. *
  2088. * Return: None
  2089. */
  2090. void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
  2091. {
  2092. if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
  2093. cdf_os_mem_free_consistent(
  2094. pdev->osdev,
  2095. pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
  2096. pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
  2097. pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
  2098. cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
  2099. rx2_ind_ring_base),
  2100. memctx));
  2101. }
  2102. if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
  2103. cdf_os_mem_free_consistent(
  2104. pdev->osdev,
  2105. 4,
  2106. pdev->ipa_uc_rx_rsc.
  2107. rx_ipa_prc_done_idx.vaddr,
  2108. pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
  2109. cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
  2110. rx_ipa_prc_done_idx),
  2111. memctx));
  2112. }
  2113. }
  2114. #else
  2115. void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
  2116. {
  2117. return;
  2118. }
  2119. #endif
  2120. int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
  2121. {
  2122. if (pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
  2123. cdf_os_mem_free_consistent(
  2124. pdev->osdev,
  2125. pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
  2126. pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
  2127. pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
  2128. cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
  2129. rx_ind_ring_base),
  2130. memctx));
  2131. }
  2132. if (pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
  2133. cdf_os_mem_free_consistent(
  2134. pdev->osdev,
  2135. 4,
  2136. pdev->ipa_uc_rx_rsc.
  2137. rx_ipa_prc_done_idx.vaddr,
  2138. pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
  2139. cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
  2140. rx2_ipa_prc_done_idx),
  2141. memctx));
  2142. }
  2143. htt_rx_ipa_uc_free_wdi2_rsc(pdev);
  2144. return 0;
  2145. }
  2146. #endif /* IPA_OFFLOAD */