dp_tx.c
/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include <wlan_cfg.h>
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#define DP_TX_GET_DESC_POOL_ID(vdev) qdf_get_cpu()
#define DP_TX_GET_RING_ID(vdev) vdev->pdev->soc->tx_ring_map[qdf_get_cpu()]
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0
/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * A TX packet queue is identified by two IDs: a software descriptor pool id
 * and a DMA ring id. Depending on the Tx features and hardware configuration,
 * the id combination can differ. For example:
 * With XPS enabled, all Tx descriptor pools and DMA rings are assigned per
 * CPU id.
 * With no XPS and lock-based resource protection, descriptor pool ids are
 * different for each vdev, and the DMA ring id is the same as the single
 * pdev id.
 *
 * Return: None
 */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s, pool_id:%d ring_id: %d\n",
			__func__, queue->desc_pool_id, queue->ring_id);

	return;
}
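
/*
 * Illustrative sketch only (guarded out of the build; the helper name is
 * hypothetical): how a Tx path caller is expected to obtain the queue ids
 * before allocating a descriptor and enqueuing to hardware.
 */
#if 0
static void dp_tx_get_queue_example(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_tx_queue queue;

	dp_tx_get_queue(vdev, nbuf, &queue);
	/*
	 * queue.desc_pool_id selects the software Tx descriptor pool;
	 * queue.ring_id selects the TCL data ring used for HW enqueue.
	 */
}
#endif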
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_release() - Release the tso segment
 *                            after unmapping all the fragments
 *
 * @soc: soc handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO desc is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO common info is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc;

		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, false);
		} else {
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, true);
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
		}
		dp_tx_tso_desc_free(soc,
				tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	return;
}
#endif
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	qdf_atomic_dec(&pdev->num_tx_outstanding);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&pdev->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
			hal_tx_comp_get_buffer_source(&tx_desc->comp))
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"Tx Completion Release desc %d status %d outstanding %d\n",
		tx_desc->id, comp_status,
		qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		uint32_t *meta_data)
{
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *) meta_data;
	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev) {
		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (hdr == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"Error in filling HTT metadata\n");
			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
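
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical): the
 * 8-byte rounding used above. (x + 7) & ~0x7 rounds x up to the next
 * multiple of 8, so the metadata pushed in front of the payload keeps the
 * buffer pointer given to hardware 8-byte aligned.
 */
#if 0
static uint8_t dp_tx_align8_example(uint8_t size)
{
	return (size + 7) & ~0x7;	/* 13 -> 16, 16 -> 16, 17 -> 24 */
}
#endif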
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *) &tso_seg->tso_flags;

	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
			tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
			tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	return;
}
#endif
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg() - Loop through the tso segments
 *                        allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg(struct dp_soc *soc,
	struct qdf_tso_seg_elem_t *free_seg,
	struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
			msdu_info->tx_queue.desc_pool_id,
			free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg() - Loop through the tso num segments
 *                            allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso num segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
	struct qdf_tso_num_seg_elem_t *free_seg,
	struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tso_num_seg_free(soc,
			msdu_info->tx_queue.desc_pool_id,
			free_seg);
		free_seg = next_seg;
	}
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			struct qdf_tso_seg_elem_t *free_seg =
				tso_info->tso_seg_list;

			dp_tx_free_tso_seg(soc, free_seg, msdu_info);
			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
			msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		/* Bug: free tso_num_seg and tso_seg */
		/* Free the already allocated num of segments */
		struct qdf_tso_seg_elem_t *free_seg =
			tso_info->tso_seg_list;

		TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
			__func__);
		dp_tx_free_tso_seg(soc, free_seg, msdu_info);
		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
			msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
		dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
				msdu_info);
		return QDF_STATUS_E_INVAL;
	}

	tso_info->curr_seg = tso_info->tso_seg_list;

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif
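
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical): the
 * intended calling pattern for dp_tx_prepare_tso(). On success the caller
 * walks tso_info.curr_seg and enqueues one MSDU per segment; on failure the
 * segment lists have already been freed and the nbuf goes back to the caller.
 */
#if 0
static QDF_STATUS dp_tx_prepare_tso_example(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	if (dp_tx_prepare_tso(vdev, msdu, msdu_info) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_NOMEM;	/* nothing left to clean up */

	/* one hardware enqueue per TSO segment */
	while (msdu_info->u.tso_info.curr_seg) {
		/* ... prepare a Tx descriptor and call dp_tx_hw_enqueue() ... */
		msdu_info->u.tso_info.curr_seg =
			msdu_info->u.tso_info.curr_seg->next;
	}

	return QDF_STATUS_SUCCESS;
}
#endif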
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
		return NULL;
	}

	if (qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
				&msdu_info->meta_data[0],
				sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
		HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
				seg_info->frags[i].paddr_lo,
				seg_info->frags[i].paddr_hi,
				seg_info->frags[i].len);
		}
		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
				&cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @meta_data: Meta data for mesh frames
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		uint32_t *meta_data)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct ether_header *eh;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (qdf_unlikely(!tx_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s Tx Desc Alloc Failed\n", __func__);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
		return NULL;
	}

	/* Flow control/Congestion Control counters */
	qdf_atomic_inc(&pdev->num_tx_outstanding);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	/**
	 * For non-scatter regular frames, the buffer pointer is directly
	 * programmed in the TCL input descriptor instead of using an MSDU
	 * extension descriptor. For this case, the HW requirement is that
	 * the descriptor should always point to an 8-byte aligned address.
	 *
	 * So we add alignment pad to the start of the buffer, and specify
	 * the actual start of data through pkt_offset
	 */
	align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
	if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_push_head failed\n");
		goto failure;
	}
	tx_desc->pkt_offset = align_pad;
	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which
	 * include transmit rate, power, priority, channel, channel bandwidth,
	 * nss etc. These are filled in HTT MSDU descriptor and sent in frame
	 * pre-header. These frames are sent as exception packets to firmware.
	 *
	 * HTT Metadata should be ensured to be a multiple of 8 bytes,
	 * to get an 8-byte aligned start address along with align_pad added
	 * above.
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             |  ^    in HW descriptor (aligned)
	 *  |      HTT Metadata           |  |
	 *  |                             |  |
	 *  |                             |  | Packet Offset given in descriptor
	 *  |                             |  |
	 *  |-----------------------------|  |
	 *  |       Alignment Pad         |  v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |        SKB Data             |        (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely(vdev->mesh_vdev ||
				(vdev->opmode == wlan_op_mode_ocb))) {
		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
				meta_data);
		if (htt_hdr_size == 0)
			goto failure;
		tx_desc->pkt_offset += htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
	}

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
				qdf_nbuf_map(soc->osdev, nbuf,
					QDF_DMA_TO_DEVICE))) {
		/* Handle failure */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_map failed\n");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		goto failure;
	}

	if (qdf_unlikely(vdev->nawds_enabled)) {
		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
			is_exception = 1;
		}
	}

#if !TQM_BYPASS_WAR
	if (is_exception)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&pdev->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
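
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical): the
 * buffer layout built above. Pushing align_pad bytes makes the buffer
 * pointer 8-byte aligned, and pkt_offset tells HW where the real payload
 * starts; e.g. an skb whose data begins at ...0x1003 gets align_pad = 3, so
 * the HW pointer lands on ...0x1000 with pkt_offset = 3.
 */
#if 0
static void dp_tx_align_pad_example(qdf_nbuf_t nbuf)
{
	uint8_t align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;

	if (qdf_nbuf_push_head(nbuf, align_pad) != NULL) {
		/*
		 * qdf_nbuf_data(nbuf) is now 8-byte aligned; the HW
		 * descriptor carries pkt_offset = align_pad.
		 */
	}
}
#endif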
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 *                        frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
		return NULL;
	}

	/* Flow control/Congestion Control counters */
	qdf_atomic_inc(&pdev->num_tx_outstanding);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"%s Tx Extension Descriptor Alloc Fail\n",
				__func__);
		goto failure;
	}

#if TQM_BYPASS_WAR
	/* Temporary WAR due to TQM VP issues */
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
	qdf_atomic_inc(&pdev->num_tx_exception);
#endif
	if (qdf_unlikely(vdev->mesh_vdev))
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	return tx_desc;
failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success,
 *         NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	int32_t i;
	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if ((qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
			&& (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU)) {
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
	}

	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
				QDF_DMA_TO_DEVICE)) {
		qdf_print("dma map error\n");
		DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
		seg_info->frags[i].paddr_lo =
			qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_hi = 0x0;
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *) curr_nbuf;
		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;
}
/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_RESOURCES when the TCL ring is full
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
		struct dp_tx_desc_s *tx_desc, uint8_t tid,
		uint16_t fw_metadata, uint8_t ring_id)
{
	uint8_t type;
	uint16_t length;
	void *hal_tx_desc, *hal_tx_desc_cached;
	qdf_dma_addr_t dma_addr;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];

	/* Return Buffer Manager ID */
	uint8_t bm_id = ring_id;
	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;

	hal_tx_desc_cached = (void *) cached_desc;
	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
		length = HAL_TX_EXT_DESC_WITH_META_DATA;
		type = HAL_TX_BUF_TYPE_EXT_DESC;
		dma_addr = tx_desc->msdu_ext_desc->paddr;
	} else {
		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
			dma_addr, bm_id, tx_desc->id, type);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
			vdev->dscp_tid_map_id);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u\n",
			__func__, length, type, (uint64_t)dma_addr,
			tx_desc->pkt_offset, tx_desc->id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
			vdev->hal_desc_addr_search_flags);

	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
		|| qdf_nbuf_is_tso(tx_desc->nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);

	/* Sync cached descriptor with HW */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);

	if (!hal_tx_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s TCL ring full ring_id:%d\n", __func__, ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return QDF_STATUS_E_RESOURCES;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

	/*
	 * If one packet is enqueued in HW, PM usage count needs to be
	 * incremented by one to prevent future runtime suspend. This
	 * should be tied with the success of enqueuing. It will be
	 * decremented after the packet has been sent.
	 */
	hif_pm_runtime_get_noresume(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}
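
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical): the
 * cache-then-sync pattern used above. The descriptor is fully built in a
 * stack buffer first and only copied into the ring once a ring entry is
 * known to be available, so a full ring never leaves a half-written
 * hardware descriptor behind.
 */
#if 0
static QDF_STATUS dp_tx_cached_desc_example(struct dp_soc *soc,
		void *hal_srng)
{
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
	void *hal_tx_desc;

	/* 1) build the descriptor in local (cached) memory */
	qdf_mem_zero(cached_desc, HAL_TX_DESC_LEN_BYTES);
	/* ... hal_tx_desc_set_*() calls fill cached_desc here ... */

	/* 2) reserve a ring entry; NULL means the TCL ring is full */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
	if (!hal_tx_desc)
		return QDF_STATUS_E_RESOURCES;

	/* 3) copy the finished descriptor into the ring entry */
	hal_tx_desc_sync(cached_desc, hal_tx_desc);

	return QDF_STATUS_SUCCESS;
}
#endif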
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu descriptor in which the classified TID is stored
 *
 * Extract the DSCP or PCP information from the frame and map it into a TID
 * value. Software-based TID classification is required when more than 2
 * DSCP-TID mapping tables are needed; hardware supports only 2 DSCP-TID
 * mapping tables.
 *
 * Return: void
 */
static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	struct ether_header *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (struct ether_header *) nbuf->data;
		hdr_ptr = eh->ether_dhost;
		L3datap = hdr_ptr + sizeof(struct ether_header);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *) nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;

	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (IS_LLC_PRESENT(ether_type)) {
		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
				sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_8021Q)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
					sizeof(*llcHdr);
			ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN
					+ sizeof(*llcHdr) +
					sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(struct ether_header) +
				sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_8021Q)) {
			evh = (qdf_ethervlan_header_t *) eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;

		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/* TODO
		 * use flowlabel
		 * igmp/mld cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;

		ver_pri_flowlabel = *(unsigned long *) L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}
	return;
}
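
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical): the
 * DSCP extraction performed above. The low two TOS bits are ECN and are
 * shifted out; e.g. an IPv4 TOS byte of 0xB8 (Expedited Forwarding) yields
 * DSCP 46, which is then looked up in the per-pdev DSCP-TID map.
 */
#if 0
static uint8_t dp_tx_dscp_example(uint8_t ip_tos)
{
	/* assumes DP_IP_DSCP_SHIFT == 2 and DP_IP_DSCP_MASK == 0x3f */
	return (ip_tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK; /* 0xB8 -> 46 */
}
#endif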
#ifdef CONVERGED_TDLS_ENABLE
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc->vdev) {
		if (tx_desc->vdev->is_tdls_frame)
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
		tx_desc->vdev->is_tdls_frame = false;
	}
}

/**
 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
 * @tx_desc: TX descriptor
 * @vdev: datapath vdev handle
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
		struct dp_vdev *vdev)
{
	struct hal_tx_completion_status ts = {0};
	qdf_nbuf_t nbuf = tx_desc->nbuf;

	hal_tx_comp_get_status(&tx_desc->comp, &ts);
	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, ts.status);
		return;
	}
}
#endif
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @tx_q: Tx queue to be used for this Tx frame
 * @meta_data: Meta data for mesh frames
 * @peer_id: peer_id of the peer in case of NAWDS frames
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		uint8_t tid, struct dp_tx_queue *tx_q,
		uint32_t *meta_data, uint16_t peer_id)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
	uint16_t htt_tcl_metadata = 0;

	HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 0);

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
			meta_data);
	if (!tx_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
			  __func__, vdev, tx_q->desc_pool_id);
		return nbuf;
	}

	dp_tx_update_tdls_flags(tx_desc);

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		goto fail_return;
	}

	if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
				HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
				peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
			htt_tcl_metadata, tx_q->ring_id);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
			  __func__, tx_desc, tx_q->ring_id);
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		goto fail_return;
	}

	nbuf = NULL;

fail_return:
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_srng);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
	}

	return nbuf;
}
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
static noinline
#else
static
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;

	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		return nbuf;
	}

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;

	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
				tx_q->desc_pool_id);

		if (!tx_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
				  __func__, vdev, tx_q->desc_pool_id);

			if (msdu_info->frm_type == dp_tx_frm_me) {
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
			}
			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
			vdev->htt_tcl_metadata, tx_q->ring_id);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
				  __func__, tx_desc, tx_q->ring_id);

			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
			}
		}

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
				(msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			}
		}

		i++;
	}

	nbuf = NULL;

done:
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_srng);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
	}

	return nbuf;
}
/**
 * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
 *                      for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
				QDF_DMA_TO_DEVICE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"dma map error\n");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

		qdf_nbuf_free(nbuf);
		return NULL;
	}

	seg_info->frags[0].paddr_lo = qdf_nbuf_get_frag_paddr(nbuf, 0);
	seg_info->frags[0].paddr_hi = 0;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *) nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"frag dma map error\n");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

			qdf_nbuf_free(nbuf);
			return NULL;
		}

		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t) paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
}
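
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical): how a
 * 64-bit DMA address is split into the paddr_lo/paddr_hi pair carried in
 * each fragment entry above and consumed by hal_tx_ext_desc_set_buffer().
 */
#if 0
static void dp_tx_paddr_split_example(qdf_dma_addr_t paddr,
		uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t) paddr;				/* low 32 bits */
	*hi = (uint32_t) (((uint64_t) paddr) >> 32);	/* high 32 bits */
}
#endif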
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *                                  and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	nbuf = qdf_nbuf_unshare(nbuf);
	if (nbuf == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_unshare failed\n");
		return nbuf;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	qdf_mem_set(meta_data, 0, sizeof(struct htt_tx_msdu_desc_ext2_t));

	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
	else
		msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_pull_head failed\n");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s , Meta hdr %0x %0x %0x %0x %0x\n",
			__func__, msdu_info->meta_data[0],
			msdu_info->meta_data[1],
			msdu_info->meta_data[2],
			msdu_info->meta_data[3],
			msdu_info->meta_data[4]);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}
#endif
/**
 * dp_tx_prepare_nawds() - Transmit NAWDS frames
 * @vdev: dp_vdev handle
 * @nbuf: skb
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @tx_q: Tx queue to be used for this Tx frame
 * @meta_data: Meta data for mesh
 * @peer_id: peer_id of the peer in case of NAWDS frames
 *
 * Return: NULL on success,
 *         nbuf on failure
 */
  1229. static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1230. uint8_t tid, struct dp_tx_queue *tx_q, uint32_t *meta_data,
  1231. uint32_t peer_id)
  1232. {
  1233. struct dp_peer *peer = NULL;
  1234. qdf_nbuf_t nbuf_copy;
  1235. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  1236. if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
  1237. (peer->nawds_enabled || peer->bss_peer)) {
  1238. nbuf_copy = qdf_nbuf_copy(nbuf);
  1239. if (!nbuf_copy) {
  1240. QDF_TRACE(QDF_MODULE_ID_DP,
  1241. QDF_TRACE_LEVEL_ERROR,
  1242. "nbuf copy failed");
  1243. }
  1244. peer_id = peer->peer_ids[0];
  1245. nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, tid,
  1246. tx_q, meta_data, peer_id);
  1247. if (nbuf_copy != NULL) {
  1248. qdf_nbuf_free(nbuf);
  1249. return nbuf_copy;
  1250. }
  1251. }
  1252. }
  1253. if (peer_id == HTT_INVALID_PEER)
  1254. return nbuf;
  1255. qdf_nbuf_free(nbuf);
  1256. return NULL;
  1257. }

/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
 * cases
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s seg_info;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	uint16_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf_mesh = NULL;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s , skb %0x:%0x:%0x:%0x:%0x:%0x\n",
			__func__, nbuf->data[0], nbuf->data[1], nbuf->data[2],
			nbuf->data[3], nbuf->data[4], nbuf->data[5]);

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
				&msdu_info);
		if (nbuf_mesh == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"Extracting mesh metadata failed\n");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 * Table 1 - Default DSCP-TID mapping table
	 * Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	if (vdev->dscp_tid_map_id > 1)
		dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s TSO frame %pK\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
				qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"%s tso_prepare fail vdev_id:%d\n",
					__func__, vdev->vdev_id);
			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s non-TSO SG frame %pK\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"%s Mcast frm for ME %pK\n", __func__, vdev);
			DP_STATS_INC_PKT(vdev,
					tx_i.mcast_en.mcast_pkt, 1,
					qdf_nbuf_len(nbuf));
			if (dp_tx_prepare_send_me(vdev, nbuf)) {
				qdf_nbuf_free(nbuf);
				return NULL;
			}
			return nbuf;
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (nbuf == NULL)
			return NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s Raw frame %pK\n", __func__, vdev);

		goto send_multiple;
	}

	if (vdev->nawds_enabled) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			nbuf = dp_tx_prepare_nawds(vdev, nbuf, msdu_info.tid,
					&msdu_info.tx_queue,
					msdu_info.meta_data, peer_id);
			return nbuf;
		}
	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info.tid,
			&msdu_info.tx_queue, msdu_info.meta_data, peer_id);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	return nbuf;
}
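
/*
 * Illustrative caller sketch (not part of the driver): a minimal OSIF
 * transmit hook handing a frame to dp_tx_send(). "struct osif_priv" and
 * its dp_vdev member are hypothetical; the NULL-on-success /
 * nbuf-on-failure convention is the one documented above.
 *
 *	static netdev_tx_t osif_start_xmit(struct sk_buff *skb,
 *					   struct net_device *dev)
 *	{
 *		struct osif_priv *priv = netdev_priv(dev);
 *		qdf_nbuf_t nbuf = (qdf_nbuf_t)skb;
 *
 *		nbuf = dp_tx_send(priv->dp_vdev, nbuf);
 *		if (nbuf)
 *			qdf_nbuf_free(nbuf);	// frame was not consumed
 *		return NETDEV_TX_OK;
 *	}
 */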

/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;

	vdev = tx_desc->vdev;
	qdf_assert(vdev);

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s Tx reinject path\n", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	if (!vdev->osif_proxy_arp) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"function pointer to proxy arp not present\n");
		return;
	}

	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
					(peer->bss_peer || peer->nawds_enabled)
					&& !(vdev->osif_proxy_arp(
							vdev->osif_vdev,
							nbuf))) {
				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
							QDF_TRACE_LEVEL_DEBUG,
							FL("nbuf copy failed"));
					break;
				}

				if (peer->nawds_enabled)
					peer_id = peer->peer_ids[0];
				else
					peer_id = HTT_INVALID_PEER;

				nbuf_copy = dp_tx_send_msdu_single(vdev,
						nbuf_copy, msdu_info.tid,
						&msdu_info.tx_queue,
						msdu_info.meta_data, peer_id);

				if (nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
							QDF_TRACE_LEVEL_DEBUG,
							FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				}
			}
		}
	}

	qdf_nbuf_free(nbuf);

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev = tx_desc->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx inspect path\n",
			__func__);

	qdf_assert(pdev);

	soc = pdev->soc;

	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

#ifdef FEATURE_PERPKT_INFO
static QDF_STATUS
dp_send_compl_to_stack(struct dp_soc *soc, struct dp_tx_desc_s *desc,
		uint16_t peer_id, uint16_t ppdu_id)
{
	struct tx_capture_hdr *ppdu_hdr;
	struct ethhdr *eh;
	struct dp_peer *peer = NULL;
	qdf_nbuf_t netbuf = desc->nbuf;

	if (!desc->pdev->tx_sniffer_enable)
		return QDF_STATUS_E_NOSUPPORT;

	eh = (struct ethhdr *)(netbuf->data);

	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
		dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}
	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	/* TA is the transmitter, i.e. the frame's source address */
	qdf_mem_copy(ppdu_hdr->ta, (eh->h_source), IEEE80211_ADDR_LEN);
	ppdu_hdr->ppdu_id = ppdu_id;
	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
			IEEE80211_ADDR_LEN);
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			netbuf, peer_id,
			WDI_NO_VAL, desc->pdev->pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS
dp_send_compl_to_stack(struct dp_soc *soc, struct dp_tx_desc_s *desc,
		uint16_t peer_id, uint16_t ppdu_id)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif

/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
		struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = desc->vdev;
	qdf_nbuf_t nbuf = desc->nbuf;
	struct hal_tx_completion_status ts = {0};

	/* callers guarantee a valid desc; the old NULL check here came
	 * after desc had already been dereferenced and was dead code
	 */
	hal_tx_comp_get_status(&desc->comp, &ts);

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
		dp_non_std_tx_comp_free_buff(desc, vdev);
		return;
	}

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			/* If no segments remain, the completion of the
			 * last TSO segment unmaps and frees the nbuf
			 */
			if (!DP_DESC_NUM_FRAG(desc)) {
				qdf_nbuf_unmap(soc->osdev, nbuf,
						QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(nbuf);
				return;
			}
		}
	}

	if (desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(desc->pdev, desc->me_buffer);

	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);

	if (dp_send_compl_to_stack(soc, desc, ts.peer_id, ts.ppdu_id) ==
			QDF_STATUS_SUCCESS)
		return;

	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
	} else {
		vdev->osif_tx_free_ext((nbuf));
	}
}

/**
 * dp_tx_mec_handler() - Tx MEC Notify Handler
 * @vdev: pointer to dp dev handler
 * @status: Tx completion status from HTT descriptor
 *
 * Handles MEC notify event sent from fw to Host
 *
 * Return: none
 */
#ifdef FEATURE_WDS
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	struct dp_soc *soc;
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	struct dp_peer *peer;
	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;

	soc = vdev->pdev->soc;
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	peer = TAILQ_FIRST(&vdev->peer_list);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("peer is NULL"));
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s Tx MEC Handler\n",
			__func__);

	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
			status[(DP_MAC_ADDR_LEN - 2) + i];

	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN) &&
			!dp_peer_add_ast(soc, peer, mac_addr, 2)) {
		soc->cdp_soc.ol_ops->peer_add_wds_entry(
				vdev->pdev->osif_pdev,
				mac_addr,
				vdev->mac_addr.raw,
				flags);
	}
}
#else
static void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
}
#endif

/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_soc *soc;
	uint32_t *htt_status_word = (uint32_t *) status;

	qdf_assert(tx_desc->pdev);

	pdev = tx_desc->pdev;
	vdev = tx_desc->vdev;
	soc = pdev->soc;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		dp_tx_comp_free_buf(soc, tx_desc);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		dp_tx_reinject_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
	{
		dp_tx_mec_handler(vdev, status);
		break;
	}
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s Invalid HTT tx_status %d\n",
				__func__, tx_status);
		break;
	}
}

#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *					   in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	if (!tx_desc->msdu_ext_desc) {
		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"netbuf %pK offset %d\n",
					netbuf, tx_desc->pkt_offset);
			return;
		}
	}

	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"netbuf %pK offset %zu\n", netbuf,
				sizeof(struct meta_hdr_s));
		return;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->channel = tx_desc->pdev->operating_channel;
}
#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
}
#endif

/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 * @peer: Handle to DP peer
 * @ts: pointer to HAL Tx completion stats
 * @length: MSDU length
 *
 * Return: None
 */
static void dp_tx_update_peer_stats(struct dp_peer *peer,
		struct hal_tx_completion_status *ts, uint32_t length)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);

	/* "!a == b" binds as "(!a) == b"; compare with != as intended */
	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
		return;

	DP_STATS_INCC(peer, tx.tx_failed, 1,
			!(ts->status == HAL_TX_TQM_RR_FRAME_ACKED));

	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));

	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_TX));

	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
		return;

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
			((ts->mcs >= MAX_MCS_11A) && (ts->pkt_type == DOT11_A)));

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
			((ts->mcs <= MAX_MCS_11A) && (ts->pkt_type == DOT11_A)));

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
			((ts->mcs >= MAX_MCS_11B) && (ts->pkt_type == DOT11_B)));

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
			((ts->mcs <= MAX_MCS_11B) && (ts->pkt_type == DOT11_B)));

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
			((ts->mcs >= MAX_MCS_11A) && (ts->pkt_type == DOT11_N)));

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
			((ts->mcs <= MAX_MCS_11A) && (ts->pkt_type == DOT11_N)));

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
			((ts->mcs >= MAX_MCS_11AC) && (ts->pkt_type == DOT11_AC)));

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
			((ts->mcs <= MAX_MCS_11AC) && (ts->pkt_type == DOT11_AC)));

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[MAX_MCS], 1,
			((ts->mcs >= (MAX_MCS - 1)) && (ts->pkt_type == DOT11_AX)));

	DP_STATS_INCC(peer, tx.pkt_type[ts->pkt_type].mcs_count[ts->mcs], 1,
			((ts->mcs <= (MAX_MCS - 1)) && (ts->pkt_type == DOT11_AX)));

	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
	DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);

	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
				&peer->stats, ts->peer_id,
				UPDATE_PEER_STATS);
	}
}

/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @tx_desc: software descriptor head pointer
 * @length: packet length
 *
 * Return: none
 */
static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
		uint32_t length)
{
	struct hal_tx_completion_status ts;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = tx_desc->vdev;
	struct dp_peer *peer = NULL;

	hal_tx_comp_get_status(&tx_desc->comp, &ts);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"--------------------\n"
			"Tx Completion Stats:\n"
			"--------------------\n"
			"ack_frame_rssi = %d\n"
			"first_msdu = %d\n"
			"last_msdu = %d\n"
			"msdu_part_of_amsdu = %d\n"
			"rate_stats valid = %d\n"
			"bw = %d\n"
			"pkt_type = %d\n"
			"stbc = %d\n"
			"ldpc = %d\n"
			"sgi = %d\n"
			"mcs = %d\n"
			"ofdma = %d\n"
			"tones_in_ru = %d\n"
			"tsf = %d\n"
			"ppdu_id = %d\n"
			"transmit_cnt = %d\n"
			"tid = %d\n"
			"peer_id = %d\n",
			ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
			ts.msdu_part_of_amsdu, ts.valid, ts.bw,
			ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
			ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
			ts.ppdu_id, ts.transmit_cnt, ts.tid,
			ts.peer_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid vdev");
		goto out;
	}

	soc = vdev->pdev->soc;

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));

	/* Update per-packet stats */
	if (qdf_unlikely(vdev->mesh_vdev))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);

	/* Update peer level stats */
	peer = dp_peer_find_by_id(soc, ts.peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid peer");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}

	dp_tx_update_peer_stats(peer, &ts, length);

out:
	return;
}

/**
 * dp_tx_comp_process_desc() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 *
 * This function will process batch of descriptors reaped by dp_tx_comp_handler
 * and release the software descriptors after processing is complete
 *
 * Return: none
 */
static void dp_tx_comp_process_desc(struct dp_soc *soc,
		struct dp_tx_desc_s *comp_head)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts = {0};
	uint32_t length;
	struct dp_peer *peer;

	DP_HIST_INIT();
	desc = comp_head;

	while (desc) {
		hal_tx_comp_get_status(&desc->comp, &ts);
		peer = dp_peer_find_by_id(soc, ts.peer_id);
		length = qdf_nbuf_len(desc->nbuf);

		/* Process Tx status in descriptor */
		if (soc->process_tx_status ||
				(desc->vdev && desc->vdev->mesh_vdev))
			dp_tx_comp_process_tx_status(desc, length);

		dp_tx_comp_free_buf(soc, desc);

		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);

		next = desc->next;
		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}

	DP_TX_HIST_STATS_PER_PDEV();
}

/**
 * dp_tx_comp_handler() - Tx completion handler
 * @soc: core txrx main context
 * @hal_srng: completion ring
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle error
 * conditions
 *
 * Return: number of descriptors charged against @quota
 */
uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed;
	uint32_t count;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %pK\n",
				__func__, __LINE__, hal_srng);
		return 0;
	}

	num_processed = 0;
	count = 0;

	/* Find head descriptor from completion ring */
	while (qdf_likely(tx_comp_hal_desc =
			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {

		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert
		 */
		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx comp release_src != TQM | FW");
			qdf_assert_always(0);
		}

		/* Get descriptor id */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

		/* Pool ID is out of limit. Error */
		if (pool_id > wlan_cfg_get_num_tx_desc_pool(
					soc->wlan_cfg_ctx)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx Comp pool id %d not valid",
					pool_id);
			qdf_assert_always(0);
		}

		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS,
				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS);

		/* Pool id is not matching. Error */
		if (tx_desc && (tx_desc->pool_id != pool_id)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx Comp pool id %d not matched %d",
					pool_id, tx_desc->pool_id);
			qdf_assert_always(0);
		}

		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Txdesc invalid, flgs = %x,id = %d",
					tx_desc->flags, tx_desc_id);
			qdf_assert_always(0);
		}

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
					htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
					htt_tx_status);
		} else {
			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					&tx_desc->comp, soc->process_tx_status);
		}

		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
		/* Decrement PM usage count if the packet has been sent. */
		hif_pm_runtime_put(soc->hif_handle);

		/* Stop once the processed packet count reaches the quota */
		if (num_processed >= quota)
			break;

		count++;
	}

	hal_srng_access_end(soc->hal_soc, hal_srng);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc(soc, head_desc);

	return num_processed;
}
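
/*
 * Illustrative caller sketch (assumption: a NAPI-style poll context and
 * the tx_comp_ring srng lookup; both are placeholders, not an interface
 * defined in this file). dp_tx_comp_handler() returns the count charged
 * against @quota, so the poller can tell whether the ring still has work.
 *
 *	static uint32_t osif_tx_comp_poll(struct dp_soc *soc, int ring,
 *					  uint32_t budget)
 *	{
 *		void *hal_srng = soc->tx_comp_ring[ring].hal_srng;
 *
 *		return dp_tx_comp_handler(soc, hal_srng, budget);
 *	}
 */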

#ifdef CONVERGED_TDLS_ENABLE
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 *
 * @vdev_handle: which vdev should transmit the tx data frames
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;

	return dp_tx_send(vdev_handle, msdu_list);
}
#endif
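
/*
 * Illustrative call (assumption: a TDLS control path that must retain
 * ownership of its frames). OL_TX_SPEC_NO_FREE marks the vdev so that,
 * per the TDLS branch in dp_tx_comp_free_buf(), completed frames are
 * handed to dp_non_std_tx_comp_free_buff() instead of being freed.
 *
 *	qdf_nbuf_t failed;
 *
 *	failed = dp_tx_non_std(vdev_handle, OL_TX_SPEC_NO_FREE, mgmt_nbuf);
 *	if (failed)
 *		;	// not sent; the caller still owns the nbuf
 */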

/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
			HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
			vdev->vdev_id);

	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
			DP_SW2HW_MACID(vdev->pdev->pdev_id));

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
			vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta &&
				(!vdev->wds_enabled || vdev->proxysta_vdev)))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
}

/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_attach() - attach pdev to dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_exception);
	qdf_atomic_init(&pdev->num_tx_outstanding);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_detach() - detach pdev from dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
{
	/* TODO: what should be done here? */
	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
		int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
		int num_desc)
{
	uint8_t i;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s Tx Desc Pool alloc %d failed %pK\n",
					__func__, i, soc);
			return ENOMEM;
		}
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"%s Tx Desc Pool Free failed\n",
					__func__);
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_soc_detach() - detach soc from dp tx
 * @soc: core txrx main context
 *
 * This function detaches dp tx from the main device context;
 * it frees dp tx resources and de-initializes them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint16_t num_desc;
	uint16_t num_ext_desc;
	uint8_t i;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_delete_static_pools(soc, num_pool);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"%s Tx Ext Desc Pool Free failed\n",
					__func__);
			return QDF_STATUS_E_RESOURCES;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s MSDU Ext Desc Pool %d Free descs = %d\n",
			__func__, num_pool, num_ext_desc);

	for (i = 0; i < num_pool; i++)
		dp_tx_tso_desc_pool_free(soc, i);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Desc Pool %d Free descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++)
		dp_tx_tso_num_seg_pool_free(soc, i);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
			__func__, num_pool, num_desc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_soc_attach() - attach soc to dp tx
 * @soc: core txrx main context
 *
 * This function attaches dp tx to the main device context;
 * it allocates dp tx resources and initializes them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail;

	dp_tx_flow_control_init(soc);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
			__func__, num_pool, num_desc);

	/* Allocate extension tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"MSDU Ext Desc Pool alloc %d failed %pK\n",
					i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
			__func__, num_pool, num_ext_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"TSO Desc Pool alloc %d failed %pK\n",
					i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Desc Alloc %d, descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"TSO Num of seg Pool alloc %d failed %pK\n",
					i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Num of seg pool Alloc %d, descs = %d\n",
			__func__, num_pool, num_desc);

	/* Initialize descriptors in TCL Rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			hal_tx_init_data_ring(soc->hal_soc,
					soc->tcl_data_ring[i].hal_srng);
		}
	}

	/*
	 * todo - Add a runtime config option to enable this.
	 */
	/*
	 * Due to multiple issues on NPR EMU, enable it selectively
	 * only for NPR EMU, should be removed, once NPR platforms
	 * are stable.
	 */
	soc->process_tx_status = 1;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s HAL Tx init Success\n", __func__);

	return QDF_STATUS_SUCCESS;

fail:
	/* Detach will take care of freeing only allocated resources */
	dp_tx_soc_detach(soc);
	return QDF_STATUS_E_RESOURCES;
}
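
/*
 * Illustrative bring-up/tear-down order for the attach/detach pairs in
 * this file (error handling elided; creation of the soc/pdev/vdev
 * objects themselves happens outside dp_tx):
 *
 *	if (dp_tx_soc_attach(soc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_RESOURCES;
 *	dp_tx_pdev_attach(pdev);
 *	dp_tx_vdev_attach(vdev);
 *	...
 *	dp_tx_vdev_detach(vdev);
 *	dp_tx_pdev_detach(pdev);
 *	dp_tx_soc_detach(soc);
 */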

/**
 * dp_tx_me_mem_free() - Free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of list
 *
 * Return: void
 */
static inline void dp_tx_me_mem_free(struct dp_pdev *pdev,
		struct dp_tx_seg_info_s *seg_info_head)
{
	struct dp_tx_me_buf_t *mc_uc_buf;
	struct dp_tx_seg_info_s *seg_info_new = NULL;
	qdf_nbuf_t nbuf = NULL;
	uint64_t phy_addr;

	while (seg_info_head) {
		nbuf = seg_info_head->nbuf;
		/* read frags from the node being freed; seg_info_new is
		 * NULL on the first iteration
		 */
		mc_uc_buf = (struct dp_tx_me_buf_t *)
			seg_info_head->frags[0].vaddr;
		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
				phy_addr,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
		dp_tx_me_free_buf(pdev, mc_uc_buf);
		qdf_nbuf_free(nbuf);
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
}

/**
 * dp_tx_me_send_convert_ucast() - function to convert multicast to unicast
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No of clients
 *
 * Return: no of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct ether_header *eh;
	uint8_t *data;
	uint16_t len;

	/* reference to frame dst addr */
	uint8_t *dstmac;
	/* copy of original frame src addr */
	uint8_t srcmac[DP_MAC_ADDR_LEN];

	/* local index into newmac */
	uint8_t new_mac_idx = 0;

	struct dp_tx_me_buf_t *mc_uc_buf;
	qdf_nbuf_t nbuf_clone;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s *seg_info_head = NULL;
	struct dp_tx_seg_info_s *seg_info_tail = NULL;
	struct dp_tx_seg_info_s *seg_info_new;
	struct dp_tx_frag_info_s data_frag;
	qdf_dma_addr_t paddr_data;
	qdf_dma_addr_t paddr_mcbuf = 0;
	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
	QDF_STATUS status;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/* the ethernet header lives at the start of the frame data,
	 * not at the nbuf handle itself
	 */
	eh = (struct ether_header *)qdf_nbuf_data(nbuf);

	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);

	len = qdf_nbuf_len(nbuf);

	data = qdf_nbuf_data(nbuf);

	status = qdf_nbuf_map(vdev->osdev, nbuf,
			QDF_DMA_TO_DEVICE);

	if (status) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Mapping failure Error:%d", status);
		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
		return 0;
	}

	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;

	/* preparing data fragment */
	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
	data_frag.paddr_lo = (uint32_t)paddr_data;
	data_frag.paddr_hi = ((uint64_t)paddr_data & 0xffffffff00000000) >> 32;
	data_frag.len = len - DP_MAC_ADDR_LEN;

	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
		dstmac = newmac[new_mac_idx];
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"added mac addr (%pM)", dstmac);

		/* Check for NULL Mac Address */
		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
			continue;

		/* frame to self mac. skip */
		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
			continue;

		/*
		 * TODO: optimize to avoid malloc in per-packet path
		 * For eg. seg_pool can be made part of vdev structure
		 */
		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));

		if (!seg_info_new) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"alloc failed");
			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
			goto fail_seg_alloc;
		}

		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
		if (mc_uc_buf == NULL)
			goto fail_buf_alloc;

		/*
		 * TODO: Check if we need to clone the nbuf
		 * Or can we just use the reference for all cases
		 */
		if (new_mac_idx < (new_mac_cnt - 1)) {
			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);

			if (nbuf_clone == NULL) {
				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
				goto fail_clone;
			}
		} else {
			/*
			 * Update the ref
			 * to account for frame sent without cloning
			 */
			qdf_nbuf_ref(nbuf);
			nbuf_clone = nbuf;
		}

		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);

		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
				&paddr_mcbuf);

		if (status) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"Mapping failure Error:%d", status);
			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
			goto fail_map;
		}

		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
		seg_info_new->frags[0].paddr_hi =
			((uint64_t)paddr_mcbuf & 0xffffffff00000000) >> 32;
		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;

		seg_info_new->frags[1] = data_frag;
		seg_info_new->nbuf = nbuf_clone;
		seg_info_new->frag_cnt = 2;
		seg_info_new->total_len = len;

		seg_info_new->next = NULL;

		if (seg_info_head == NULL)
			seg_info_head = seg_info_new;
		else
			seg_info_tail->next = seg_info_new;

		seg_info_tail = seg_info_new;
	}

	if (!seg_info_head)
		return 0;

	msdu_info.u.sg_info.curr_seg = seg_info_head;
	msdu_info.num_seg = new_mac_cnt;
	msdu_info.frm_type = dp_tx_frm_me;

	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	while (seg_info_head->next) {
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
	qdf_mem_free(seg_info_head);

	return new_mac_cnt;

fail_map:
	qdf_nbuf_free(nbuf_clone);

fail_clone:
	dp_tx_me_free_buf(pdev, mc_uc_buf);

fail_buf_alloc:
	qdf_mem_free(seg_info_new);

fail_seg_alloc:
	dp_tx_me_mem_free(pdev, seg_info_head);
	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	return 0;
}
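
/*
 * Illustrative caller sketch (assumption: the destination table comes
 * from a multicast-to-unicast snooping component; "me_lookup_clients"
 * and "grp_addr" are hypothetical). A zero return means no unicast
 * copies were queued and the caller falls back to plain multicast.
 *
 *	uint8_t newmac[8][DP_MAC_ADDR_LEN];
 *	uint8_t cnt, sent;
 *
 *	cnt = me_lookup_clients(grp_addr, newmac, 8);
 *	sent = cnt ? dp_tx_me_send_convert_ucast(vdev_handle, nbuf,
 *						 newmac, cnt) : 0;
 *	if (!sent)
 *		nbuf = dp_tx_send(vdev_handle, nbuf);	// plain multicast
 */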