dp_tx.c
/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include <wlan_cfg.h>
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#define DP_TX_GET_DESC_POOL_ID(vdev) qdf_get_cpu()
#define DP_TX_GET_RING_ID(vdev) vdev->pdev->soc->tx_ring_map[qdf_get_cpu()]
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/* TODO: Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * The Tx packet queue has two components: a software descriptor pool id and
 * a DMA ring id. Depending on the Tx features enabled and the hardware
 * configuration, the id combination differs. For example:
 * - With XPS enabled, all Tx descriptor pools and DMA rings are assigned
 *   per CPU id.
 * - With no XPS and lock-based resource protection, descriptor pool ids are
 *   different for each vdev, and the DMA ring id is the same as the single
 *   pdev id.
 *
 * Return: None
 */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
    /* get flow id */
    queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
    queue->ring_id = DP_TX_GET_RING_ID(vdev);

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
            "%s, pool_id:%d ring_id: %d\n",
            __func__, queue->desc_pool_id, queue->ring_id);

    return;
}
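/*
 * Illustrative sketch (not part of the driver): with neither
 * TX_PER_PDEV_DESC_POOL nor TX_PER_VDEV_DESC_POOL defined, the queue ids
 * resolve per-CPU. Assuming a hypothetical soc whose tx_ring_map[] was set
 * up at attach time as {0, 1, 2, 0}, a frame transmitted from CPU 2 gets:
 *
 *    queue->desc_pool_id = qdf_get_cpu();       // -> 2
 *    queue->ring_id = soc->tx_ring_map[2];      // -> 2
 *
 * so each CPU works on its own descriptor pool and TCL ring without taking
 * locks.
 */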
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_release() - Release the tso segment
 *                            after unmapping all the fragments
 *
 * @soc: soc handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
        struct dp_tx_desc_s *tx_desc)
{
    TSO_DEBUG("%s: Free the tso descriptor", __func__);
    if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "%s %d TSO desc is NULL!",
                __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                "%s %d TSO common info is NULL!",
                __func__, __LINE__);
        qdf_assert(0);
    } else {
        struct qdf_tso_num_seg_elem_t *tso_num_desc =
            (struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc;

        if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
            tso_num_desc->num_seg.tso_cmn_num_seg--;
            qdf_nbuf_unmap_tso_segment(soc->osdev,
                    tx_desc->tso_desc, false);
        } else {
            tso_num_desc->num_seg.tso_cmn_num_seg--;
            qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
            qdf_nbuf_unmap_tso_segment(soc->osdev,
                    tx_desc->tso_desc, true);
            dp_tso_num_seg_free(soc, tx_desc->pool_id,
                    tx_desc->tso_num_desc);
            tx_desc->tso_num_desc = NULL;
        }
        dp_tx_tso_desc_free(soc,
                tx_desc->pool_id, tx_desc->tso_desc);
        tx_desc->tso_desc = NULL;
    }
}
#else
static void dp_tx_tso_desc_release(struct dp_soc *soc,
        struct dp_tx_desc_s *tx_desc)
{
    return;
}
#endif
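/*
 * Illustrative sketch (not part of the driver): tso_cmn_num_seg acts as a
 * reference count shared by all segments of one TSO jumbo frame. For an
 * assumed 3-segment frame the release sequence is:
 *
 *    release(seg0): cmn_num_seg 3 -> 2, unmap(seg0, last=false)
 *    release(seg1): cmn_num_seg 2 -> 1, unmap(seg1, last=false)
 *    release(seg2): cmn_num_seg 1 -> 0, unmap(seg2, last=true),
 *                   free common tso_num_desc
 *
 * Only the final release passes is_last_seg=true, so the underlying DMA
 * mapping of the original nbuf is torn down exactly once.
 */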
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
    struct dp_pdev *pdev = tx_desc->pdev;
    struct dp_soc *soc;
    uint8_t comp_status = 0;

    qdf_assert(pdev);

    soc = pdev->soc;

    if (tx_desc->frm_type == dp_tx_frm_tso)
        dp_tx_tso_desc_release(soc, tx_desc);

    if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
        dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

    qdf_atomic_dec(&pdev->num_tx_outstanding);

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        qdf_atomic_dec(&pdev->num_tx_exception);

    if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
            hal_tx_comp_get_buffer_source(&tx_desc->comp))
        comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
    else
        comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
            "Tx Completion Release desc %d status %d outstanding %d\n",
            tx_desc->id, comp_status,
            qdf_atomic_read(&pdev->num_tx_outstanding));

    dp_tx_desc_free(soc, tx_desc, desc_pool_id);
    return;
}
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @meta_data: Metadata to be filled into the frame pre-header
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata:
 * 1) Mesh Mode
 * 2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        uint32_t *meta_data)
{
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *) meta_data;

    uint8_t htt_desc_size;

    /* Size rounded up to a multiple of 8 bytes */
    uint8_t htt_desc_size_aligned;

    uint8_t *hdr = NULL;

    qdf_nbuf_unshare(nbuf);

    HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);

    /*
     * Metadata - HTT MSDU Extension header
     */
    htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
    htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

    if (vdev->mesh_vdev) {
        /* Fill and add HTT metaheader */
        hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
        qdf_mem_copy(hdr, desc_ext, htt_desc_size);
    } else if (vdev->opmode == wlan_op_mode_ocb) {
        /* Todo - Add support for DSRC */
    }

    return htt_desc_size_aligned;
}
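/*
 * Illustrative sketch (not part of the driver): the round-up expression
 * (size + 7) & ~0x7 aligns the metadata length to the next 8-byte boundary,
 * which the hardware requires for the buffer start address. For an assumed
 * sizeof(struct htt_tx_msdu_desc_ext2_t) of 20:
 *
 *    (20 + 7) & ~0x7  ->  27 & ~0x7  ->  24
 *
 * so 24 bytes are pushed in front of the frame while only the real 20-byte
 * descriptor is copied in; the 4 trailing bytes are left as pad.
 */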
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
        void *ext_desc)
{
    uint8_t num_frag;
    uint32_t tso_flags;

    /*
     * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
     * tcp_flag_mask
     *
     * Checksum enable flags are set in TCL descriptor and not in Extension
     * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
     */
    tso_flags = *(uint32_t *) &tso_seg->tso_flags;
    hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

    hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
            tso_seg->tso_flags.ip_len);

    hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
    hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

    for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
        uint32_t lo = 0;
        uint32_t hi = 0;

        qdf_dmaaddr_to_32s(
                tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
        hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
                tso_seg->tso_frags[num_frag].length);
    }

    return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
        void *ext_desc)
{
    return;
}
#endif
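/*
 * Illustrative sketch (not part of the driver): qdf_dmaaddr_to_32s() splits
 * a possibly 64-bit DMA address into the two 32-bit words the hardware
 * descriptor expects. Assuming a fragment at physical address 0xac0de0000:
 *
 *    uint32_t lo, hi;
 *    qdf_dmaaddr_to_32s(0xac0de0000ULL, &lo, &hi);
 *    // lo == 0xc0de0000, hi == 0xa
 *
 * On platforms with 32-bit DMA addressing, hi simply comes back as 0.
 */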
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg() - Loop through the tso segments
 *                        allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg(struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_desc_free(soc,
                msdu_info->tx_queue.desc_pool_id,
                free_seg);
        free_seg = next_seg;
    }
}

/**
 * dp_tx_free_tso_num_seg() - Loop through the tso num segments
 *                            allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso num segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
        struct qdf_tso_num_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_num_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tso_num_seg_free(soc,
                msdu_info->tx_queue.desc_pool_id,
                free_seg);
        free_seg = next_seg;
    }
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *tso_seg;
    int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
    struct dp_soc *soc = vdev->pdev->soc;
    struct qdf_tso_info_t *tso_info;
    struct qdf_tso_num_seg_elem_t *tso_num_seg;

    tso_info = &msdu_info->u.tso_info;
    tso_info->curr_seg = NULL;
    tso_info->tso_seg_list = NULL;
    tso_info->num_segs = num_seg;
    msdu_info->frm_type = dp_tx_frm_tso;
    tso_info->tso_num_seg_list = NULL;

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    while (num_seg) {
        tso_seg = dp_tx_tso_desc_alloc(
                soc, msdu_info->tx_queue.desc_pool_id);
        if (tso_seg) {
            tso_seg->next = tso_info->tso_seg_list;
            tso_info->tso_seg_list = tso_seg;
            num_seg--;
        } else {
            struct qdf_tso_seg_elem_t *free_seg =
                tso_info->tso_seg_list;

            dp_tx_free_tso_seg(soc, free_seg, msdu_info);
            return QDF_STATUS_E_NOMEM;
        }
    }

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    tso_num_seg = dp_tso_num_seg_alloc(soc,
            msdu_info->tx_queue.desc_pool_id);

    if (tso_num_seg) {
        tso_num_seg->next = tso_info->tso_num_seg_list;
        tso_info->tso_num_seg_list = tso_num_seg;
    } else {
        /* Free the already allocated segment list */
        struct qdf_tso_seg_elem_t *free_seg =
            tso_info->tso_seg_list;

        TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
                __func__);
        dp_tx_free_tso_seg(soc, free_seg, msdu_info);
        return QDF_STATUS_E_NOMEM;
    }

    msdu_info->num_seg =
        qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

    TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
            msdu_info->num_seg);

    if (!(msdu_info->num_seg)) {
        dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
        dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
                msdu_info);
        return QDF_STATUS_E_INVAL;
    }

    tso_info->curr_seg = tso_info->tso_seg_list;

    return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    return QDF_STATUS_E_NOMEM;
}
#endif
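/*
 * Illustrative sketch (not part of the driver): dp_tx_prepare_tso() follows
 * an allocate-then-fill pattern, so each failure path only has to unwind
 * what was actually taken from the pools. Assuming a 3-segment TSO frame:
 *
 *    1. pop 3 qdf_tso_seg_elem_t from the per-queue pool  (segment list)
 *    2. pop 1 qdf_tso_num_seg_elem_t (shared refcount node)
 *    3. qdf_nbuf_get_tso_info() fills both with DMA mapping info
 *
 * A failure at step 1 or 2 unwinds only the segment list; a failure at
 * step 3 unwinds both the segment list and the num-seg node.
 */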
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
        struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
    uint8_t i;
    uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
    struct dp_tx_seg_info_s *seg_info;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_soc *soc = vdev->pdev->soc;

    /* Allocate an extension descriptor */
    msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
    qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

    if (!msdu_ext_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
        return NULL;
    }

    if (qdf_unlikely(vdev->mesh_vdev)) {
        qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
                &msdu_info->meta_data[0],
                sizeof(struct htt_tx_msdu_desc_ext2_t));
        qdf_atomic_inc(&vdev->pdev->num_tx_exception);
        HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
    }

    switch (msdu_info->frm_type) {
    case dp_tx_frm_sg:
    case dp_tx_frm_me:
    case dp_tx_frm_raw:
        seg_info = msdu_info->u.sg_info.curr_seg;

        /* Update the buffer pointers in MSDU Extension Descriptor */
        for (i = 0; i < seg_info->frag_cnt; i++) {
            hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
                    seg_info->frags[i].paddr_lo,
                    seg_info->frags[i].paddr_hi,
                    seg_info->frags[i].len);
        }

        break;

    case dp_tx_frm_tso:
        dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
                &cached_ext_desc[0]);
        break;

    default:
        break;
    }

    QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
            cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

    hal_tx_ext_desc_sync(&cached_ext_desc[0],
            msdu_ext_desc->vaddr);

    return msdu_ext_desc;
}
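/*
 * Illustrative sketch (not part of the driver): the extension descriptor is
 * built in a small on-stack buffer and only then copied out with
 * hal_tx_ext_desc_sync(). Writing the fields into cached stack memory first
 * keeps the many read-modify-write accesses of the hal_tx_ext_desc_set_*()
 * helpers off the (typically uncached) descriptor memory:
 *
 *    uint8_t cached[HAL_TX_EXT_DESC_WITH_META_DATA] = { 0 };
 *    hal_tx_ext_desc_set_buffer(cached, 0, lo, hi, len);  // fast, cached
 *    hal_tx_ext_desc_sync(cached, elem->vaddr);           // one bulk copy
 *
 * Here "elem" stands for the allocated dp_tx_ext_desc_elem_s.
 */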
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @meta_data: Metadata to the frame for special frames
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, uint8_t desc_pool_id,
        uint32_t *meta_data)
{
    uint8_t align_pad;
    uint8_t is_exception = 0;
    uint8_t htt_hdr_size;
    struct ether_header *eh;
    struct dp_tx_desc_s *tx_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (qdf_unlikely(!tx_desc)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "%s Tx Desc Alloc Failed\n", __func__);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
        return NULL;
    }

    /* Flow control/Congestion Control counters */
    qdf_atomic_inc(&pdev->num_tx_outstanding);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = dp_tx_frm_std;
    tx_desc->tx_encap_type = vdev->tx_encap_type;
    tx_desc->vdev = vdev;
    tx_desc->pdev = pdev;
    tx_desc->msdu_ext_desc = NULL;

    /*
     * For non-scatter regular frames, the buffer pointer is programmed
     * directly in the TCL input descriptor instead of using an MSDU
     * extension descriptor. For this case, the HW requirement is that
     * the descriptor should always point to an 8-byte aligned address.
     *
     * So we add an alignment pad to the start of the buffer, and specify
     * the actual start of data through pkt_offset
     */
    align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
    qdf_nbuf_push_head(nbuf, align_pad);
    tx_desc->pkt_offset = align_pad;

    /*
     * For special modes (vdev_type == ocb or mesh), data frames should be
     * transmitted using varying transmit parameters (tx spec) which include
     * transmit rate, power, priority, channel, channel bandwidth, nss etc.
     * These are filled in HTT MSDU descriptor and sent in frame pre-header.
     * These frames are sent as exception packets to firmware.
     *
     * HTT Metadata should be ensured to be a multiple of 8 bytes,
     * to get an 8-byte aligned start address along with the align_pad
     * added above.
     *
     * |-----------------------------|
     * |                             |
     * |-----------------------------| <-----Buffer Pointer Address given
     * |                             |  ^    in HW descriptor (aligned)
     * |        HTT Metadata         |  |
     * |                             |  |
     * |                             |  | Packet Offset given in descriptor
     * |                             |  |
     * |-----------------------------|  |
     * |       Alignment Pad         |  v
     * |-----------------------------| <----- Actual buffer start address
     * |         SKB Data            |           (Unaligned)
     * |                             |
     * |                             |
     * |                             |
     * |                             |
     * |                             |
     * |-----------------------------|
     */
    if (qdf_unlikely(vdev->mesh_vdev ||
                (vdev->opmode == wlan_op_mode_ocb))) {
        htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
                meta_data);
        tx_desc->pkt_offset += htt_hdr_size;
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        is_exception = 1;
    }

    if (qdf_unlikely(QDF_STATUS_SUCCESS !=
                qdf_nbuf_map(soc->osdev, nbuf,
                    QDF_DMA_TO_DEVICE))) {
        /* Handle failure */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "qdf_nbuf_map failed\n");
        DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
        goto failure;
    }

    if (qdf_unlikely(vdev->nawds_enabled)) {
        eh = (struct ether_header *) qdf_nbuf_data(nbuf);
        if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
            tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
            is_exception = 1;
        }
    }

#if !TQM_BYPASS_WAR
    if (is_exception)
#endif
    {
        /* Temporary WAR due to TQM VP issues */
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        qdf_atomic_inc(&pdev->num_tx_exception);
    }

    return tx_desc;

failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
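/*
 * Illustrative sketch (not part of the driver): suppose qdf_nbuf_data()
 * returns an assumed unaligned address 0x40000013. Then:
 *
 *    align_pad = 0x40000013 & 0x7;      // 3
 *    qdf_nbuf_push_head(nbuf, 3);       // data now starts at 0x40000010
 *    tx_desc->pkt_offset = 3;           // HW skips the 3 pad bytes
 *
 * If mesh/OCB metadata is also pushed (say 24 aligned bytes), the buffer
 * pointer drops to 0x3ffffff8 and pkt_offset grows to 27, so the hardware
 * still finds the real payload at buffer_ptr + pkt_offset = 0x40000013.
 */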
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 *                        frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
        uint8_t desc_pool_id)
{
    struct dp_tx_desc_s *tx_desc;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (!tx_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
        return NULL;
    }

    /* Flow control/Congestion Control counters */
    qdf_atomic_inc(&pdev->num_tx_outstanding);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = msdu_info->frm_type;
    tx_desc->tx_encap_type = vdev->tx_encap_type;
    tx_desc->vdev = vdev;
    tx_desc->pdev = pdev;
    tx_desc->pkt_offset = 0;
    tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
    tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

    /* Handle scattered frames - TSO/SG/ME */
    /* Allocate and prepare an extension descriptor for scattered frames */
    msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
    if (!msdu_ext_desc) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                "%s Tx Extension Descriptor Alloc Fail\n",
                __func__);
        goto failure;
    }

#if TQM_BYPASS_WAR
    /* Temporary WAR due to TQM VP issues */
    tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
    qdf_atomic_inc(&pdev->num_tx_exception);
#endif
    if (qdf_unlikely(vdev->mesh_vdev))
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

    tx_desc->msdu_ext_desc = msdu_ext_desc;
    tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

    return tx_desc;

failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on DMA mapping failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
    struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
    qdf_nbuf_t curr_nbuf = NULL;
    uint16_t total_len = 0;
    int32_t i;

    struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
    qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

    DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

    /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
    if ((qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
            && (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU)) {
        qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
    }

    if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
                QDF_DMA_TO_DEVICE)) {
        qdf_print("dma map error\n");
        DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
        qdf_nbuf_free(nbuf);
        return NULL;
    }

    for (curr_nbuf = nbuf, i = 0; curr_nbuf;
            curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
        seg_info->frags[i].paddr_lo =
            qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
        seg_info->frags[i].paddr_hi = 0x0;
        seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
        seg_info->frags[i].vaddr = (void *) curr_nbuf;
        total_len += qdf_nbuf_len(curr_nbuf);
    }

    seg_info->frag_cnt = i;
    seg_info->total_len = total_len;
    seg_info->next = NULL;

    sg_info->curr_seg = seg_info;

    msdu_info->frm_type = dp_tx_frm_raw;
    msdu_info->num_seg = 1;

    return nbuf;
}
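/*
 * Illustrative sketch (not part of the driver): a RAW 802.11 frame may
 * arrive as a chain of nbufs linked through qdf_nbuf_next(). Assuming a
 * 3-buffer chain of lengths 100, 1500 and 60 bytes, the loop above flattens
 * it into one segment:
 *
 *    frags[0] = {paddr(buf0), len 100,  vaddr buf0}
 *    frags[1] = {paddr(buf1), len 1500, vaddr buf1}
 *    frags[2] = {paddr(buf2), len 60,   vaddr buf2}
 *    frag_cnt = 3, total_len = 1660, num_seg = 1
 *
 * which is later written into a single MSDU extension descriptor by
 * dp_tx_prepare_ext_desc().
 */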
/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_RESOURCES when the TCL ring is full
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
        struct dp_tx_desc_s *tx_desc, uint8_t tid,
        uint16_t fw_metadata, uint8_t ring_id)
{
    uint8_t type;
    uint16_t length;
    void *hal_tx_desc, *hal_tx_desc_cached;
    qdf_dma_addr_t dma_addr;
    uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];

    /* Return Buffer Manager ID */
    uint8_t bm_id = ring_id;
    void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;

    hal_tx_desc_cached = (void *) cached_desc;
    qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);

    if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
        length = HAL_TX_EXT_DESC_WITH_META_DATA;
        type = HAL_TX_BUF_TYPE_EXT_DESC;
        dma_addr = tx_desc->msdu_ext_desc->paddr;
    } else {
        length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
        type = HAL_TX_BUF_TYPE_BUFFER;
        dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
    }

    hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
    hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
            dma_addr, bm_id, tx_desc->id, type);
    hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
    hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
    hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
    hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
            vdev->dscp_tid_map_id);

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
            "%s length:%d, type = %d, dma_addr %llx, offset %d desc id %u\n",
            __func__, length, type, (uint64_t)dma_addr,
            tx_desc->pkt_offset, tx_desc->id);

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

    hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
            vdev->hal_desc_addr_search_flags);

    if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
            || qdf_nbuf_is_tso(tx_desc->nbuf)) {
        hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
        hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
    }

    if (tid != HTT_TX_EXT_TID_INVALID)
        hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

    if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
        hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);

    /* Sync cached descriptor with HW */
    hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);

    if (!hal_tx_desc) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                "%s TCL ring full ring_id:%d\n", __func__, ring_id);
        DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
        DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
        return QDF_STATUS_E_RESOURCES;
    }

    tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

    hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
    DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

    return QDF_STATUS_SUCCESS;
}
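/*
 * Illustrative sketch (not part of the driver): callers are expected to
 * bracket dp_tx_hw_enqueue() with the SRNG access protocol, as
 * dp_tx_send_msdu_single() below does:
 *
 *    if (hal_srng_access_start(soc->hal_soc, hal_srng))
 *        goto drop;                               // ring unavailable
 *    status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
 *                              htt_tcl_metadata, ring_id);
 *    hal_srng_access_end(soc->hal_soc, hal_srng); // commits head pointer
 *
 * The descriptor only becomes visible to the TCL hardware at access_end,
 * so a failed enqueue can still release the software descriptor safely.
 */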
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu descriptor in which the extracted TID is stored
 *
 * Extract the DSCP or PCP information from the frame and map it into a TID
 * value. Software-based TID classification is required when more than 2
 * DSCP-TID mapping tables are needed. Hardware supports 2 DSCP-TID mapping
 * tables.
 *
 * Return: void
 */
static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_msdu_info_s *msdu_info)
{
    uint8_t tos = 0, dscp_tid_override = 0;
    uint8_t *hdr_ptr, *L3datap;
    uint8_t is_mcast = 0;
    struct ether_header *eh = NULL;
    qdf_ethervlan_header_t *evh = NULL;
    uint16_t ether_type;
    qdf_llc_t *llcHdr;
    struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

    /* for mesh packets don't do any classification */
    if (qdf_unlikely(vdev->mesh_vdev))
        return;

    if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
        eh = (struct ether_header *) nbuf->data;
        hdr_ptr = eh->ether_dhost;
        L3datap = hdr_ptr + sizeof(struct ether_header);
    } else {
        qdf_dot3_qosframe_t *qos_wh =
            (qdf_dot3_qosframe_t *) nbuf->data;
        msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
            qos_wh->i_qos[0] & DP_QOS_TID : 0;
        return;
    }

    is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
    ether_type = eh->ether_type;

    /*
     * Check if packet is dot3 or eth2 type.
     */
    if (IS_LLC_PRESENT(ether_type)) {
        ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
                sizeof(*llcHdr));

        if (ether_type == htons(ETHERTYPE_8021Q)) {
            L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
                sizeof(*llcHdr);
            ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
                    + sizeof(*llcHdr) +
                    sizeof(qdf_net_vlanhdr_t));
        } else {
            L3datap = hdr_ptr + sizeof(struct ether_header) +
                sizeof(*llcHdr);
        }
    } else {
        if (ether_type == htons(ETHERTYPE_8021Q)) {
            evh = (qdf_ethervlan_header_t *) eh;
            ether_type = evh->ether_type;
            L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
        }
    }

    /*
     * Find priority from IP TOS DSCP field
     */
    if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
        qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;

        if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
            /* Only for unicast frames */
            if (!is_mcast) {
                /* send it on VO queue */
                msdu_info->tid = DP_VO_TID;
            }
        } else {
            /*
             * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
             * from TOS byte.
             */
            tos = ip->ip_tos;
            dscp_tid_override = 1;
        }
    } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
        /* TODO
         * use flowlabel
         * igmpmld cases to be handled in phase 2
         */
        unsigned long ver_pri_flowlabel;
        unsigned long pri;

        ver_pri_flowlabel = *(unsigned long *) L3datap;
        pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
            DP_IPV6_PRIORITY_SHIFT;
        tos = pri;
        dscp_tid_override = 1;
    } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
        msdu_info->tid = DP_VO_TID;
    else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
        /* Only for unicast frames */
        if (!is_mcast) {
            /* send ucast arp on VO queue */
            msdu_info->tid = DP_VO_TID;
        }
    }

    /*
     * Assign all MCAST packets to BE
     */
    if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
        if (is_mcast) {
            tos = 0;
            dscp_tid_override = 1;
        }
    }

    if (dscp_tid_override == 1) {
        tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
        msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
    }

    return;
}
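/*
 * Illustrative sketch (not part of the driver): for an IPv4 voice frame
 * with TOS byte 0xB8 (DSCP Expedited Forwarding), the final mapping step
 * above, assuming the usual DP_IP_DSCP_SHIFT of 2 and a 6-bit mask, works
 * out as:
 *
 *    tos = (0xB8 >> 2) & 0x3F;                         // 46 (EF)
 *    tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][46];
 *
 * i.e. the two low ECN bits are discarded and the 6-bit DSCP indexes the
 * per-pdev override table selected by the vdev.
 */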
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @tx_q: Tx queue to be used for this Tx frame
 * @meta_data: Metadata to the frame for special frames
 * @peer_id: peer_id of the peer in case of NAWDS frames
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        uint8_t tid, struct dp_tx_queue *tx_q,
        uint32_t *meta_data, uint16_t peer_id)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;
    struct dp_tx_desc_s *tx_desc;
    QDF_STATUS status;
    void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
    uint16_t htt_tcl_metadata = 0;

    HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 0);

    /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
    tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
            meta_data);
    if (!tx_desc) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "%s Tx_desc prepare Fail vdev %p queue %d\n",
                __func__, vdev, tx_q->desc_pool_id);
        return nbuf;
    }

    if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                "%s %d : HAL RING Access Failed -- %p\n",
                __func__, __LINE__, hal_srng);
        DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
        dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
        goto fail_return;
    }

    if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
        HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
                HTT_TCL_METADATA_TYPE_PEER_BASED);
        HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
                peer_id);
    } else
        htt_tcl_metadata = vdev->htt_tcl_metadata;

    /* Enqueue the Tx MSDU descriptor to HW for transmit */
    status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
            htt_tcl_metadata, tx_q->ring_id);

    if (status != QDF_STATUS_SUCCESS) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "%s Tx_hw_enqueue Fail tx_desc %p queue %d\n",
                __func__, tx_desc, tx_q->ring_id);
        dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
        goto fail_return;
    }

    nbuf = NULL;

fail_return:
    hal_srng_access_end(soc->hal_soc, hal_srng);
    return nbuf;
}
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
static noinline
#else
static
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_msdu_info_s *msdu_info)
{
    uint8_t i;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;
    struct dp_tx_desc_s *tx_desc;
    QDF_STATUS status;

    struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
    void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;

    if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                "%s %d : HAL RING Access Failed -- %p\n",
                __func__, __LINE__, hal_srng);
        DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
        return nbuf;
    }

    if (msdu_info->frm_type == dp_tx_frm_me)
        nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

    i = 0;

    /* TODO: debug print to track i and num_seg */

    /*
     * For each segment (maps to 1 MSDU), prepare software and hardware
     * descriptors using information in msdu_info
     */
    while (i < msdu_info->num_seg) {
        /*
         * Setup Tx descriptor for an MSDU, and MSDU extension
         * descriptor
         */
        tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
                tx_q->desc_pool_id);

        if (!tx_desc) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                    "%s Tx_desc prepare Fail vdev %p queue %d\n",
                    __func__, vdev, tx_q->desc_pool_id);

            if (msdu_info->frm_type == dp_tx_frm_me) {
                dp_tx_me_free_buf(pdev,
                    (void *)(msdu_info->u.sg_info
                        .curr_seg->frags[0].vaddr));
            }
            goto done;
        }

        if (msdu_info->frm_type == dp_tx_frm_me) {
            tx_desc->me_buffer =
                msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
            tx_desc->flags |= DP_TX_DESC_FLAG_ME;
        }

        /*
         * Enqueue the Tx MSDU descriptor to HW for transmit
         */
        status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
                vdev->htt_tcl_metadata, tx_q->ring_id);

        if (status != QDF_STATUS_SUCCESS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                    "%s Tx_hw_enqueue Fail tx_desc %p queue %d\n",
                    __func__, tx_desc, tx_q->ring_id);

            if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
                dp_tx_me_free_buf(pdev, tx_desc->me_buffer);

            dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
            goto done;
        }

        /*
         * TODO
         * if tso_info structure can be modified to have curr_seg
         * as first element, following 2 blocks of code (for TSO and SG)
         * can be combined into 1
         */

        /*
         * For frames with multiple segments (TSO, ME), jump to next
         * segment.
         */
        if (msdu_info->frm_type == dp_tx_frm_tso) {
            if (msdu_info->u.tso_info.curr_seg->next) {
                msdu_info->u.tso_info.curr_seg =
                    msdu_info->u.tso_info.curr_seg->next;

                /*
                 * If this is a jumbo nbuf, then increment the
                 * number of nbuf users for each additional
                 * segment of the msdu. This will ensure that
                 * the skb is freed only after receiving tx
                 * completion for all segments of an nbuf
                 */
                qdf_nbuf_inc_users(nbuf);

                /* Check with MCL if this is needed */
                /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
            }
        }

        /*
         * For Multicast-Unicast converted packets,
         * each converted frame (for a client) is represented as
         * 1 segment
         */
        if ((msdu_info->frm_type == dp_tx_frm_sg) ||
                (msdu_info->frm_type == dp_tx_frm_me)) {
            if (msdu_info->u.sg_info.curr_seg->next) {
                msdu_info->u.sg_info.curr_seg =
                    msdu_info->u.sg_info.curr_seg->next;
                nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
            }
        }

        i++;
    }

    nbuf = NULL;

done:
    hal_srng_access_end(soc->hal_soc, hal_srng);

    return nbuf;
}
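/*
 * Illustrative sketch (not part of the driver): for an assumed TSO frame
 * split into 3 segments, the loop above enqueues 3 descriptors that all
 * reference the same underlying nbuf, bumping the user count once per
 * additional segment:
 *
 *    enqueue(seg0)                     users = 1 (original)
 *    advance to seg1, inc_users   ->   users = 2
 *    advance to seg2, inc_users   ->   users = 3
 *
 * Each tx completion then drops one reference, so the skb memory is only
 * returned to the OS after the last segment completes.
 */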
/**
 * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
 *                      for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: nbuf on success, NULL on DMA mapping failure
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
    struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
    uint32_t cur_frag, nr_frags;
    qdf_dma_addr_t paddr;
    struct dp_tx_sg_info_s *sg_info;

    sg_info = &msdu_info->u.sg_info;
    nr_frags = qdf_nbuf_get_nr_frags(nbuf);

    if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
                QDF_DMA_TO_DEVICE)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "dma map error\n");
        DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
        qdf_nbuf_free(nbuf);
        return NULL;
    }

    seg_info->frags[0].paddr_lo = qdf_nbuf_get_frag_paddr(nbuf, 0);
    seg_info->frags[0].paddr_hi = 0;
    seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
    seg_info->frags[0].vaddr = (void *) nbuf;

    for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
        if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
                    nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                    "frag dma map error\n");
            DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
            qdf_nbuf_free(nbuf);
            return NULL;
        }

        paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
        seg_info->frags[cur_frag + 1].paddr_lo = paddr;
        seg_info->frags[cur_frag + 1].paddr_hi =
            ((uint64_t) paddr) >> 32;
        seg_info->frags[cur_frag + 1].len =
            qdf_nbuf_get_frag_size(nbuf, cur_frag);
    }

    seg_info->frag_cnt = (cur_frag + 1);
    seg_info->total_len = qdf_nbuf_len(nbuf);
    seg_info->next = NULL;

    sg_info->curr_seg = seg_info;

    msdu_info->frm_type = dp_tx_frm_sg;
    msdu_info->num_seg = 1;

    return nbuf;
}
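/*
 * Illustrative sketch (not part of the driver): for an assumed nonlinear
 * skb with a 128-byte linear head and two page fragments of 4096 bytes
 * each, the segment built above looks like:
 *
 *    frags[0] = {head paddr,  len 128}    // linear part, slot 0
 *    frags[1] = {frag0 paddr, len 4096}   // skb fragment 0 -> slot 1
 *    frags[2] = {frag1 paddr, len 4096}   // skb fragment 1 -> slot 2
 *    frag_cnt = 3, total_len = qdf_nbuf_len(nbuf) = 8320
 *
 * Note the off-by-one layout: skb fragment i lands in frags[i + 1] because
 * slot 0 is reserved for the linear header.
 */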
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *                                  and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: void
 */
static
void dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct meta_hdr_s *mhdr;
    struct htt_tx_msdu_desc_ext2_t *meta_data =
        (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

    mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

    qdf_mem_set(meta_data, 0, sizeof(struct htt_tx_msdu_desc_ext2_t));

    if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
        meta_data->power = mhdr->power;
        meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
        meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
        meta_data->pream_type = mhdr->rate_info[0].preamble_type;
        meta_data->retry_limit = mhdr->rate_info[0].max_tries;
        meta_data->dyn_bw = 1;

        meta_data->valid_pwr = 1;
        meta_data->valid_mcs_mask = 1;
        meta_data->valid_nss_mask = 1;
        meta_data->valid_preamble_type = 1;
        meta_data->valid_retries = 1;
        meta_data->valid_bw_info = 1;
    }

    if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
        meta_data->encrypt_type = 0;
        meta_data->valid_encrypt_type = 1;
    }

    if (mhdr->flags & METAHDR_FLAG_NOQOS)
        msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
    else
        msdu_info->tid = qdf_nbuf_get_priority(nbuf);

    meta_data->valid_key_flags = 1;
    meta_data->key_flags = (mhdr->keyix & 0x3);

    qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s));
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
            "%s , Meta hdr %0x %0x %0x %0x %0x\n",
            __func__, msdu_info->meta_data[0],
            msdu_info->meta_data[1],
            msdu_info->meta_data[2],
            msdu_info->meta_data[3],
            msdu_info->meta_data[4]);

    return;
}
#else
static
void dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_msdu_info_s *msdu_info)
{
}
#endif
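/*
 * Illustrative sketch (not part of the driver): a mesh application that
 * wants a frame sent once, at a fixed MCS 7 on 2 spatial streams, could
 * prepend a meta header along these (assumed) lines before handing the
 * frame down:
 *
 *    struct meta_hdr_s mhdr = { 0 };
 *    mhdr.flags = METAHDR_FLAG_NOENCRYPT;     // AUTO_RATE not set
 *    mhdr.power = 10;
 *    mhdr.rate_info[0].mcs = 7;
 *    mhdr.rate_info[0].nss = 2;
 *    mhdr.rate_info[0].max_tries = 1;
 *
 * The extractor above then turns this into mcs_mask 0x80, nss_mask 0x4 and
 * retry_limit 1 in the HTT extension descriptor.
 */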
/**
 * dp_tx_prepare_nawds() - Transmit NAWDS frames
 * @vdev: dp_vdev handle
 * @nbuf: skb
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @tx_q: Tx queue to be used for this Tx frame
 * @meta_data: Meta data for mesh
 * @peer_id: peer_id of the peer in case of NAWDS frames
 *
 * Return: NULL on success, nbuf on failure
 */
static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        uint8_t tid, struct dp_tx_queue *tx_q, uint32_t *meta_data,
        uint32_t peer_id)
{
    struct dp_peer *peer = NULL;
    qdf_nbuf_t nbuf_copy;

    TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
        if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
                (peer->nawds_enabled || peer->bss_peer)) {
            nbuf_copy = qdf_nbuf_copy(nbuf);
            if (!nbuf_copy) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                        QDF_TRACE_LEVEL_ERROR,
                        "nbuf copy failed");
                /* skip this peer rather than sending a NULL nbuf */
                continue;
            }

            peer_id = peer->peer_ids[0];
            nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, tid,
                    tx_q, meta_data, peer_id);

            if (nbuf_copy != NULL) {
                qdf_nbuf_free(nbuf);
                return nbuf_copy;
            }
        }
    }

    if (peer_id == HTT_INVALID_PEER)
        return nbuf;

    qdf_nbuf_free(nbuf);
    return NULL;
}
/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
 * cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
{
    struct ether_header *eh = NULL;
    struct dp_tx_msdu_info_s msdu_info;
    struct dp_tx_seg_info_s seg_info;
    struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
    struct dp_soc *soc = vdev->pdev->soc;
    uint16_t peer_id = HTT_INVALID_PEER;
    uint8_t count;
    uint8_t found = 0;
    uint8_t oldest_mec_entry_idx = 0;
    uint64_t oldest_mec_ts = 0;
    struct mect_entry *mect_entry;

    qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
    qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);

    if (qdf_nbuf_get_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD)
        goto out;

    eh = (struct ether_header *)qdf_nbuf_data(nbuf);

    if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
        for (count = 0; count < soc->mect_cnt; count++) {
            mect_entry = &soc->mect_table[count];
            if (!memcmp(mect_entry->mac_addr, eh->ether_shost,
                        DP_MAC_ADDR_LEN)) {
                found = 1;
                break;
            }

            if (!oldest_mec_ts) {
                oldest_mec_entry_idx = count;
                oldest_mec_ts = mect_entry->ts;
            } else if (mect_entry->ts < oldest_mec_ts) {
                oldest_mec_entry_idx = count;
                oldest_mec_ts = mect_entry->ts;
            }
        }

        if (!found) {
            if (count >= DP_MAX_MECT_ENTRIES)
                count = oldest_mec_entry_idx;
            else
                soc->mect_cnt++;

            mect_entry = &soc->mect_table[count];
            mect_entry->ts = jiffies_64;
            memcpy(mect_entry->mac_addr, eh->ether_shost,
                    DP_MAC_ADDR_LEN);
        }
    }
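    /*
     * Illustrative note (assumption, based on the logic above): the MECT
     * table acts as a small LRU cache of multicast source addresses.
     * With a hypothetical DP_MAX_MECT_ENTRIES of 4 and a full table
     * {A, B, C, D} whose oldest timestamp belongs to C, a new source E
     * misses the lookup, count reaches 4, and E overwrites C's slot with
     * a fresh jiffies_64 timestamp; a frame from a known source just hits
     * the lookup and leaves its timestamp unchanged.
     */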
  1237. out:
  1238. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1239. "%s , skb %0x:%0x:%0x:%0x:%0x:%0x\n",
  1240. __func__, nbuf->data[0], nbuf->data[1], nbuf->data[2],
  1241. nbuf->data[3], nbuf->data[4], nbuf->data[5]);
	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev))
		dp_tx_extract_mesh_meta_data(vdev, nbuf, &msdu_info);

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	if (vdev->dscp_tid_map_id > 1)
		dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/*
	 * Classify the frame and call the corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into the MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s TSO frame %p\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
				qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s tso_prepare fail vdev_id:%d\n",
					__func__, vdev->vdev_id);
			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s non-TSO SG frame %p\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
					"%s Mcast frm for ME %p\n", __func__, vdev);
			DP_STATS_INC_PKT(vdev,
					tx_i.mcast_en.mcast_pkt, 1,
					qdf_nbuf_len(nbuf));

			if (dp_tx_prepare_send_me(vdev, nbuf)) {
				qdf_nbuf_free(nbuf);
				return NULL;
			}
			return nbuf;
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (nbuf == NULL)
			return NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s Raw frame %p\n", __func__, vdev);

		goto send_multiple;
	}

	if (vdev->nawds_enabled) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			nbuf = dp_tx_prepare_nawds(vdev, nbuf, msdu_info.tid,
					&msdu_info.tx_queue,
					msdu_info.meta_data, peer_id);
			return nbuf;
		}
	}

	/* Single linear frame */

	/*
	 * If nbuf is a simple linear frame, use the send_single path to
	 * prepare a direct-buffer type TCL descriptor and enqueue it to
	 * TCL SRNG. There is no need to set up an MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info.tid,
			&msdu_info.tx_queue, msdu_info.meta_data, peer_id);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	return nbuf;
}

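/*
 * Usage sketch (hedged; the netdev glue below is hypothetical and only
 * illustrates the "NULL on success, nbuf on failure" contract of
 * dp_tx_send() as seen from an OSIF hard_start_xmit hook):
 *
 *	static netdev_tx_t osif_hard_start_xmit(struct sk_buff *skb,
 *			struct net_device *dev)
 *	{
 *		struct osif_dev *osdev = netdev_priv(dev);	// hypothetical
 *		qdf_nbuf_t failed;
 *
 *		failed = dp_tx_send(osdev->vdev, (qdf_nbuf_t)skb);
 *		if (failed)
 *			qdf_nbuf_free(failed);	// DP could not enqueue it
 *
 *		return NETDEV_TX_OK;
 *	}
 */
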
/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;

	vdev = tx_desc->vdev;
	qdf_assert(vdev);

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx reinject path\n", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	if (!vdev->osif_proxy_arp) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"function pointer to proxy arp not present\n");
		return;
	}

	if (qdf_unlikely(vdev->mesh_vdev)) {
		/* DP_TX_FREE_SINGLE_BUF already frees the nbuf here */
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
					(peer->bss_peer || peer->nawds_enabled) &&
					!(vdev->osif_proxy_arp(vdev->osif_vdev,
							nbuf))) {
				nbuf_copy = qdf_nbuf_copy(nbuf);
				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
							QDF_TRACE_LEVEL_ERROR,
							FL("nbuf copy failed"));
					break;
				}

				if (peer->nawds_enabled)
					peer_id = peer->peer_ids[0];
				else
					peer_id = HTT_INVALID_PEER;

				nbuf_copy = dp_tx_send_msdu_single(vdev,
						nbuf_copy, msdu_info.tid,
						&msdu_info.tx_queue,
						msdu_info.meta_data, peer_id);
				if (nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
							QDF_TRACE_LEVEL_ERROR,
							FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				}
			}
		}
		qdf_nbuf_free(nbuf);
	}

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev = tx_desc->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx inspect path\n",
			__func__);

	qdf_assert(pdev);
	soc = pdev->soc;

	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
		struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = desc->vdev;
	qdf_nbuf_t nbuf = desc->nbuf;

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			/* If the remaining number of segments is 0, the
			 * actual TSO buffer may be unmapped and freed */
			if (!DP_DESC_NUM_FRAG(desc)) {
				qdf_nbuf_unmap(soc->osdev, nbuf,
						QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(nbuf);
				return;
			}
		}
	}

	if (desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(desc->pdev, desc->me_buffer);

	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);

	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
	} else {
		vdev->osif_tx_free_ext((nbuf));
	}
}

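/*
 * Sketch of the TSO free rule above (assumption: DP_DESC_NUM_FRAG()
 * counts the TSO segments of the parent nbuf still in flight). The
 * parent skb is only unmapped and freed by the completion that retires
 * the last outstanding segment, conceptually:
 *
 *	if (tso_enabled && remaining_segments == 0) {
 *		qdf_nbuf_unmap(osdev, nbuf, QDF_DMA_TO_DEVICE);
 *		qdf_nbuf_free(nbuf);
 *	}
 */
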
/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	uint32_t *htt_status_word = (uint32_t *) status;

	qdf_assert(tx_desc->pdev);

	pdev = tx_desc->pdev;
	soc = pdev->soc;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		dp_tx_comp_free_buf(soc, tx_desc);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		dp_tx_reinject_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(tx_desc, status);
		break;
	}
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s Invalid HTT tx_status %d\n",
				__func__, tx_status);
		break;
	}
}

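/*
 * Illustrative decode of the status word above (the exact bit layout is
 * owned by the HTT_TX_WBM_COMPLETION_V2_* macros; the 4-bit low field
 * below is an assumption for illustration only):
 *
 *	uint32_t word0 = htt_status_word[0];
 *	uint8_t tx_status = (uint8_t)(word0 & 0xf);	// hypothetical mask
 *
 * Every FW-sourced completion funnels through this one switch, so a new
 * FW2WBM status code only needs a new case here.
 */
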
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *                                         in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	if (!tx_desc->msdu_ext_desc) {
		qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset);
	}

	qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s));
	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->channel = tx_desc->pdev->operating_channel;
}
#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
}
#endif

/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @tx_desc: software descriptor head pointer
 * @length: packet length
 *
 * Return: none
 */
static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
		uint32_t length)
{
	struct hal_tx_completion_status ts;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = tx_desc->vdev;
	struct dp_peer *peer = NULL;
	struct dp_pdev *pdev = NULL;
	uint8_t comp_status = 0;

	qdf_mem_zero(&ts, sizeof(struct hal_tx_completion_status));
	hal_tx_comp_get_status(&tx_desc->comp, &ts);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"--------------------\n"
			"Tx Completion Stats:\n"
			"--------------------\n"
			"ack_frame_rssi = %d\n"
			"first_msdu = %d\n"
			"last_msdu = %d\n"
			"msdu_part_of_amsdu = %d\n"
			"rate_stats valid = %d\n"
			"bw = %d\n"
			"pkt_type = %d\n"
			"stbc = %d\n"
			"ldpc = %d\n"
			"sgi = %d\n"
			"mcs = %d\n"
			"ofdma = %d\n"
			"tones_in_ru = %d\n"
			"tsf = %d\n"
			"ppdu_id = %d\n"
			"transmit_cnt = %d\n"
			"tid = %d\n"
			"peer_id = %d\n",
			ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
			ts.msdu_part_of_amsdu, ts.valid, ts.bw,
			ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
			ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
			ts.ppdu_id, ts.transmit_cnt, ts.tid,
			ts.peer_id);

	/* Check vdev before dereferencing it for the mesh path */
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid vdev");
		goto fail;
	}

	if (qdf_unlikely(vdev->mesh_vdev))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);

	soc = vdev->pdev->soc;
	peer = dp_peer_find_by_id(soc, ts.peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid peer");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}

	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
			hal_tx_comp_get_buffer_source(&tx_desc->comp)) {
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);

		DP_STATS_INCC(peer, tx.dropped.mpdu_age_out, 1,
				(comp_status == HAL_TX_TQM_RR_REM_CMD_AGED));
		DP_STATS_INCC(peer, tx.dropped.fw_discard_reason1, 1,
				(comp_status == HAL_TX_TQM_RR_FW_REASON1));
		DP_STATS_INCC(peer, tx.dropped.fw_discard_reason2, 1,
				(comp_status == HAL_TX_TQM_RR_FW_REASON2));
		DP_STATS_INCC(peer, tx.dropped.fw_discard_reason3, 1,
				(comp_status == HAL_TX_TQM_RR_FW_REASON3));
		DP_STATS_INCC(peer, tx.tx_failed, 1,
				comp_status != HAL_TX_TQM_RR_FRAME_ACKED);

		if (comp_status == HAL_TX_TQM_RR_FRAME_ACKED) {
			/* Per-PHY-type MCS histogram: out-of-range MCS
			 * values land in the MAX_MCS overflow bucket */
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[MAX_MCS], 1,
					((ts.mcs >= MAX_MCS_11A) &&
					 (ts.pkt_type == DOT11_A)));
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[ts.mcs], 1,
					((ts.mcs <= MAX_MCS_11A) &&
					 (ts.pkt_type == DOT11_A)));
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[MAX_MCS], 1,
					((ts.mcs >= MAX_MCS_11B) &&
					 (ts.pkt_type == DOT11_B)));
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[ts.mcs], 1,
					((ts.mcs <= MAX_MCS_11B) &&
					 (ts.pkt_type == DOT11_B)));
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[MAX_MCS], 1,
					((ts.mcs >= MAX_MCS_11A) &&
					 (ts.pkt_type == DOT11_N)));
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[ts.mcs], 1,
					((ts.mcs <= MAX_MCS_11A) &&
					 (ts.pkt_type == DOT11_N)));
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[MAX_MCS], 1,
					((ts.mcs >= MAX_MCS_11AC) &&
					 (ts.pkt_type == DOT11_AC)));
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[ts.mcs], 1,
					((ts.mcs <= MAX_MCS_11AC) &&
					 (ts.pkt_type == DOT11_AC)));
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[MAX_MCS], 1,
					((ts.mcs >= MAX_MCS) &&
					 (ts.pkt_type == DOT11_AX)));
			DP_STATS_INCC(peer,
					tx.pkt_type[ts.pkt_type].mcs_count[ts.mcs], 1,
					((ts.mcs <= MAX_MCS) &&
					 (ts.pkt_type == DOT11_AX)));

			DP_STATS_INC(peer, tx.sgi_count[ts.sgi], 1);
			DP_STATS_INC(peer, tx.bw[ts.bw], 1);
			DP_STATS_UPD(peer, tx.last_ack_rssi, ts.ack_frame_rssi);
			DP_STATS_INC(peer,
					tx.wme_ac_type[TID_TO_WME_AC(ts.tid)], 1);
			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
			DP_STATS_INCC(peer, tx.stbc, 1, ts.stbc);
			DP_STATS_INCC(peer, tx.ofdma, 1, ts.ofdma);
			DP_STATS_INCC(peer, tx.ldpc, 1, ts.ldpc);
			DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1,
					(ts.first_msdu && ts.last_msdu));
			DP_STATS_INCC(peer, tx.amsdu_cnt, 1,
					!(ts.first_msdu && ts.last_msdu));
			DP_STATS_INCC(peer, tx.retries, 1, ts.transmit_cnt > 1);
		}
	}

	/* TODO: This call is temporary.
	 * Stats update has to be attached to the HTT PPDU message
	 */
out:
	pdev = vdev->pdev;
	if (pdev->enhanced_stats_en && soc->cdp_soc.ol_ops->update_dp_stats) {
		if (peer) {
			soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
					&peer->stats, ts.peer_id,
					UPDATE_PEER_STATS);
		}

		dp_aggregate_vdev_stats(vdev);
		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
				&vdev->stats, vdev->vdev_id,
				UPDATE_VDEV_STATS);
	}
fail:
	return;
}

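/*
 * The paired DP_STATS_INCC() calls above build a clamped per-PHY MCS
 * histogram: an in-range MCS bumps mcs_count[mcs] and an out-of-range
 * MCS falls into the mcs_count[MAX_MCS] overflow bucket. A plain-C
 * sketch of the same idea (helper name is hypothetical):
 *
 *	static inline void mcs_hist_inc(uint32_t *mcs_count, uint8_t mcs,
 *			uint8_t max_valid)
 *	{
 *		if (mcs >= max_valid)
 *			mcs_count[MAX_MCS]++;	// overflow bucket
 *		else
 *			mcs_count[mcs]++;
 *	}
 */
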
/**
 * dp_tx_comp_process_desc() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 *
 * This function will process batch of descriptors reaped by dp_tx_comp_handler
 * and release the software descriptors after processing is complete
 *
 * Return: none
 */
static void dp_tx_comp_process_desc(struct dp_soc *soc,
		struct dp_tx_desc_s *comp_head)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts = {0};
	uint32_t length;
	struct dp_peer *peer;

	DP_HIST_INIT();
	desc = comp_head;

	while (desc) {
		hal_tx_comp_get_status(&desc->comp, &ts);
		peer = dp_peer_find_by_id(soc, ts.peer_id);
		length = qdf_nbuf_len(desc->nbuf);

		/* Process Tx status in descriptor */
		if (soc->process_tx_status ||
				(desc->vdev && desc->vdev->mesh_vdev))
			dp_tx_comp_process_tx_status(desc, length);

		dp_tx_comp_free_buf(soc, desc);

		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
		next = desc->next;
		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}

	DP_TX_HIST_STATS_PER_PDEV();
}

/**
 * dp_tx_comp_handler() - Tx completion handler
 * @soc: core txrx main context
 * @ring_id: completion ring id
 * @budget: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle error
 * conditions
 *
 * Return: Number of completion descriptors processed
 */
uint32_t dp_tx_comp_handler(struct dp_soc *soc, uint32_t ring_id,
		uint32_t budget)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed;
	void *hal_srng = soc->tx_comp_ring[ring_id].hal_srng;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s %d : HAL RING Access Failed -- %p\n",
				__func__, __LINE__, hal_srng);
		return 0;
	}

	num_processed = 0;

	/* Find head descriptor from completion ring */
	while (qdf_likely(tx_comp_hal_desc =
			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {

		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication, skip to next descriptor */
		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"Tx comp release_src != TQM | FW");
			/* TODO Handle Freeing of the buffer in descriptor */
			continue;
		}

		/* Get descriptor id */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;

		/* Pool ID is out of limit. Error */
		if (pool_id >= wlan_cfg_get_num_tx_desc_pool(
					soc->wlan_cfg_ctx)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"TX COMP pool id %d not valid",
					pool_id);
			/* Check if assert aborts execution, if not handle
			 * return here */
			QDF_ASSERT(0);
		}

		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS,
				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS);

		/* Pool id is not matching. Error */
		if (tx_desc && (tx_desc->pool_id != pool_id)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Tx Comp pool id %d not matched %d",
					pool_id, tx_desc->pool_id);
			/* Check if assert aborts execution, if not handle
			 * return here */
			QDF_ASSERT(0);
		}

		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_FATAL,
					"Txdesc invalid, flgs = %x, id = %d",
					tx_desc->flags, tx_desc_id);
			qdf_assert_always(0);
		}

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
					htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
					htt_tx_status);
		} else {
			tx_desc->next = NULL;

			/* First ring descriptor of the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
			} else {
				tail_desc->next = tx_desc;
			}
			tail_desc = tx_desc;

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					&tx_desc->comp, soc->process_tx_status);
		}

		num_processed++;

		/* Stop processing once the given quota is exhausted */
		if (num_processed >= budget)
			break;
	}

	hal_srng_access_end(soc->hal_soc, hal_srng);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc(soc, head_desc);

	return num_processed;
}

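/*
 * Scheduling sketch (assumption: the caller runs this from a NAPI-style
 * poll context and spreads one budget across completion rings):
 *
 *	uint32_t done = dp_tx_comp_handler(soc, ring_id, budget);
 *
 *	budget -= done;		// leftover quota goes to the next ring
 *
 * The cookie in each WBM release descriptor is decomposed with the
 * DP_TX_DESC_ID_{POOL,PAGE,OFFSET}_MASK/OS pairs to locate the SW
 * descriptor, as the loop above does.
 */
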
/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
			HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
			vdev->vdev_id);

	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
			DP_SW2HW_MACID(vdev->pdev->pdev_id));

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
			vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if (vdev->opmode == wlan_op_mode_sta &&
			(!vdev->wds_enabled || vdev->proxysta_vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
}

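/*
 * Summary of the selection above:
 *
 *	STA + TDLS link connected   -> ADDRX_EN | ADDRY_EN
 *	STA, non-WDS or ProxySTA    -> ADDRY_EN (SA-based search)
 *	all other VAP modes         -> ADDRX_EN (DA-based search)
 */
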
/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_attach() - attach pdev to dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_exception);
	qdf_atomic_init(&pdev->num_tx_outstanding);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_detach() - detach pdev from dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
{
	/* TODO: What should be done here? */
	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
		int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
		int num_desc)
{
	uint8_t i;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s Tx Desc Pool alloc %d failed %p\n",
					__func__, i, soc);
			return ENOMEM;
		}
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"%s Tx Desc Pool Free failed\n",
					__func__);
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_soc_detach() - detach soc from dp tx
 * @soc: core txrx main context
 *
 * This function detaches dp tx from the main device context,
 * freeing the dp tx resources allocated at attach time
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint16_t num_desc;
	uint16_t num_ext_desc;
	uint8_t i;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_delete_static_pools(soc, num_pool);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"%s Tx Ext Desc Pool Free failed\n",
					__func__);
			return QDF_STATUS_E_RESOURCES;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s MSDU Ext Desc Pool %d Free descs = %d\n",
			__func__, num_pool, num_ext_desc);

	for (i = 0; i < num_pool; i++) {
		dp_tx_tso_desc_pool_free(soc, i);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Desc Pool %d Free descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++)
		dp_tx_tso_num_seg_pool_free(soc, i);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
			__func__, num_pool, num_desc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_soc_attach() - attach soc to dp tx
 * @soc: core txrx main context
 *
 * This function attaches dp tx to the main device context,
 * allocating dp tx resources and initializing them
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail;

	dp_tx_flow_control_init(soc);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
			__func__, num_pool, num_desc);

	/* Allocate extension tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"MSDU Ext Desc Pool alloc %d failed %p\n",
					i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
			__func__, num_pool, num_ext_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"TSO Desc Pool alloc %d failed %p\n",
					i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Desc Alloc %d, descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"TSO Num of seg Pool alloc %d failed %p\n",
					i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Num of seg pool Alloc %d, descs = %d\n",
			__func__, num_pool, num_desc);

	/* Initialize descriptors in TCL Rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			hal_tx_init_data_ring(soc->hal_soc,
					soc->tcl_data_ring[i].hal_srng);
		}
	}

	/*
	 * TODO: Add a runtime config option to enable this.
	 *
	 * Due to multiple issues on NPR EMU, enable it selectively
	 * only for NPR EMU; this should be removed once NPR platforms
	 * are stable.
	 */
	soc->process_tx_status = 1;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s HAL Tx init Success\n", __func__);

	return QDF_STATUS_SUCCESS;

fail:
	/* Detach will take care of freeing only allocated resources */
	dp_tx_soc_detach(soc);
	return QDF_STATUS_E_RESOURCES;
}

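/*
 * Bring-up order sketch (assumption: this mirrors how the higher layer
 * wires the attach hooks together; error handling elided):
 *
 *	if (dp_tx_soc_attach(soc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_RESOURCES;
 *	dp_tx_pdev_attach(pdev);	// per-pdev rings and counters
 *	dp_tx_vdev_attach(vdev);	// per-VAP HTT TCL metadata
 */
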
/**
 * dp_tx_me_mem_free(): Function to free allocated memory in mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: Pointer to the head of list
 *
 * Return: void
 */
static inline void dp_tx_me_mem_free(struct dp_pdev *pdev,
		struct dp_tx_seg_info_s *seg_info_head)
{
	struct dp_tx_me_buf_t *mc_uc_buf;
	struct dp_tx_seg_info_s *seg_info_new = NULL;
	qdf_nbuf_t nbuf = NULL;
	uint64_t phy_addr;

	while (seg_info_head) {
		nbuf = seg_info_head->nbuf;
		mc_uc_buf = (struct dp_tx_me_buf_t *)
			seg_info_head->frags[0].vaddr;
		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
				phy_addr,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
		dp_tx_me_free_buf(pdev, mc_uc_buf);
		qdf_nbuf_free(nbuf);
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
}

/**
 * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No of clients
 *
 * Return: no of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct ether_header *eh;
	uint8_t *data;
	uint16_t len;

	/* reference to frame dst addr */
	uint8_t *dstmac;
	/* copy of original frame src addr */
	uint8_t srcmac[DP_MAC_ADDR_LEN];

	/* local index into newmac */
	uint8_t new_mac_idx = 0;

	struct dp_tx_me_buf_t *mc_uc_buf;
	qdf_nbuf_t nbuf_clone;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s *seg_info_head = NULL;
	struct dp_tx_seg_info_s *seg_info_tail = NULL;
	struct dp_tx_seg_info_s *seg_info_new;
	struct dp_tx_frag_info_s data_frag;
	qdf_dma_addr_t paddr_data;
	qdf_dma_addr_t paddr_mcbuf = 0;
	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
	QDF_STATUS status;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);

	len = qdf_nbuf_len(nbuf);
	data = qdf_nbuf_data(nbuf);

	status = qdf_nbuf_map(vdev->osdev, nbuf,
			QDF_DMA_TO_DEVICE);

	if (status) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Mapping failure Error:%d", status);
		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
		return 0;
	}

	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;

	/* preparing data fragment */
	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
	data_frag.paddr_lo = (uint32_t)paddr_data;
	data_frag.paddr_hi = ((uint64_t)paddr_data & 0xffffffff00000000) >> 32;
	data_frag.len = len - DP_MAC_ADDR_LEN;

	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
		dstmac = newmac[new_mac_idx];
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"added mac addr (%pM)", dstmac);

		/* Check for NULL Mac Address */
		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
			continue;

		/* frame to self mac. skip */
		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
			continue;

		/*
		 * TODO: optimize to avoid malloc in per-packet path
		 * For eg. seg_pool can be made part of vdev structure
		 */
		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));

		if (!seg_info_new) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"alloc failed");
			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
			goto fail_seg_alloc;
		}

		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
		if (mc_uc_buf == NULL)
			goto fail_buf_alloc;

		/*
		 * TODO: Check if we need to clone the nbuf
		 * Or can we just use the reference for all cases
		 */
		if (new_mac_idx < (new_mac_cnt - 1)) {
			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);

			if (nbuf_clone == NULL) {
				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
				goto fail_clone;
			}
		} else {
			/*
			 * Update the ref
			 * to account for frame sent without cloning
			 */
			qdf_nbuf_ref(nbuf);
			nbuf_clone = nbuf;
		}

		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);

		status = qdf_mem_map_nbytes_single(vdev->osdev,
				mc_uc_buf->data, QDF_DMA_TO_DEVICE,
				DP_MAC_ADDR_LEN, &paddr_mcbuf);

		if (status) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"Mapping failure Error:%d", status);
			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
			goto fail_map;
		}

		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
		seg_info_new->frags[0].paddr_hi =
			((uint64_t)paddr_mcbuf & 0xffffffff00000000) >> 32;
		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;

		seg_info_new->frags[1] = data_frag;
		seg_info_new->nbuf = nbuf_clone;
		seg_info_new->frag_cnt = 2;
		seg_info_new->total_len = len;

		seg_info_new->next = NULL;

		if (seg_info_head == NULL)
			seg_info_head = seg_info_new;
		else
			seg_info_tail->next = seg_info_new;

		seg_info_tail = seg_info_new;
	}

	if (!seg_info_head)
		return 0;

	msdu_info.u.sg_info.curr_seg = seg_info_head;
	msdu_info.num_seg = new_mac_cnt;
	msdu_info.frm_type = dp_tx_frm_me;

	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	while (seg_info_head->next) {
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
	qdf_mem_free(seg_info_head);

	return new_mac_cnt;

fail_map:
	qdf_nbuf_free(nbuf_clone);

fail_clone:
	dp_tx_me_free_buf(pdev, mc_uc_buf);

fail_buf_alloc:
	qdf_mem_free(seg_info_new);

fail_seg_alloc:
	dp_tx_me_mem_free(pdev, seg_info_head);
	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	return 0;
}

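/*
 * Usage sketch (hedged; the client table below is fabricated purely for
 * illustration): the ME path takes one multicast frame plus the table of
 * receiver MACs and reports how many unicast copies were queued:
 *
 *	uint8_t clients[2][DP_MAC_ADDR_LEN] = {
 *		{ 0x00, 0x03, 0x7f, 0x01, 0x02, 0x03 },
 *		{ 0x00, 0x03, 0x7f, 0x0a, 0x0b, 0x0c },
 *	};
 *	uint16_t sent;
 *
 *	sent = dp_tx_me_send_convert_ucast(vdev_handle, mcast_nbuf,
 *			clients, 2);
 *	if (sent == 0)
 *		qdf_nbuf_free(mcast_nbuf);	// assumption: caller still
 *						// owns the frame on failure
 */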