ol_tx.c

  1. /*
  2. * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. /* OS abstraction libraries */
  27. #include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
  28. #include <cdf_atomic.h> /* cdf_atomic_read, etc. */
  29. #include <cdf_util.h> /* cdf_unlikely */
  30. /* APIs for other modules */
  31. #include <htt.h> /* HTT_TX_EXT_TID_MGMT */
  32. #include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
  33. #include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
  34. #include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
  35. /* internal header files relevant for all systems */
  36. #include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
  37. #include <ol_txrx_types.h> /* pdev stats */
  38. #include <ol_tx_desc.h> /* ol_tx_desc */
  39. #include <ol_tx_send.h> /* ol_tx_send */
  40. #include <ol_txrx.h>
  41. /* internal header files relevant only for HL systems */
  42. #include <ol_tx_queue.h> /* ol_tx_enqueue */
  43. /* internal header files relevant only for specific systems (Pronto) */
  44. #include <ol_txrx_encap.h> /* OL_TX_ENCAP, etc */
  45. #include <ol_tx.h>
  46. #ifdef WLAN_FEATURE_FASTPATH
  47. #include <hif.h> /* HIF_DEVICE */
  48. #include <htc_api.h> /* Layering violation, but required for fast path */
  49. #include <htt_internal.h>
  50. #include <htt_types.h> /* htc_endpoint */
  51. int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
  52. unsigned int num_msdus, unsigned int transfer_id);
  53. #endif /* WLAN_FEATURE_FASTPATH */
  54. /*
  55. * The TXRX module doesn't accept tx frames unless the target has
  56. * enough descriptors for them.
  57. * For LL, the TXRX descriptor pool is sized to match the target's
  58. * descriptor pool. Hence, if the descriptor allocation in TXRX
  59. * succeeds, that guarantees that the target has room to accept
  60. * the new tx frame.
  61. */
  62. #define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
  63. do { \
  64. struct ol_txrx_pdev_t *pdev = vdev->pdev; \
  65. (msdu_info)->htt.info.frame_type = pdev->htt_pkt_type; \
  66. tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); \
  67. if (cdf_unlikely(!tx_desc)) { \
  68. TXRX_STATS_MSDU_LIST_INCR( \
  69. pdev, tx.dropped.host_reject, msdu); \
  70. return msdu; /* the list of unaccepted MSDUs */ \
  71. } \
  72. } while (0)
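/*
 * ol_tx_prepare_tso - if the msdu is a TSO frame, allocate one TSO
 * segment element per segment from the pdev's pool, build the segment
 * list via cdf_nbuf_get_tso_info, and point curr_seg at the first
 * segment; otherwise mark the frame as non-TSO with a single segment.
 */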
  73. #define ol_tx_prepare_tso(vdev, msdu, msdu_info) \
  74. do { \
  75. msdu_info.tso_info.curr_seg = NULL; \
  76. if (cdf_nbuf_is_tso(msdu)) { \
  77. int num_seg = cdf_nbuf_get_tso_num_seg(msdu); \
  78. msdu_info.tso_info.tso_seg_list = NULL; \
  79. msdu_info.tso_info.num_segs = num_seg; \
  80. while (num_seg) { \
  81. struct cdf_tso_seg_elem_t *tso_seg = \
  82. ol_tso_alloc_segment(vdev->pdev); \
  83. if (tso_seg) { \
  84. tso_seg->next = \
  85. msdu_info.tso_info.tso_seg_list; \
  86. msdu_info.tso_info.tso_seg_list \
  87. = tso_seg; \
  88. num_seg--; \
  89. } else {\
  90. cdf_print("TSO seg alloc failed!\n"); \
  91. } \
  92. } \
  93. cdf_nbuf_get_tso_info(vdev->pdev->osdev, \
  94. msdu, &msdu_info.tso_info); \
  95. msdu_info.tso_info.curr_seg = \
  96. msdu_info.tso_info.tso_seg_list; \
  97. num_seg = msdu_info.tso_info.num_segs; \
  98. } else { \
  99. msdu_info.tso_info.is_tso = 0; \
  100. msdu_info.tso_info.num_segs = 1; \
  101. } \
  102. } while (0)
  103. /**
  104. * ol_tx_send_data_frame() - send data frame
  105. * @sta_id: local id of the destination station (peer)
  106. * @skb: tx frame (skb) to send
  107. * @proto_type: protocol type of the tx frame
  108. *
  109. * Return: NULL on success, skb on failure
  110. */
  111. cdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, cdf_nbuf_t skb,
  112. uint8_t proto_type)
  113. {
  114. void *cdf_ctx = cds_get_context(CDF_MODULE_ID_CDF_DEVICE);
  115. struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  116. struct ol_txrx_peer_t *peer;
  117. cdf_nbuf_t ret;
  118. CDF_STATUS status;
  119. if (cdf_unlikely(!pdev)) {
  120. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
  121. "%s:pdev is null", __func__);
  122. return skb;
  123. }
  124. if (cdf_unlikely(!cdf_ctx)) {
  125. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  126. "%s:cdf_ctx is null", __func__);
  127. return skb;
  128. }
  129. if (sta_id >= WLAN_MAX_STA_COUNT) {
  130. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
  131. "%s:Invalid sta id", __func__);
  132. return skb;
  133. }
  134. peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
  135. if (!peer) {
  136. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
  137. "%s:Invalid peer", __func__);
  138. return skb;
  139. }
  140. if (peer->state < ol_txrx_peer_state_conn) {
  141. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
  142. "%s: station to be yet registered..dropping pkt", __func__);
  143. return skb;
  144. }
  145. status = cdf_nbuf_map_single(cdf_ctx, skb, CDF_DMA_TO_DEVICE);
  146. if (cdf_unlikely(status != CDF_STATUS_SUCCESS)) {
  147. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
  148. "%s: nbuf map failed", __func__);
  149. return skb;
  150. }
  151. cdf_nbuf_trace_set_proto_type(skb, proto_type);
  152. if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
  153. && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
  154. && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
  155. cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
  156. /* Terminate the (single-element) list of tx frames */
  157. cdf_nbuf_set_next(skb, NULL);
  158. ret = OL_TX_LL(peer->vdev, skb);
  159. if (ret) {
  160. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
  161. "%s: Failed to tx", __func__);
  162. cdf_nbuf_unmap_single(cdf_ctx, ret, CDF_DMA_TO_DEVICE);
  163. return ret;
  164. }
  165. return NULL;
  166. }
  167. #ifdef IPA_OFFLOAD
  168. /**
  169. * ol_tx_send_ipa_data_frame() - send IPA data frame
  170. * @vdev: vdev handle on which the frame is to be sent
  171. * @skb: tx frame (skb) to send
  172. *
  173. * Return: NULL on success, skb on failure
  174. */
  175. cdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
  176. cdf_nbuf_t skb)
  177. {
  178. ol_txrx_pdev_handle pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  179. cdf_nbuf_t ret;
  180. if (cdf_unlikely(!pdev)) {
  181. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  182. "%s: pdev is NULL", __func__);
  183. return skb;
  184. }
  185. if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
  186. && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
  187. && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
  188. cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
  189. /* Terminate the (single-element) list of tx frames */
  190. cdf_nbuf_set_next(skb, NULL);
  191. ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
  192. if (ret) {
  193. TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
  194. "%s: Failed to tx", __func__);
  195. return ret;
  196. }
  197. return NULL;
  198. }
  199. #endif
  200. #if defined(FEATURE_TSO)
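/**
 * ol_tx_ll() - transmit a list of MSDUs on a low-latency (LL) vdev
 * @vdev: vdev handle on which the MSDUs are to be transmitted
 * @msdu_list: NULL-terminated list of tx frames
 *
 * For TSO frames, one tx descriptor is allocated and sent per segment.
 *
 * Return: NULL if all MSDUs were accepted, else the list of MSDUs
 *	that could not be accepted
 */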
  201. cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
  202. {
  203. cdf_nbuf_t msdu = msdu_list;
  204. struct ol_txrx_msdu_info_t msdu_info;
  205. msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
  206. msdu_info.htt.action.tx_comp_req = 0;
  207. /*
  208. * The msdu_list variable could be used instead of the msdu var,
  209. * but just to clarify which operations are done on a single MSDU
  210. * vs. a list of MSDUs, use a distinct variable for single MSDUs
  211. * within the list.
  212. */
  213. while (msdu) {
  214. cdf_nbuf_t next;
  215. struct ol_tx_desc_t *tx_desc;
  216. int segments = 1;
  217. msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
  218. msdu_info.peer = NULL;
  219. ol_tx_prepare_tso(vdev, msdu, msdu_info);
  220. segments = msdu_info.tso_info.num_segs;
  221. /*
  222. * The netbuf may get linked into a different list inside the
  223. * ol_tx_send function, so store the next pointer before the
  224. * tx_send call.
  225. */
  226. next = cdf_nbuf_next(msdu);
  227. /* init the current segment to the 1st segment in the list */
  228. while (segments) {
  229. if (msdu_info.tso_info.curr_seg)
  230. NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
  231. curr_seg->seg.tso_frags[0].paddr_low_32;
  232. segments--;
  233. /**
  234. * if this is a jumbo nbuf, then increment the number
  235. * of nbuf users for each additional segment of the msdu.
  236. * This will ensure that the skb is freed only after
  237. * receiving tx completion for all segments of an nbuf
  238. */
  239. if (segments)
  240. cdf_nbuf_inc_users(msdu);
  241. ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
  242. /*
  243. * If debug display is enabled, show the meta-data being
  244. * downloaded to the target via the HTT tx descriptor.
  245. */
  246. htt_tx_desc_display(tx_desc->htt_tx_desc);
  247. ol_tx_send(vdev->pdev, tx_desc, msdu);
  248. if (msdu_info.tso_info.curr_seg) {
  249. msdu_info.tso_info.curr_seg =
  250. msdu_info.tso_info.curr_seg->next;
  251. }
  252. cdf_nbuf_dec_num_frags(msdu);
  253. if (msdu_info.tso_info.is_tso) {
  254. TXRX_STATS_TSO_INC_SEG(vdev->pdev);
  255. TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
  256. }
  257. } /* while segments */
  258. msdu = next;
  259. if (msdu_info.tso_info.is_tso) {
  260. TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
  261. TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
  262. }
  263. } /* while msdus */
  264. return NULL; /* all MSDUs were accepted */
  265. }
  266. #else /* TSO */
  267. cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
  268. {
  269. cdf_nbuf_t msdu = msdu_list;
  270. struct ol_txrx_msdu_info_t msdu_info;
  271. msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
  272. msdu_info.htt.action.tx_comp_req = 0;
  273. msdu_info.tso_info.is_tso = 0;
  274. /*
  275. * The msdu_list variable could be used instead of the msdu var,
  276. * but just to clarify which operations are done on a single MSDU
  277. * vs. a list of MSDUs, use a distinct variable for single MSDUs
  278. * within the list.
  279. */
  280. while (msdu) {
  281. cdf_nbuf_t next;
  282. struct ol_tx_desc_t *tx_desc;
  283. msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
  284. msdu_info.peer = NULL;
  285. ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
  286. /*
  287. * If debug display is enabled, show the meta-data being
  288. * downloaded to the target via the HTT tx descriptor.
  289. */
  290. htt_tx_desc_display(tx_desc->htt_tx_desc);
  291. /*
  292. * The netbuf may get linked into a different list inside the
  293. * ol_tx_send function, so store the next pointer before the
  294. * tx_send call.
  295. */
  296. next = cdf_nbuf_next(msdu);
  297. ol_tx_send(vdev->pdev, tx_desc, msdu);
  298. msdu = next;
  299. }
  300. return NULL; /* all MSDUs were accepted */
  301. }
  302. #endif /* TSO */
  303. #ifdef WLAN_FEATURE_FASTPATH
  304. /**
  305. * ol_tx_prepare_ll_fast() - allocate and prepare a Tx descriptor
  306. *
  307. * Allocate and prepare a Tx descriptor with msdu and fragment descriptor
  308. * information.
  309. *
  310. * @pdev: pointer to ol pdev handle
  311. * @vdev: pointer to ol vdev handle
  312. * @msdu: linked list of msdu packets
  313. * @pkt_download_len: packet download length
  314. * @ep_id: endpoint ID
  315. * @msdu_info: Handle to msdu_info
  316. *
  317. * Return: Pointer to Tx descriptor
  318. */
  319. static inline struct ol_tx_desc_t *
  320. ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
  321. ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu,
  322. uint32_t pkt_download_len, uint32_t ep_id,
  323. struct ol_txrx_msdu_info_t *msdu_info)
  324. {
  325. struct ol_tx_desc_t *tx_desc = NULL;
  326. uint32_t *htt_tx_desc;
  327. void *htc_hdr_vaddr;
  328. u_int32_t num_frags, i;
  329. tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
  330. if (cdf_unlikely(!tx_desc))
  331. return NULL;
  332. tx_desc->netbuf = msdu;
  333. if (msdu_info->tso_info.is_tso) {
  334. tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
  335. tx_desc->pkt_type = ol_tx_frm_tso;
  336. TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
  337. } else {
  338. tx_desc->pkt_type = ol_tx_frm_std;
  339. }
  340. htt_tx_desc = tx_desc->htt_tx_desc;
  341. /* Make sure frags num is set to 0 */
  342. /*
  343. * Do this here rather than in hardstart, so
  344. * that we can hopefully take only one cache-miss while
  345. * accessing skb->cb.
  346. */
  347. /* HTT Header */
  348. /* TODO : Take care of multiple fragments */
  349. /* TODO: Precompute and store paddr in ol_tx_desc_t */
  350. /* Virtual address of the HTT/HTC header, added by driver */
  351. htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
  352. htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
  353. tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
  354. &msdu_info->htt, &msdu_info->tso_info,
  355. NULL, vdev->opmode == wlan_op_mode_ocb);
  356. num_frags = cdf_nbuf_get_num_frags(msdu);
  357. /* num_frags are expected to be 2 max */
  358. num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS) ?
  359. CVG_NBUF_MAX_EXTRA_FRAGS : num_frags;
  360. #if defined(HELIUMPLUS_PADDR64)
  361. /*
  362. * Use num_frags - 1, since 1 frag is used to store
  363. * the HTT/HTC descriptor
  364. * Refer to htt_tx_desc_init()
  365. */
  366. htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
  367. num_frags - 1);
  368. #else /* ! defined(HELIUMPLUS_PADDR64) */
  369. htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
  370. num_frags-1);
  371. #endif /* defined(HELIUMPLUS_PADDR64) */
  372. if (msdu_info->tso_info.is_tso) {
  373. htt_tx_desc_fill_tso_info(pdev->htt_pdev,
  374. tx_desc->htt_frag_desc, &msdu_info->tso_info);
  375. TXRX_STATS_TSO_SEG_UPDATE(pdev,
  376. msdu_info->tso_info.curr_seg->seg);
  377. } else {
  378. for (i = 1; i < num_frags; i++) {
  379. cdf_size_t frag_len;
  380. u_int32_t frag_paddr;
  381. frag_len = cdf_nbuf_get_frag_len(msdu, i);
  382. frag_paddr = cdf_nbuf_get_frag_paddr_lo(msdu, i);
  383. #if defined(HELIUMPLUS_PADDR64)
  384. htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
  385. i - 1, frag_paddr, frag_len);
  386. #if defined(HELIUMPLUS_DEBUG)
  387. cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
  388. __func__, __LINE__, tx_desc->htt_frag_desc,
  389. frag_paddr, frag_len);
  390. dump_pkt(msdu, frag_paddr, 64);
  391. #endif /* HELIUMPLUS_DEBUG */
  392. #else /* ! defined(HELIUMPLUS_PADDR64) */
  393. htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
  394. i - 1, frag_paddr, frag_len);
  395. #endif /* defined(HELIUMPLUS_PADDR64) */
  396. }
  397. }
  398. /*
  399. * Do we want to turn on the word_stream bit-map here? For Linux, non-TSO,
  400. * this is not required. We still have to mark the swap bit correctly
  401. * when posting to the ring.
  402. */
  403. /* Check to make sure the data download length is correct */
  404. /*
  405. * TODO : Can we remove this check and always download a fixed length?
  406. */
  407. if (cdf_unlikely(cdf_nbuf_len(msdu) < pkt_download_len))
  408. pkt_download_len = cdf_nbuf_len(msdu);
  409. /* Fill the HTC header information */
  410. /*
  411. * Passing 0 as the seq_no field, we can probably get away
  412. * with it for the time being, since this is not checked in f/w
  413. */
  414. /* TODO : Prefill this, look at multi-fragment case */
  415. HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);
  416. return tx_desc;
  417. }
  418. #if defined(FEATURE_TSO)
  419. /**
  420. * ol_tx_ll_fast() - update metadata information and send msdu to HIF/CE
  421. *
  422. * @vdev: handle to ol_txrx_vdev_t
  423. * @msdu_list: msdu list to be sent out.
  424. *
  425. * Return: NULL on success; pointer to the nbuf when a frame fails to send.
  426. */
  427. cdf_nbuf_t
  428. ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
  429. {
  430. cdf_nbuf_t msdu = msdu_list;
  431. struct ol_txrx_pdev_t *pdev = vdev->pdev;
  432. uint32_t pkt_download_len =
  433. ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
  434. uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
  435. struct ol_txrx_msdu_info_t msdu_info;
  436. msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
  437. msdu_info.htt.action.tx_comp_req = 0;
  438. /*
  439. * The msdu_list variable could be used instead of the msdu var,
  440. * but just to clarify which operations are done on a single MSDU
  441. * vs. a list of MSDUs, use a distinct variable for single MSDUs
  442. * within the list.
  443. */
  444. while (msdu) {
  445. cdf_nbuf_t next;
  446. struct ol_tx_desc_t *tx_desc;
  447. int segments = 1;
  448. msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
  449. msdu_info.peer = NULL;
  450. ol_tx_prepare_tso(vdev, msdu, msdu_info);
  451. segments = msdu_info.tso_info.num_segs;
  452. /*
  453. * The netbuf may get linked into a different list
  454. * inside the ce_send_fast function, so store the next
  455. * pointer before the ce_send call.
  456. */
  457. next = cdf_nbuf_next(msdu);
  458. /* init the current segment to the 1st segment in the list */
  459. while (segments) {
  460. if (msdu_info.tso_info.curr_seg)
  461. NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
  462. curr_seg->seg.tso_frags[0].paddr_low_32;
  463. segments--;
  464. /**
  465. * if this is a jumbo nbuf, then increment the number
  466. * of nbuf users for each additional segment of the msdu.
  467. * This will ensure that the skb is freed only after
  468. * receiving tx completion for all segments of an nbuf
  469. */
  470. if (segments)
  471. cdf_nbuf_inc_users(msdu);
  472. msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
  473. msdu_info.htt.info.vdev_id = vdev->vdev_id;
  474. msdu_info.htt.action.cksum_offload =
  475. cdf_nbuf_get_tx_cksum(msdu);
  476. switch (cdf_nbuf_get_exemption_type(msdu)) {
  477. case CDF_NBUF_EXEMPT_NO_EXEMPTION:
  478. case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
  479. /* We want to encrypt this frame */
  480. msdu_info.htt.action.do_encrypt = 1;
  481. break;
  482. case CDF_NBUF_EXEMPT_ALWAYS:
  483. /* We don't want to encrypt this frame */
  484. msdu_info.htt.action.do_encrypt = 0;
  485. break;
  486. default:
  487. msdu_info.htt.action.do_encrypt = 1;
  488. cdf_assert(0);
  489. break;
  490. }
  491. tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
  492. pkt_download_len, ep_id,
  493. &msdu_info);
  494. if (cdf_likely(tx_desc)) {
  495. /*
  496. * If debug display is enabled, show the meta
  497. * data being downloaded to the target via the
  498. * HTT tx descriptor.
  499. */
  500. htt_tx_desc_display(tx_desc->htt_tx_desc);
  501. if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu,
  502. 1, ep_id))) {
  503. /*
  504. * The packet could not be sent.
  505. * Free the descriptor, return the
  506. * packet to the caller.
  507. */
  508. ol_tx_desc_free(pdev, tx_desc);
  509. return msdu;
  510. }
  511. if (msdu_info.tso_info.curr_seg) {
  512. msdu_info.tso_info.curr_seg =
  513. msdu_info.tso_info.curr_seg->next;
  514. }
  515. if (msdu_info.tso_info.is_tso) {
  516. cdf_nbuf_dec_num_frags(msdu);
  517. TXRX_STATS_TSO_INC_SEG(vdev->pdev);
  518. TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
  519. }
  520. } else {
  521. TXRX_STATS_MSDU_LIST_INCR(
  522. pdev, tx.dropped.host_reject, msdu);
  523. /* the list of unaccepted MSDUs */
  524. return msdu;
  525. }
  526. } /* while segments */
  527. msdu = next;
  528. if (msdu_info.tso_info.is_tso) {
  529. TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
  530. TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
  531. }
  532. } /* while msdus */
  533. return NULL; /* all MSDUs were accepted */
  534. }
  535. #else
  536. cdf_nbuf_t
  537. ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
  538. {
  539. cdf_nbuf_t msdu = msdu_list;
  540. struct ol_txrx_pdev_t *pdev = vdev->pdev;
  541. uint32_t pkt_download_len =
  542. ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
  543. uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
  544. struct ol_txrx_msdu_info_t msdu_info;
  545. msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
  546. msdu_info.htt.action.tx_comp_req = 0;
  547. msdu_info.tso_info.is_tso = 0;
  548. /*
  549. * The msdu_list variable could be used instead of the msdu var,
  550. * but just to clarify which operations are done on a single MSDU
  551. * vs. a list of MSDUs, use a distinct variable for single MSDUs
  552. * within the list.
  553. */
  554. while (msdu) {
  555. cdf_nbuf_t next;
  556. struct ol_tx_desc_t *tx_desc;
  557. msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
  558. msdu_info.peer = NULL;
  559. msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
  560. msdu_info.htt.info.vdev_id = vdev->vdev_id;
  561. msdu_info.htt.action.cksum_offload =
  562. cdf_nbuf_get_tx_cksum(msdu);
  563. switch (cdf_nbuf_get_exemption_type(msdu)) {
  564. case CDF_NBUF_EXEMPT_NO_EXEMPTION:
  565. case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
  566. /* We want to encrypt this frame */
  567. msdu_info.htt.action.do_encrypt = 1;
  568. break;
  569. case CDF_NBUF_EXEMPT_ALWAYS:
  570. /* We don't want to encrypt this frame */
  571. msdu_info.htt.action.do_encrypt = 0;
  572. break;
  573. default:
  574. msdu_info.htt.action.do_encrypt = 1;
  575. cdf_assert(0);
  576. break;
  577. }
  578. tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
  579. pkt_download_len, ep_id,
  580. &msdu_info);
  581. if (cdf_likely(tx_desc)) {
  582. /*
  583. * If debug display is enabled, show the meta-data being
  584. * downloaded to the target via the HTT tx descriptor.
  585. */
  586. htt_tx_desc_display(tx_desc->htt_tx_desc);
  587. /*
  588. * The netbuf may get linked into a different list
  589. * inside the ce_send_fast function, so store the next
  590. * pointer before the ce_send call.
  591. */
  592. next = cdf_nbuf_next(msdu);
  593. if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
  594. ep_id))) {
  595. /* The packet could not be sent */
  596. /* Free the descriptor, return the packet to the
  597. * caller */
  598. ol_tx_desc_free(pdev, tx_desc);
  599. return msdu;
  600. }
  601. msdu = next;
  602. } else {
  603. TXRX_STATS_MSDU_LIST_INCR(
  604. pdev, tx.dropped.host_reject, msdu);
  605. return msdu; /* the list of unaccepted MSDUs */
  606. }
  607. }
  608. return NULL; /* all MSDUs were accepted */
  609. }
  610. #endif /* FEATURE_TSO */
  611. #endif /* WLAN_FEATURE_FASTPATH */
  612. #ifdef WLAN_FEATURE_FASTPATH
  613. /**
  614. * ol_tx_ll_wrapper() - wrapper that calls ol_tx_ll_fast() or ol_tx_ll()
  615. *
  616. */
  617. static inline cdf_nbuf_t
  618. ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
  619. {
  620. struct ol_softc *hif_device =
  621. (struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);
  622. if (cdf_likely(hif_device && hif_device->fastpath_mode_on))
  623. msdu_list = ol_tx_ll_fast(vdev, msdu_list);
  624. else
  625. msdu_list = ol_tx_ll(vdev, msdu_list);
  626. return msdu_list;
  627. }
  628. #else
  629. static inline cdf_nbuf_t
  630. ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
  631. {
  632. return ol_tx_ll(vdev, msdu_list);
  633. }
  634. #endif /* WLAN_FEATURE_FASTPATH */
  635. #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
  636. #define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
  637. #define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
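/*
 * ol_tx_vdev_ll_pause_queue_send_base() - if the vdev is not paused,
 * drain as much of the vdev's pause-queue backlog as the available tx
 * descriptors allow (leaving a margin for other vdevs), and restart the
 * pause-queue timer if frames remain queued.
 */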
  638. static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
  639. {
  640. int max_to_accept;
  641. cdf_spin_lock_bh(&vdev->ll_pause.mutex);
  642. if (vdev->ll_pause.paused_reason) {
  643. cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
  644. return;
  645. }
  646. /*
  647. * Send as much of the backlog as possible, but leave some margin
  648. * of unallocated tx descriptors that can be used for new frames
  649. * being transmitted by other vdevs.
  650. * Ideally there would be a scheduler, which would not only leave
  651. * some margin for new frames for other vdevs, but also would
  652. * fairly apportion the tx descriptors between multiple vdevs that
  653. * have backlogs in their pause queues.
  654. * However, the fairness benefit of having a scheduler for frames
  655. * from multiple vdev's pause queues is not sufficient to outweigh
  656. * the extra complexity.
  657. */
  658. max_to_accept = vdev->pdev->tx_desc.num_free -
  659. OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
  660. while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
  661. cdf_nbuf_t tx_msdu;
  662. max_to_accept--;
  663. vdev->ll_pause.txq.depth--;
  664. tx_msdu = vdev->ll_pause.txq.head;
  665. if (tx_msdu) {
  666. vdev->ll_pause.txq.head = cdf_nbuf_next(tx_msdu);
  667. if (NULL == vdev->ll_pause.txq.head)
  668. vdev->ll_pause.txq.tail = NULL;
  669. cdf_nbuf_set_next(tx_msdu, NULL);
  670. NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
  671. NBUF_TX_PKT_TXRX_DEQUEUE);
  672. tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
  673. /*
  674. * It is unexpected that ol_tx_ll would reject the frame
  675. * since we checked that there's room for it, though
  676. * there's an infinitesimal possibility that between the
  677. * time we checked the room available and now, a
  678. * concurrent batch of tx frames used up all the room.
  679. * For simplicity, just drop the frame.
  680. */
  681. if (tx_msdu) {
  682. cdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
  683. CDF_DMA_TO_DEVICE);
  684. cdf_nbuf_tx_free(tx_msdu, NBUF_PKT_ERROR);
  685. }
  686. }
  687. }
  688. if (vdev->ll_pause.txq.depth) {
  689. cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
  690. cdf_softirq_timer_start(&vdev->ll_pause.timer,
  691. OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
  692. vdev->ll_pause.is_q_timer_on = true;
  693. if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
  694. vdev->ll_pause.q_overflow_cnt++;
  695. }
  696. cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
  697. }
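/*
 * ol_tx_vdev_pause_queue_append() - append frames from msdu_list to the
 * vdev's pause queue until the queue reaches its maximum depth,
 * optionally (re)starting the pause-queue timer; return any frames that
 * did not fit in the queue.
 */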
  698. static cdf_nbuf_t
  699. ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
  700. cdf_nbuf_t msdu_list, uint8_t start_timer)
  701. {
  702. cdf_spin_lock_bh(&vdev->ll_pause.mutex);
  703. while (msdu_list &&
  704. vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
  705. cdf_nbuf_t next = cdf_nbuf_next(msdu_list);
  706. NBUF_UPDATE_TX_PKT_COUNT(msdu_list, NBUF_TX_PKT_TXRX_ENQUEUE);
  707. DPTRACE(cdf_dp_trace(msdu_list,
  708. CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
  709. (uint8_t *)(cdf_nbuf_data(msdu_list)),
  710. sizeof(cdf_nbuf_data(msdu_list))));
  711. vdev->ll_pause.txq.depth++;
  712. if (!vdev->ll_pause.txq.head) {
  713. vdev->ll_pause.txq.head = msdu_list;
  714. vdev->ll_pause.txq.tail = msdu_list;
  715. } else {
  716. cdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
  717. }
  718. vdev->ll_pause.txq.tail = msdu_list;
  719. msdu_list = next;
  720. }
  721. if (vdev->ll_pause.txq.tail)
  722. cdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);
  723. if (start_timer) {
  724. cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
  725. cdf_softirq_timer_start(&vdev->ll_pause.timer,
  726. OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
  727. vdev->ll_pause.is_q_timer_on = true;
  728. }
  729. cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
  730. return msdu_list;
  731. }
  732. /*
  733. * Store up the tx frame in the vdev's tx queue if the vdev is paused.
  734. * If there are too many frames in the tx queue, reject it.
  735. */
  736. cdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
  737. {
  738. uint16_t eth_type;
  739. uint32_t paused_reason;
  740. if (msdu_list == NULL)
  741. return NULL;
  742. paused_reason = vdev->ll_pause.paused_reason;
  743. if (paused_reason) {
  744. if (cdf_unlikely((paused_reason &
  745. OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
  746. paused_reason)) {
  747. eth_type = (((struct ethernet_hdr_t *)
  748. cdf_nbuf_data(msdu_list))->
  749. ethertype[0] << 8) |
  750. (((struct ethernet_hdr_t *)
  751. cdf_nbuf_data(msdu_list))->ethertype[1]);
  752. if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
  753. msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
  754. return msdu_list;
  755. }
  756. }
  757. msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
  758. } else {
  759. if (vdev->ll_pause.txq.depth > 0 ||
  760. vdev->pdev->tx_throttle.current_throttle_level !=
  761. THROTTLE_LEVEL_0) {
  762. /* not paused, but there is a backlog of frames
  763. from a prior pause or throttle-off phase */
  764. msdu_list = ol_tx_vdev_pause_queue_append(
  765. vdev, msdu_list, 0);
  766. /* if throttle is disabled or phase is "on",
  767. send the frame */
  768. if (vdev->pdev->tx_throttle.current_throttle_level ==
  769. THROTTLE_LEVEL_0 ||
  770. vdev->pdev->tx_throttle.current_throttle_phase ==
  771. THROTTLE_PHASE_ON) {
  772. /* send as many frames as possible
  773. from the vdev's backlog */
  774. ol_tx_vdev_ll_pause_queue_send_base(vdev);
  775. }
  776. } else {
  777. /* not paused, no throttle and no backlog -
  778. send the new frames */
  779. msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
  780. }
  781. }
  782. return msdu_list;
  783. }
  784. /*
  785. * Run through the transmit queues for all the vdevs and
  786. * send the pending frames
  787. */
  788. void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
  789. {
  790. int max_to_send; /* upper limit on the number of frames to send */
  791. cdf_nbuf_t tx_msdu;
  792. struct ol_txrx_vdev_t *vdev = NULL;
  793. uint8_t more;
  794. if (NULL == pdev)
  795. return;
  796. if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
  797. return;
  798. /* ensure that we send no more than tx_threshold frames at once */
  799. max_to_send = pdev->tx_throttle.tx_threshold;
  800. /* round robin through the vdev queues for the given pdev */
  801. /* Potential improvement: download several frames from the same vdev
  802. at a time, since it is more likely that those frames could be
  803. aggregated together. Also, remember which vdev was serviced last,
  804. so the next call to this function can resume the round-robin
  805. traversal where the current invocation left off */
  806. do {
  807. more = 0;
  808. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  809. cdf_spin_lock_bh(&vdev->ll_pause.mutex);
  810. if (vdev->ll_pause.txq.depth) {
  811. if (vdev->ll_pause.paused_reason) {
  812. cdf_spin_unlock_bh(&vdev->ll_pause.
  813. mutex);
  814. continue;
  815. }
  816. tx_msdu = vdev->ll_pause.txq.head;
  817. if (NULL == tx_msdu) {
  818. cdf_spin_unlock_bh(&vdev->ll_pause.
  819. mutex);
  820. continue;
  821. }
  822. max_to_send--;
  823. vdev->ll_pause.txq.depth--;
  824. vdev->ll_pause.txq.head =
  825. cdf_nbuf_next(tx_msdu);
  826. if (NULL == vdev->ll_pause.txq.head)
  827. vdev->ll_pause.txq.tail = NULL;
  828. cdf_nbuf_set_next(tx_msdu, NULL);
  829. tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
  830. /*
  831. * It is unexpected that ol_tx_ll would reject
  832. * the frame, since we checked that there's
  833. * room for it, though there's an infinitesimal
  834. * possibility that between the time we checked
  835. * the room available and now, a concurrent
  836. * batch of tx frames used up all the room.
  837. * For simplicity, just drop the frame.
  838. */
  839. if (tx_msdu) {
  840. cdf_nbuf_unmap(pdev->osdev, tx_msdu,
  841. CDF_DMA_TO_DEVICE);
  842. cdf_nbuf_tx_free(tx_msdu,
  843. NBUF_PKT_ERROR);
  844. }
  845. }
  846. /* check if there are more msdus to transmit */
  847. if (vdev->ll_pause.txq.depth)
  848. more = 1;
  849. cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
  850. }
  851. } while (more && max_to_send);
  852. vdev = NULL;
  853. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  854. cdf_spin_lock_bh(&vdev->ll_pause.mutex);
  855. if (vdev->ll_pause.txq.depth) {
  856. cdf_softirq_timer_cancel(&pdev->tx_throttle.tx_timer);
  857. cdf_softirq_timer_start(
  858. &pdev->tx_throttle.tx_timer,
  859. OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
  860. cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
  861. return;
  862. }
  863. cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
  864. }
  865. }
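/*
 * ol_tx_vdev_ll_pause_queue_send() - timer callback that flushes the
 * given vdev's pause queue, unless tx is currently throttled off.
 */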
  866. void ol_tx_vdev_ll_pause_queue_send(void *context)
  867. {
  868. struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
  869. struct ol_txrx_pdev_t *pdev = vdev->pdev;
  870. if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
  871. pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
  872. return;
  873. ol_tx_vdev_ll_pause_queue_send_base(vdev);
  874. }
  875. #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
  876. static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
  877. {
  878. return
  879. tx_spec &
  880. (ol_tx_spec_raw | ol_tx_spec_no_aggr | ol_tx_spec_no_encrypt);
  881. }
  882. static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
  883. {
  884. uint8_t sub_type = 0x1; /* 802.11 MAC header present */
  885. if (tx_spec & ol_tx_spec_no_aggr)
  886. sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
  887. if (tx_spec & ol_tx_spec_no_encrypt)
  888. sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
  889. if (tx_spec & ol_tx_spec_nwifi_no_encrypt)
  890. sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
  891. return sub_type;
  892. }
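/*
 * ol_tx_non_std_ll() - transmit a list of non-standard tx frames
 * (e.g. raw, native WiFi, TSO, or no-free frames) on a LL vdev,
 * applying the HTT packet type and subtype implied by tx_spec.
 */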
  893. cdf_nbuf_t
  894. ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
  895. enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
  896. {
  897. cdf_nbuf_t msdu = msdu_list;
  898. htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
  899. struct ol_txrx_msdu_info_t msdu_info;
  900. msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
  901. msdu_info.htt.action.tx_comp_req = 0;
  902. /*
  903. * The msdu_list variable could be used instead of the msdu var,
  904. * but just to clarify which operations are done on a single MSDU
  905. * vs. a list of MSDUs, use a distinct variable for single MSDUs
  906. * within the list.
  907. */
  908. while (msdu) {
  909. cdf_nbuf_t next;
  910. struct ol_tx_desc_t *tx_desc;
  911. msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
  912. msdu_info.peer = NULL;
  913. msdu_info.tso_info.is_tso = 0;
  914. ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
  915. /*
  916. * The netbuf may get linked into a different list inside the
  917. * ol_tx_send function, so store the next pointer before the
  918. * tx_send call.
  919. */
  920. next = cdf_nbuf_next(msdu);
  921. if (tx_spec != ol_tx_spec_std) {
  922. if (tx_spec & ol_tx_spec_no_free) {
  923. tx_desc->pkt_type = ol_tx_frm_no_free;
  924. } else if (tx_spec & ol_tx_spec_tso) {
  925. tx_desc->pkt_type = ol_tx_frm_tso;
  926. } else if (tx_spec & ol_tx_spec_nwifi_no_encrypt) {
  927. uint8_t sub_type =
  928. ol_txrx_tx_raw_subtype(tx_spec);
  929. htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
  930. htt_pkt_type_native_wifi,
  931. sub_type);
  932. } else if (ol_txrx_tx_is_raw(tx_spec)) {
  933. /* different types of raw frames */
  934. uint8_t sub_type =
  935. ol_txrx_tx_raw_subtype(tx_spec);
  936. htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
  937. htt_pkt_type_raw, sub_type);
  938. }
  939. }
  940. /*
  941. * If debug display is enabled, show the meta-data being
  942. * downloaded to the target via the HTT tx descriptor.
  943. */
  944. htt_tx_desc_display(tx_desc->htt_tx_desc);
  945. ol_tx_send(vdev->pdev, tx_desc, msdu);
  946. msdu = next;
  947. }
  948. return NULL; /* all MSDUs were accepted */
  949. }
  950. #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
  951. #define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
  952. do { \
  953. if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
  954. cdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
  955. ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
  956. if (tx_msdu_info.peer) { \
  957. /* remove the peer reference added above */ \
  958. ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
  959. } \
  960. goto MSDU_LOOP_BOTTOM; \
  961. } \
  962. } while (0)
  963. #else
  964. #define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
  965. #endif
  966. /* tx filtering is handled within the target FW */
  967. #define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */
  968. /**
  969. * parse_ocb_tx_header() - Function to check for OCB
  970. * TX control header on a packet and extract it if present
  971. *
  972. * @msdu: Pointer to OS packet (cdf_nbuf_t)
  973. */
  974. #define OCB_HEADER_VERSION 1
  975. bool parse_ocb_tx_header(cdf_nbuf_t msdu,
  976. struct ocb_tx_ctrl_hdr_t *tx_ctrl)
  977. {
  978. struct ether_header *eth_hdr_p;
  979. struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
  980. /* Check if TX control header is present */
  981. eth_hdr_p = (struct ether_header *) cdf_nbuf_data(msdu);
  982. if (eth_hdr_p->ether_type != CDF_SWAP_U16(ETHERTYPE_OCB_TX))
  983. /* TX control header is not present. Nothing to do. */
  984. return true;
  985. /* Remove the ethernet header */
  986. cdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
  987. /* Parse the TX control header */
  988. tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *) cdf_nbuf_data(msdu);
  989. if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
  990. if (tx_ctrl)
  991. cdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
  992. sizeof(*tx_ctrl_hdr));
  993. } else {
  994. /* The TX control header is invalid. */
  995. return false;
  996. }
  997. /* Remove the TX control header */
  998. cdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
  999. return true;
  1000. }
  1001. cdf_nbuf_t
  1002. ol_tx_non_std(ol_txrx_vdev_handle vdev,
  1003. enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
  1004. {
  1005. return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
  1006. }
  1007. void
  1008. ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
  1009. ol_txrx_data_tx_cb callback, void *ctxt)
  1010. {
  1011. struct ol_txrx_pdev_t *pdev = vdev->pdev;
  1012. pdev->tx_data_callback.func = callback;
  1013. pdev->tx_data_callback.ctxt = ctxt;
  1014. }
  1015. void
  1016. ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
  1017. uint8_t type,
  1018. ol_txrx_mgmt_tx_cb download_cb,
  1019. ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
  1020. {
  1021. TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
  1022. pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
  1023. pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
  1024. pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
  1025. }
  1026. #if defined(HELIUMPLUS_PADDR64)
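/*
 * dump_frag_desc() - debug helper that prints a tx descriptor's HTT
 * fragment descriptor: the six TSO flag words followed by up to six
 * fragment pointer/length pairs.
 */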
  1027. void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
  1028. {
  1029. uint32_t *frag_ptr_i_p;
  1030. int i;
  1031. cdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
  1032. tx_desc, tx_desc->id);
  1033. cdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%x\n",
  1034. tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
  1035. cdf_print("%s %d: Fragment Descriptor 0x%p\n",
  1036. __func__, __LINE__, tx_desc->htt_frag_desc);
  1037. /* from htt_tx_desc_frag() it appears that tx_desc->htt_frag_desc
  1038. is already dereferenceable (i.e. in virtual address space) */
  1039. frag_ptr_i_p = tx_desc->htt_frag_desc;
  1040. /* Dump 6 words of TSO flags */
  1041. print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ",
  1042. DUMP_PREFIX_NONE, 8, 4,
  1043. frag_ptr_i_p, 24, true);
  1044. frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */
  1045. i = 0;
  1046. while (*frag_ptr_i_p) {
  1047. print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ",
  1048. DUMP_PREFIX_NONE, 8, 4,
  1049. frag_ptr_i_p, 8, true);
  1050. i++;
  1051. if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
  1052. break;
  1053. else /* jump to next pointer - skip length */
  1054. frag_ptr_i_p += 2;
  1055. }
  1056. return;
  1057. }
  1058. #endif /* HELIUMPLUS_PADDR64 */
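/**
 * ol_txrx_mgmt_send() - transmit a management frame
 * @vdev: vdev handle on which the frame is to be sent
 * @tx_mgmt_frm: management frame to transmit
 * @type: management frame type (must be < OL_TXRX_MGMT_NUM_TYPES)
 * @use_6mbps: specify whether the frame must be sent at 6 Mbps
 * @chanfreq: channel frequency on which to transmit the frame
 *
 * Return: 0 if the frame was accepted, -EINVAL otherwise
 */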
  1059. int
  1060. ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
  1061. cdf_nbuf_t tx_mgmt_frm,
  1062. uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
  1063. {
  1064. struct ol_txrx_pdev_t *pdev = vdev->pdev;
  1065. struct ol_tx_desc_t *tx_desc;
  1066. struct ol_txrx_msdu_info_t tx_msdu_info;
  1067. tx_msdu_info.tso_info.is_tso = 0;
  1068. tx_msdu_info.htt.action.use_6mbps = use_6mbps;
  1069. tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
  1070. tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
  1071. tx_msdu_info.htt.action.do_tx_complete =
  1072. pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;
  1073. /*
  1074. * FIX THIS: l2_hdr_type should only specify L2 header type
  1075. * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
  1076. * that is a combination of L2 header type and 802.11 frame type.
  1077. * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
  1078. * But if the 802.11 frame type is "data", then the HTT pkt type is
  1079. * the L2 header type (more or less): 802.3 vs. Native WiFi
  1080. * (basic 802.11).
  1081. * (Or the header type can be "raw", which is any version of the 802.11
  1082. * header, and also implies that some of the offloaded tx data
  1083. * processing steps may not apply.)
  1084. * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
  1085. * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW
  1086. * needs to overload the l2_hdr_type to indicate whether the frame is
  1087. * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
  1088. * To fix this, the msdu_info's l2_hdr_type should be left specifying
  1089. * just the L2 header type. For mgmt frames, there should be a
  1090. * separate function to patch the HTT pkt type to store a "mgmt" value
  1091. * rather than the L2 header type. Then the HTT pkt type can be
  1092. * programmed efficiently for data frames, and the msdu_info's
  1093. * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
  1094. * frame type rather than the L2 header type.
  1095. */
  1096. /*
  1097. * FIX THIS: remove duplication of htt_frm_type_mgmt and
  1098. * htt_pkt_type_mgmt
  1099. * The htt module expects a "enum htt_pkt_type" value.
  1100. * The htt_dxe module expects a "enum htt_frm_type" value.
  1101. * This needs to be cleaned up, so both versions of htt use a
  1102. * consistent method of specifying the frame type.
  1103. */
  1104. #ifdef QCA_SUPPORT_INTEGRATED_SOC
  1105. /* tx mgmt frames always come with an 802.11 header */
  1106. tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
  1107. tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
  1108. #else
  1109. tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
  1110. tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
  1111. #endif
  1112. tx_msdu_info.peer = NULL;
  1113. cdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, CDF_DMA_TO_DEVICE);
  1114. /* For LL, tx_comp_req is not used, so initialize it to 0 */
  1115. tx_msdu_info.htt.action.tx_comp_req = 0;
  1116. tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
  1117. /* FIX THIS -
  1118. * The FW currently has trouble using the host's fragments table
  1119. * for management frames. Until this is fixed, rather than
  1120. * specifying the fragment table to the FW, specify just the
  1121. * address of the initial fragment.
  1122. */
  1123. #if defined(HELIUMPLUS_PADDR64)
  1124. /* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
  1125. tx_desc); */
  1126. #endif /* defined(HELIUMPLUS_PADDR64) */
  1127. if (tx_desc) {
  1128. /*
  1129. * Following the call to ol_tx_desc_ll, frag 0 is the
  1130. * HTT tx HW descriptor, and the frame payload is in
  1131. * frag 1.
  1132. */
  1133. htt_tx_desc_frags_table_set(
  1134. pdev->htt_pdev,
  1135. tx_desc->htt_tx_desc,
  1136. cdf_nbuf_get_frag_paddr_lo(tx_mgmt_frm, 1),
  1137. 0, 0);
  1138. #if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
  1139. dump_frag_desc(
  1140. "after htt_tx_desc_frags_table_set",
  1141. tx_desc);
  1142. #endif /* defined(HELIUMPLUS_PADDR64) */
  1143. }
  1144. if (!tx_desc) {
  1145. cdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
  1146. CDF_DMA_TO_DEVICE);
  1147. return -EINVAL; /* can't accept the tx mgmt frame */
  1148. }
  1149. TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
  1150. TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
  1151. tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
  1152. htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
  1153. NBUF_SET_PACKET_TRACK(tx_desc->netbuf, NBUF_TX_PKT_MGMT_TRACK);
  1154. ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
  1155. htt_pkt_type_mgmt);
  1156. return 0; /* accepted the tx mgmt frame */
  1157. }
  1158. void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
  1159. {
  1160. htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
  1161. }
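/*
 * ol_tx_reinject() - resend an MSDU to the specified peer, marking the
 * HTT tx descriptor as postponed.
 */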
  1162. cdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
  1163. cdf_nbuf_t msdu, uint16_t peer_id)
  1164. {
  1165. struct ol_tx_desc_t *tx_desc;
  1166. struct ol_txrx_msdu_info_t msdu_info;
  1167. msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
  1168. msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
  1169. msdu_info.peer = NULL;
  1170. msdu_info.htt.action.tx_comp_req = 0;
  1171. msdu_info.tso_info.is_tso = 0;
  1172. ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
  1173. HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
  1174. htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
  1175. ol_tx_send(vdev->pdev, tx_desc, msdu);
  1176. return NULL;
  1177. }
  1178. #if defined(FEATURE_TSO)
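/*
 * ol_tso_seg_list_init() - pre-allocate a free list of num_seg TSO
 * segment elements for the pdev and initialize the lock protecting it.
 */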
  1179. void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
  1180. {
  1181. int i;
  1182. struct cdf_tso_seg_elem_t *c_element;
  1183. c_element = cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
  1184. pdev->tso_seg_pool.freelist = c_element;
  1185. for (i = 0; i < (num_seg - 1); i++) {
  1186. c_element->next =
  1187. cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
  1188. c_element = c_element->next;
  1189. c_element->next = NULL;
  1190. }
  1191. pdev->tso_seg_pool.pool_size = num_seg;
  1192. cdf_spinlock_init(&pdev->tso_seg_pool.tso_mutex);
  1193. }
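/*
 * ol_tso_seg_list_deinit() - free every element on the pdev's TSO
 * segment free list and destroy the lock protecting it.
 */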
  1194. void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
  1195. {
  1196. int i;
  1197. struct cdf_tso_seg_elem_t *c_element;
  1198. struct cdf_tso_seg_elem_t *temp;
  1199. cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
  1200. c_element = pdev->tso_seg_pool.freelist;
  1201. for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
  1202. temp = c_element->next;
  1203. cdf_mem_free(c_element);
  1204. c_element = temp;
  1205. if (!c_element)
  1206. break;
  1207. }
  1208. pdev->tso_seg_pool.freelist = NULL;
  1209. pdev->tso_seg_pool.num_free = 0;
  1210. pdev->tso_seg_pool.pool_size = 0;
  1211. cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
  1212. cdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
  1213. }
  1214. #endif /* FEATURE_TSO */