ol_tx_send.c

/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <cdf_atomic.h>         /* cdf_atomic_inc, etc. */
#include <cdf_lock.h>           /* cdf_os_spinlock */
#include <cdf_time.h>           /* cdf_system_ticks, etc. */
#include <cdf_nbuf.h>           /* cdf_nbuf_t */
#include <cdf_net_types.h>      /* ADF_NBUF_TX_EXT_TID_INVALID */
#include <cds_queue.h>          /* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ieee80211.h>          /* ieee80211_frame, etc. */
#include <enet.h>               /* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>          /* ipv6_traffic_class */
#endif
#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h>    /* htt_tx_status */
#include <ol_ctrl_txrx_api.h>
#include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc. */
#include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#endif
#include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>              /* ol_tx_reinject */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc. */
#endif

#ifdef TX_CREDIT_RECLAIM_SUPPORT
#define OL_TX_CREDIT_RECLAIM(pdev) \
        do { \
                if (cdf_atomic_read(&pdev->target_tx_credit) < \
                    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) { \
                        ol_osif_ath_tasklet(pdev->osdev); \
                } \
        } while (0)
#else
#define OL_TX_CREDIT_RECLAIM(pdev)
#endif /* TX_CREDIT_RECLAIM_SUPPORT */

#if defined(TX_CREDIT_RECLAIM_SUPPORT)
/*
 * HL needs to keep track of the amount of credit available to download
 * tx frames to the target - the download scheduler decides when to
 * download frames, and which frames to download, based on the credit
 * availability.
 * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
 * of the target_tx_credit, to determine when to poll for tx completion
 * messages.
 */
#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu) \
        cdf_atomic_add( \
                factor * htt_tx_msdu_credit(msdu), &pdev->target_tx_credit)
#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu) \
        OL_TX_TARGET_CREDIT_ADJUST(-1, pdev, msdu)
#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu) \
        OL_TX_TARGET_CREDIT_ADJUST(1, pdev, msdu)
#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta) \
        cdf_atomic_add(-1 * delta, &pdev->target_tx_credit)
#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta) \
        cdf_atomic_add(delta, &pdev->target_tx_credit)
#else
/*
 * LL does not need to keep track of target credit.
 * Since the host tx descriptor pool size matches the target's,
 * we know the target has space for the new tx frame if the host's
 * tx descriptor allocation succeeded.
 */
#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu)  /* no-op */
#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu)            /* no-op */
#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu)            /* no-op */
#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta)       /* no-op */
#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta)       /* no-op */
#endif
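
/*
 * Usage sketch (illustrative only, not part of this file's logic): on a
 * build with TX_CREDIT_RECLAIM_SUPPORT, a download path typically brackets
 * the credit counter like this, undoing the decrement if the download is
 * rejected:
 *
 *     OL_TX_TARGET_CREDIT_DECR(pdev, msdu);
 *     if (htt_tx_send_std(pdev->htt_pdev, msdu, id))
 *             OL_TX_TARGET_CREDIT_INCR(pdev, msdu);
 *
 * On plain LL builds the macros above compile to nothing, so the same call
 * sites impose no cost.
 */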

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev) \
        do { \
                struct ol_txrx_vdev_t *vdev; \
                TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { \
                        if (cdf_atomic_read(&vdev->os_q_paused) && \
                            (vdev->tx_fl_hwm != 0)) { \
                                cdf_spin_lock(&pdev->tx_mutex); \
                                if (pdev->tx_desc.num_free > \
                                    vdev->tx_fl_hwm) { \
                                        cdf_atomic_set(&vdev->os_q_paused, 0); \
                                        cdf_spin_unlock(&pdev->tx_mutex); \
                                        ol_txrx_flow_control_cb(vdev, true); \
                                } else { \
                                        cdf_spin_unlock(&pdev->tx_mutex); \
                                } \
                        } \
                } \
        } while (0)
#else
#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev)
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
                struct ol_tx_desc_t *tx_desc, cdf_nbuf_t msdu)
{
        int msdu_credit_consumed;

        TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", cdf_nbuf_len(msdu));
        TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
                              cdf_atomic_read(&pdev->target_tx_credit),
                              cdf_atomic_read(&pdev->target_tx_credit) - 1,
                              cdf_nbuf_len(msdu));

        msdu_credit_consumed = htt_tx_msdu_credit(msdu);
        OL_TX_TARGET_CREDIT_DECR_INT(pdev, msdu_credit_consumed);
        OL_TX_CREDIT_RECLAIM(pdev);

        /*
         * When the tx frame is downloaded to the target, there are two
         * outstanding references:
         * 1.  The host download SW (HTT, HTC, HIF)
         *     This reference is cleared by the ol_tx_send_done callback
         *     functions.
         * 2.  The target FW
         *     This reference is cleared by the ol_tx_completion_handler
         *     function.
         * It is extremely probable that the download completion is processed
         * before the tx completion message.  However, under exceptional
         * conditions the tx completion may be processed first.  Thus, rather
         * than assuming that reference (1) is done before reference (2),
         * explicit reference tracking is needed.
         * Double-increment the ref count to account for both references
         * described above.
         */
        OL_TX_DESC_REF_INIT(tx_desc);
        OL_TX_DESC_REF_INC(tx_desc);
        OL_TX_DESC_REF_INC(tx_desc);

        return msdu_credit_consumed;
}
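
/*
 * Illustrative sketch (not additional driver logic): with the
 * double-increment above, the descriptor is freed only once BOTH
 * completion paths have dropped their reference, regardless of order:
 *
 *     ol_tx_send_base():            ref_cnt = 2
 *     download done (HTT/HTC/HIF):  ref_cnt = 1  -> descriptor kept
 *     target tx completion:         ref_cnt = 0  -> descriptor freed
 *
 * or, in the exceptional reversed order:
 *
 *     target tx completion:         ref_cnt = 1  -> descriptor kept
 *     download done:                ref_cnt = 0  -> descriptor freed
 */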

void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
           struct ol_tx_desc_t *tx_desc, cdf_nbuf_t msdu)
{
        int msdu_credit_consumed;
        uint16_t id;
        int failed;

        msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
        id = ol_tx_desc_id(pdev, tx_desc);
        NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_TXRX);
        DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
                             (uint8_t *)(cdf_nbuf_data(msdu)),
                             sizeof(cdf_nbuf_data(msdu))));
        failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
        if (cdf_unlikely(failed)) {
                OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
        }
}

void
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
                 cdf_nbuf_t head_msdu, int num_msdus)
{
        cdf_nbuf_t rejected;

        OL_TX_CREDIT_RECLAIM(pdev);

        rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
        while (cdf_unlikely(rejected)) {
                struct ol_tx_desc_t *tx_desc;
                uint16_t *msdu_id_storage;
                cdf_nbuf_t next;

                next = cdf_nbuf_next(rejected);
                msdu_id_storage = ol_tx_msdu_id_storage(rejected);
                tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);

                OL_TX_TARGET_CREDIT_INCR(pdev, rejected);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);

                rejected = next;
        }
}

void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
                  struct ol_tx_desc_t *tx_desc,
                  cdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
{
        int msdu_credit_consumed;
        uint16_t id;
        int failed;

        msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
        id = ol_tx_desc_id(pdev, tx_desc);
        NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_TXRX);
        failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
        if (failed) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                           "Error: freeing tx frame after htt_tx failed");
                OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
        }
}

static inline void
ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
                         A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id)
{
        struct ol_tx_desc_t *tx_desc;

        tx_desc = ol_tx_desc_find(pdev, msdu_id);
        cdf_assert(tx_desc);

        /*
         * If the download is done for a management frame,
         * call the download callback if one is registered.
         */
        if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
                int tx_mgmt_index = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
                ol_txrx_mgmt_tx_cb download_cb =
                        pdev->tx_mgmt.callbacks[tx_mgmt_index].download_cb;

                if (download_cb) {
                        download_cb(pdev->tx_mgmt.callbacks[tx_mgmt_index].ctxt,
                                    tx_desc->netbuf, status != A_OK);
                }
        }

        if (status != A_OK) {
                OL_TX_TARGET_CREDIT_INCR(pdev, msdu);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
                                             1 /* download err */);
        } else {
                if (OL_TX_DESC_NO_REFS(tx_desc)) {
                        /*
                         * The decremented value was zero - free the frame.
                         * Use the tx status recorded previously during
                         * tx completion handling.
                         */
                        ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
                                                     tx_desc->status !=
                                                     htt_tx_status_ok);
                }
        }
}

void
ol_tx_download_done_ll(void *pdev,
                       A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id)
{
        ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
                                 msdu_id);
}

void
ol_tx_download_done_hl_retain(void *txrx_pdev,
                              A_STATUS status,
                              cdf_nbuf_t msdu, uint16_t msdu_id)
{
        struct ol_txrx_pdev_t *pdev = txrx_pdev;

        ol_tx_download_done_base(pdev, status, msdu, msdu_id);
}

void
ol_tx_download_done_hl_free(void *txrx_pdev,
                            A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id)
{
        struct ol_txrx_pdev_t *pdev = txrx_pdev;
        struct ol_tx_desc_t *tx_desc;

        tx_desc = ol_tx_desc_find(pdev, msdu_id);
        cdf_assert(tx_desc);

        ol_tx_download_done_base(pdev, status, msdu, msdu_id);

        if ((tx_desc->pkt_type != ol_tx_frm_no_free) &&
            (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
                cdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
        }
}

void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
        cdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
}

void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
        TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
                              cdf_atomic_read(&pdev->target_tx_credit),
                              credit_delta,
                              cdf_atomic_read(&pdev->target_tx_credit) +
                              credit_delta);
        cdf_atomic_add(credit_delta, &pdev->target_tx_credit);
}

#ifdef QCA_COMPUTE_TX_DELAY
static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
                    enum htt_tx_status status,
                    uint16_t *desc_ids, int num_msdus);
#define OL_TX_DELAY_COMPUTE ol_tx_delay_compute
#else
#define OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus) /* no-op */
#endif /* QCA_COMPUTE_TX_DELAY */

#ifndef OL_TX_RESTORE_HDR
#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
#endif

/*
 * The following macros could have been inline functions too.
 * The only rationale for choosing macros is to force the compiler to inline
 * the implementation, which cannot be controlled for actual "inline"
 * functions, since "inline" is only a hint to the compiler.
 * In the performance path, we choose to force the inlining, in preference to
 * the type-checking offered by actual inline functions.
 */
#define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status) \
        TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)

#ifndef ATH_11AC_TXCOMPACT
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf, \
                                   _lcl_freelist, _tx_desc_last) \
        do { \
                cdf_atomic_init(&(_tx_desc)->ref_cnt); \
                /* restore original hdr offset */ \
                OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
                cdf_nbuf_unmap((_pdev)->osdev, (_netbuf), CDF_DMA_TO_DEVICE); \
                cdf_nbuf_free((_netbuf)); \
                ((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
                        (_lcl_freelist); \
                if (cdf_unlikely(!(_lcl_freelist))) { \
                        (_tx_desc_last) = (union ol_tx_desc_list_elem_t *) \
                                (_tx_desc); \
                } \
                (_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
        } while (0)
#else /* !ATH_11AC_TXCOMPACT */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf, \
                                   _lcl_freelist, _tx_desc_last) \
        do { \
                /* restore original hdr offset */ \
                OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
                cdf_nbuf_unmap((_pdev)->osdev, (_netbuf), CDF_DMA_TO_DEVICE); \
                cdf_nbuf_free((_netbuf)); \
                ((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
                        (_lcl_freelist); \
                if (cdf_unlikely(!(_lcl_freelist))) { \
                        (_tx_desc_last) = (union ol_tx_desc_list_elem_t *) \
                                (_tx_desc); \
                } \
                (_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
        } while (0)
#endif /* !ATH_11AC_TXCOMPACT */

#ifdef QCA_TX_SINGLE_COMPLETIONS
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status) \
        ol_tx_msdu_complete_single((_pdev), (_tx_desc), \
                                   (_netbuf), (_lcl_freelist), \
                                   (_tx_desc_last))
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status) \
        do { \
                if (cdf_likely((_tx_desc)->pkt_type == ol_tx_frm_std)) { \
                        ol_tx_msdu_complete_single((_pdev), (_tx_desc), \
                                                   (_netbuf), \
                                                   (_lcl_freelist), \
                                                   (_tx_desc_last)); \
                } else { \
                        ol_tx_desc_frame_free_nonstd( \
                                (_pdev), (_tx_desc), \
                                (_status) != htt_tx_status_ok); \
                } \
        } while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#else /* !QCA_TX_SINGLE_COMPLETIONS */
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status) \
        ol_tx_msdu_complete_batch((_pdev), (_tx_desc), (_tx_descs), (_status))
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status) \
        do { \
                if (cdf_likely((_tx_desc)->pkt_type == ol_tx_frm_std)) { \
                        ol_tx_msdu_complete_batch((_pdev), (_tx_desc), \
                                                  (_tx_descs), (_status)); \
                } else { \
                        ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
                                                     (_status) != \
                                                     htt_tx_status_ok); \
                } \
        } while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#endif /* QCA_TX_SINGLE_COMPLETIONS */
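
/*
 * For reference only (a sketch, not compiled): the type-checked inline
 * equivalent that the rationale comment above trades away in favor of
 * guaranteed inlining would, for the batch case, look roughly like a
 * hypothetical ol_tx_msdu_complete_batch_fn():
 *
 *     static inline void
 *     ol_tx_msdu_complete_batch_fn(struct ol_txrx_pdev_t *pdev,
 *                                  struct ol_tx_desc_t *tx_desc,
 *                                  ol_tx_desc_list *tx_descs,
 *                                  enum htt_tx_status status)
 *     {
 *             TAILQ_INSERT_TAIL(tx_descs, tx_desc, tx_desc_list_elem);
 *     }
 *
 * The macro form avoids any risk of the compiler declining to inline this
 * in the per-packet completion loop.
 */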

void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
{
        int i = 0;
        struct ol_tx_desc_t *tx_desc;

        for (i = 0; i < pdev->tx_desc.pool_size; i++) {
                tx_desc = ol_tx_desc_find(pdev, i);
                /*
                 * Confirm that each tx descriptor is "empty", i.e. it has
                 * no tx frame attached.
                 * In particular, check that there are no frames that have
                 * been given to the target to transmit, for which the
                 * target has never provided a response.
                 */
                if (cdf_atomic_read(&tx_desc->ref_cnt)) {
                        TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
                                   "Warning: freeing tx frame "
                                   "(no tx completion from the target)\n");
                        ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
                }
        }
}

void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
{
        ol_tx_target_credit_update(pdev, credits);

        /* UNPAUSE OS Q */
        OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
}

/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored in
 * ol_tx_inspect_handler().
 */
void
ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
                         int num_msdus,
                         enum htt_tx_status status, void *tx_desc_id_iterator)
{
        int i;
        uint16_t *desc_ids = (uint16_t *)tx_desc_id_iterator;
        uint16_t tx_desc_id;
        struct ol_tx_desc_t *tx_desc;
        char *trace_str;
        uint32_t byte_cnt = 0;
        cdf_nbuf_t netbuf;
        union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
        union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
        ol_tx_desc_list tx_descs;

        TAILQ_INIT(&tx_descs);

        OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus);

        trace_str = (status) ? "OT:C:F:" : "OT:C:S:";
        for (i = 0; i < num_msdus; i++) {
                tx_desc_id = desc_ids[i];
                tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
                tx_desc->status = status;
                netbuf = tx_desc->netbuf;
                cdf_runtime_pm_put();
                cdf_nbuf_trace_update(netbuf, trace_str);
                /* Per SDU update of byte count */
                byte_cnt += cdf_nbuf_len(netbuf);
                if (OL_TX_DESC_NO_REFS(tx_desc)) {
                        ol_tx_statistics(
                                pdev->ctrl_pdev,
                                HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
                                        (tx_desc->htt_tx_desc))),
                                status != htt_tx_status_ok);
                        ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
                                            lcl_freelist, tx_desc_last,
                                            status);
                }
                NBUF_UPDATE_TX_PKT_COUNT(netbuf, NBUF_TX_PKT_FREE);
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
                tx_desc->pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
                tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
#endif
        }

        /* One shot protected access to pdev freelist, when setup */
        if (lcl_freelist) {
                cdf_spin_lock(&pdev->tx_mutex);
                tx_desc_last->next = pdev->tx_desc.freelist;
                pdev->tx_desc.freelist = lcl_freelist;
                pdev->tx_desc.num_free += (uint16_t)num_msdus;
                cdf_spin_unlock(&pdev->tx_mutex);
        } else {
                ol_tx_desc_frame_list_free(pdev, &tx_descs,
                                           status != htt_tx_status_ok);
        }

        OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);

        /* UNPAUSE OS Q */
        OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
        /* Do one shot statistics */
        TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}
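
/*
 * Note (illustrative): the loop above builds lcl_freelist without taking
 * pdev->tx_mutex, so a completion batch of N standard frames pays for a
 * single lock/unlock pair instead of N of them; tx_desc_last remembers the
 * tail so the whole local chain can be spliced onto pdev->tx_desc.freelist
 * in O(1) while the lock is held.
 */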

/*
 * ol_tx_single_completion_handler performs the same tx completion
 * processing as ol_tx_completion_handler, but for a single frame.
 * ol_tx_completion_handler is optimized to handle batch completions
 * as efficiently as possible; in contrast ol_tx_single_completion_handler
 * handles single frames as simply and generally as possible.
 * Thus, this ol_tx_single_completion_handler function is suitable for
 * intermittent usage, such as for tx mgmt frames.
 */
void
ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
                                enum htt_tx_status status, uint16_t tx_desc_id)
{
        struct ol_tx_desc_t *tx_desc;
        cdf_nbuf_t netbuf;

        tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
        tx_desc->status = status;
        netbuf = tx_desc->netbuf;

        NBUF_UPDATE_TX_PKT_COUNT(netbuf, NBUF_TX_PKT_FREE);
        /* Do one shot statistics */
        TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, cdf_nbuf_len(netbuf));

        if (OL_TX_DESC_NO_REFS(tx_desc)) {
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
                                             status != htt_tx_status_ok);
        }

        TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
                              cdf_atomic_read(&pdev->target_tx_credit),
                              1, cdf_atomic_read(&pdev->target_tx_credit) + 1);
        cdf_atomic_add(1, &pdev->target_tx_credit);
}

/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored here.
 */
void
ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
                      int num_msdus, void *tx_desc_id_iterator)
{
        uint16_t vdev_id, i;
        struct ol_txrx_vdev_t *vdev;
        uint16_t *desc_ids = (uint16_t *)tx_desc_id_iterator;
        uint16_t tx_desc_id;
        struct ol_tx_desc_t *tx_desc;
        union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
        union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
        cdf_nbuf_t netbuf;
        ol_tx_desc_list tx_descs;

        TAILQ_INIT(&tx_descs);

        for (i = 0; i < num_msdus; i++) {
                tx_desc_id = desc_ids[i];
                tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
                netbuf = tx_desc->netbuf;

                /* find the "vdev" this tx_desc belongs to */
                vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
                                                    (tx_desc->htt_tx_desc)));
                TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                        if (vdev->vdev_id == vdev_id)
                                break;
                }
                /* vdev now points to the vdev for this descriptor. */

#ifndef ATH_11AC_TXCOMPACT
                /* save this multicast packet to local free list */
                if (cdf_atomic_dec_and_test(&tx_desc->ref_cnt))
#endif
                {
                        /*
                         * For this function only, force the htt status to be
                         * "htt_tx_status_ok" for graceful freeing of this
                         * multicast frame.
                         */
                        ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
                                            lcl_freelist, tx_desc_last,
                                            htt_tx_status_ok);
                }
        }

        if (lcl_freelist) {
                cdf_spin_lock(&pdev->tx_mutex);
                tx_desc_last->next = pdev->tx_desc.freelist;
                pdev->tx_desc.freelist = lcl_freelist;
                cdf_spin_unlock(&pdev->tx_mutex);
        } else {
                ol_tx_desc_frame_list_free(pdev, &tx_descs,
                                           htt_tx_status_discard);
        }

        TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
                              cdf_atomic_read(&pdev->target_tx_credit),
                              num_msdus,
                              cdf_atomic_read(&pdev->target_tx_credit) +
                              num_msdus);
        OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);
}

#ifdef QCA_COMPUTE_TX_DELAY
void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval)
{
        pdev->tx_delay.avg_period_ticks = cdf_system_msecs_to_ticks(interval);
}

void
ol_tx_packet_count(ol_txrx_pdev_handle pdev,
                   uint16_t *out_packet_count,
                   uint16_t *out_packet_loss_count, int category)
{
        *out_packet_count = pdev->packet_count[category];
        *out_packet_loss_count = pdev->packet_loss_count[category];
        pdev->packet_count[category] = 0;
        pdev->packet_loss_count[category] = 0;
}

uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
{
        uint32_t sum32;
        int shift = 0;

        /*
         * To avoid doing a 64-bit divide, shift the sum down until it is
         * no more than 32 bits (and shift the denominator to match).
         */
        while ((sum >> 32) != 0) {
                sum >>= 1;
                shift++;
        }
        sum32 = (uint32_t)sum;
        num >>= shift;

        return (sum32 + (num >> 1)) / num;      /* round to nearest */
}
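
/*
 * Worked example (illustrative): for sum = 0x123456789 (a 33-bit value)
 * and num = 1000, one right shift reduces the sum to 32 bits (shift = 1),
 * so the function returns (0x91A2B3C4 + 250) / 500 -- i.e. both numerator
 * and denominator are halved before the 32-bit divide, and the +(num >> 1)
 * term rounds to the nearest integer rather than truncating.  Only the
 * bits shifted out are lost relative to an exact 64-bit average.
 */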

void
ol_tx_delay(ol_txrx_pdev_handle pdev,
            uint32_t *queue_delay_microsec,
            uint32_t *tx_delay_microsec, int category)
{
        int index;
        uint32_t avg_delay_ticks;
        struct ol_tx_delay_data *data;

        cdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

        cdf_spin_lock_bh(&pdev->tx_delay.mutex);
        index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

        data = &pdev->tx_delay.cats[category].copies[index];

        if (data->avgs.transmit_num > 0) {
                avg_delay_ticks =
                        ol_tx_delay_avg(data->avgs.transmit_sum_ticks,
                                        data->avgs.transmit_num);
                *tx_delay_microsec =
                        cdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
        } else {
                /*
                 * This case should only happen if there's a query
                 * within 5 sec after the first tx data frame.
                 */
                *tx_delay_microsec = 0;
        }
        if (data->avgs.queue_num > 0) {
                avg_delay_ticks =
                        ol_tx_delay_avg(data->avgs.queue_sum_ticks,
                                        data->avgs.queue_num);
                *queue_delay_microsec =
                        cdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
        } else {
                /*
                 * This case should only happen if there's a query
                 * within 5 sec after the first tx data frame.
                 */
                *queue_delay_microsec = 0;
        }

        cdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

void
ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
                 uint16_t *report_bin_values, int category)
{
        int index, i, j;
        struct ol_tx_delay_data *data;

        cdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

        cdf_spin_lock_bh(&pdev->tx_delay.mutex);
        index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

        data = &pdev->tx_delay.cats[category].copies[index];

        for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) {
                uint16_t internal_bin_sum = 0;

                while (j < (1 << i))
                        internal_bin_sum += data->hist_bins_queue[j++];

                report_bin_values[i] = internal_bin_sum;
        }
        report_bin_values[i] = data->hist_bins_queue[j];        /* overflow */
        cdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
static inline uint8_t *ol_tx_dest_addr_find(struct ol_txrx_pdev_t *pdev,
                                            cdf_nbuf_t tx_nbuf)
{
        uint8_t *hdr_ptr;
        void *datap = cdf_nbuf_data(tx_nbuf);

        if (pdev->frame_format == wlan_frm_fmt_raw) {
                /* adjust hdr_ptr to RA */
                struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
                hdr_ptr = wh->i_addr1;
        } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
                /* adjust hdr_ptr to RA */
                struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
                hdr_ptr = wh->i_addr1;
        } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
                hdr_ptr = datap;
        } else {
                CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
                          "Invalid standard frame type: %d",
                          pdev->frame_format);
                cdf_assert(0);
                hdr_ptr = NULL;
        }
        return hdr_ptr;
}

static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
                            cdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
{
        uint16_t ethertype;
        uint8_t *dest_addr, *l3_hdr;
        int is_mgmt, is_mcast;
        int l2_hdr_size;

        dest_addr = ol_tx_dest_addr_find(pdev, msdu);
        if (NULL == dest_addr)
                return ADF_NBUF_TX_EXT_TID_INVALID;

        is_mcast = IEEE80211_IS_MULTICAST(dest_addr);
        is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE;
        if (is_mgmt) {
                return (is_mcast) ?
                       OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT :
                       HTT_TX_EXT_TID_MGMT;
        }
        if (is_mcast)
                return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST;

        if (pdev->frame_format == wlan_frm_fmt_802_3) {
                struct ethernet_hdr_t *enet_hdr;

                enet_hdr = (struct ethernet_hdr_t *)cdf_nbuf_data(msdu);
                l2_hdr_size = sizeof(struct ethernet_hdr_t);
                ethertype =
                        (enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
                if (!IS_ETHERTYPE(ethertype)) {
                        struct llc_snap_hdr_t *llc_hdr;

                        llc_hdr = (struct llc_snap_hdr_t *)
                                  (cdf_nbuf_data(msdu) + l2_hdr_size);
                        l2_hdr_size += sizeof(struct llc_snap_hdr_t);
                        ethertype =
                                (llc_hdr->ethertype[0] << 8) |
                                llc_hdr->ethertype[1];
                }
        } else {
                struct llc_snap_hdr_t *llc_hdr;

                l2_hdr_size = sizeof(struct ieee80211_frame);
                llc_hdr = (struct llc_snap_hdr_t *)(cdf_nbuf_data(msdu)
                                                    + l2_hdr_size);
                l2_hdr_size += sizeof(struct llc_snap_hdr_t);
                ethertype =
                        (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
        }
        l3_hdr = cdf_nbuf_data(msdu) + l2_hdr_size;
        if (ETHERTYPE_IPV4 == ethertype) {
                return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
        } else if (ETHERTYPE_IPV6 == ethertype) {
                return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) &
                       0x7;
        } else {
                return ADF_NBUF_TX_EXT_TID_INVALID;
        }
}
#endif
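
/*
 * Illustrative note: for IP frames, ol_tx_delay_tid_from_l3_hdr() derives
 * the TID from the 3-bit precedence field of the DSCP/TOS (or IPv6 traffic
 * class) byte.  For example, a TOS byte of 0xB8 (DSCP EF, commonly used for
 * voice) yields (0xB8 >> 5) & 0x7 = 5, so such frames are accounted in
 * delay category 5.
 */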

static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
        struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
        uint8_t tid;
        cdf_nbuf_t msdu = tx_desc->netbuf;

        tid = cdf_nbuf_get_tid(msdu);
        if (tid == ADF_NBUF_TX_EXT_TID_INVALID) {
                tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
                if (tid == ADF_NBUF_TX_EXT_TID_INVALID) {
                        /*
                         * TID could not be determined
                         * (this is not an IP frame?)
                         */
                        return -EINVAL;
                }
        }
        return tid;
#else
        return 0;
#endif
}

static inline int
ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)
{
        int bin;
        /*
         * For speed, multiply and shift to approximate a divide. This causes
         * a small error, but the approximation error should be much less
         * than the other uncertainties in the tx delay computation.
         */
        bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >>
              pdev->tx_delay.hist_internal_bin_width_shift;
        if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
                bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;

        return bin;
}
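
/*
 * Worked example (hypothetical values): to approximate a divide by a bin
 * width of 10 ticks, the setup code could pick mult = 205 and shift = 11,
 * since 205 / 2048 = 0.1001...  A 100-tick delay then maps to
 * (100 * 205) >> 11 = 20500 / 2048 = 10, matching the exact 100 / 10,
 * while avoiding a division in the per-packet completion path.
 */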

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
                    enum htt_tx_status status,
                    uint16_t *desc_ids, int num_msdus)
{
        int i, index, cat;
        uint32_t now_ticks = cdf_system_ticks();
        uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks;
        uint32_t avg_time_ticks;
        struct ol_tx_delay_data *data;

        cdf_assert(num_msdus > 0);

        /*
         * Keep static counters for total packets and lost packets;
         * they are reset in ol_tx_delay(), the function used to fetch
         * the stats.
         */
        cat = ol_tx_delay_category(pdev, desc_ids[0]);
        if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
                return;

        pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus;
        if (status != htt_tx_status_ok) {
                for (i = 0; i < num_msdus; i++) {
                        cat = ol_tx_delay_category(pdev, desc_ids[i]);
                        if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
                                return;
                        pdev->packet_loss_count[cat]++;
                }
                return;
        }

        /* since we may switch the ping-pong index, provide mutex with readers */
        cdf_spin_lock_bh(&pdev->tx_delay.mutex);
        index = pdev->tx_delay.cats[cat].in_progress_idx;

        data = &pdev->tx_delay.cats[cat].copies[index];

        if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) {
                tx_delay_transmit_ticks =
                        now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks;
                /*
                 * We'd like to account for the number of MSDUs that were
                 * transmitted together, but we don't know this.  All we know
                 * is the number of MSDUs that were acked together.
                 * Since the frame error rate is small, this is nearly the same
                 * as the number of frames transmitted together.
                 */
                data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks;
                data->avgs.transmit_num += num_msdus;
        }
        pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;

        for (i = 0; i < num_msdus; i++) {
                int bin;
                uint16_t id = desc_ids[i];
                struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);

                tx_delay_queue_ticks =
                        now_ticks - tx_desc->entry_timestamp_ticks;

                data->avgs.queue_sum_ticks += tx_delay_queue_ticks;
                data->avgs.queue_num++;
                bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks);
                data->hist_bins_queue[bin]++;
        }

        /* check if it's time to start a new average */
        avg_time_ticks =
                now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks;
        if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) {
                pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks;
                index = 1 - index;
                pdev->tx_delay.cats[cat].in_progress_idx = index;
                cdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index],
                             sizeof(pdev->tx_delay.cats[cat].copies[index]));
        }
        cdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#endif /* QCA_COMPUTE_TX_DELAY */