/*
 * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <cdf_net_types.h>      /* CDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
#include <cdf_nbuf.h>           /* cdf_nbuf_t, etc. */
#include <cdf_util.h>           /* cdf_assert */
#include <cdf_lock.h>           /* cdf_spinlock */
#ifdef QCA_COMPUTE_TX_DELAY
#include <cdf_time.h>           /* cdf_system_ticks */
#endif
#include <ol_htt_tx_api.h>      /* htt_tx_desc_id */
#include <ol_txrx_types.h>      /* ol_txrx_pdev_t */
#include <ol_tx_desc.h>
#include <ol_txrx_internal.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc. */
#endif
#include <ol_txrx.h>

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
extern uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
#endif

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
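/**
 * ol_tx_desc_sanity_checks() - sanity check a tx descriptor on allocation
 * @pdev: pdev handle
 * @tx_desc: tx descriptor to check
 *
 * Asserts if the descriptor's pkt_type is not the free-pool marker (0xff),
 * or if its HTT descriptor lies outside the expected address range, either
 * of which indicates potential descriptor corruption.
 *
 * Return: None
 */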
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
                                            struct ol_tx_desc_t *tx_desc)
{
        if (tx_desc->pkt_type != 0xff) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                           "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p",
                           __func__, tx_desc->pkt_type, pdev);
                cdf_assert(0);
        }
        if ((uint32_t *) tx_desc->htt_tx_desc < g_dbg_htt_desc_start_addr
            || (uint32_t *) tx_desc->htt_tx_desc > g_dbg_htt_desc_end_addr) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                           "%s Potential htt_desc corruption:0x%p pdev:0x%p\n",
                           __func__, tx_desc->htt_tx_desc, pdev);
                cdf_assert(0);
        }
}
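
/**
 * ol_tx_desc_reset_pkt_type() - mark a tx descriptor as free
 * @tx_desc: tx descriptor
 *
 * Sets pkt_type back to the free-pool marker (0xff) so that a later
 * allocation can detect reuse of an in-flight descriptor.
 *
 * Return: None
 */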
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
        tx_desc->pkt_type = 0xff;
}

#ifdef QCA_COMPUTE_TX_DELAY
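/**
 * ol_tx_desc_compute_delay() - record the descriptor allocation time
 * @tx_desc: tx descriptor
 *
 * Asserts if the entry timestamp was not reset by the previous free,
 * then stamps the descriptor with the current system tick count.
 *
 * Return: None
 */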
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
        if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
                           __func__, tx_desc->entry_timestamp_ticks);
                cdf_assert(0);
        }
        tx_desc->entry_timestamp_ticks = cdf_system_ticks();
}
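
/**
 * ol_tx_desc_reset_timestamp() - clear the descriptor allocation time
 * @tx_desc: tx descriptor
 *
 * Return: None
 */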
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
        tx_desc->entry_timestamp_ticks = 0xffffffff;
}
#endif

#else

static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
                                            struct ol_tx_desc_t *tx_desc)
{
        return;
}

static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
        return;
}

static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
        return;
}

static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
        return;
}
#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_alloc() - allocate descriptor from freelist
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Return: tx descriptor pointer, or NULL in case of error
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
                                      struct ol_txrx_vdev_t *vdev)
{
        struct ol_tx_desc_t *tx_desc = NULL;

        cdf_spin_lock_bh(&pdev->tx_mutex);
        if (pdev->tx_desc.freelist) {
                tx_desc = ol_tx_get_desc_global_pool(pdev);
                ol_tx_desc_sanity_checks(pdev, tx_desc);
                ol_tx_desc_compute_delay(tx_desc);
        }
        cdf_spin_unlock_bh(&pdev->tx_mutex);
        return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info
 *
 * Return: tx descriptor or NULL
 */
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
                         struct ol_txrx_vdev_t *vdev,
                         struct ol_txrx_msdu_info_t *msdu_info)
{
        return ol_tx_desc_alloc(pdev, vdev);
}

#else
/**
 * ol_tx_desc_alloc() - allocate tx descriptor from a flow pool
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @pool: flow pool
 *
 * Return: tx descriptor or NULL
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
                                      struct ol_txrx_vdev_t *vdev,
                                      struct ol_tx_flow_pool_t *pool)
{
        struct ol_tx_desc_t *tx_desc = NULL;

        if (pool) {
                cdf_spin_lock_bh(&pool->flow_pool_lock);
                if (pool->avail_desc) {
                        tx_desc = ol_tx_get_desc_flow_pool(pool);
                        if (cdf_unlikely(pool->avail_desc < pool->stop_th)) {
                                pool->status = FLOW_POOL_ACTIVE_PAUSED;
                                cdf_spin_unlock_bh(&pool->flow_pool_lock);
                                /* pause network queues */
                                pdev->pause_cb(vdev->vdev_id,
                                               WLAN_STOP_ALL_NETIF_QUEUE,
                                               WLAN_DATA_FLOW_CONTROL);
                        } else {
                                cdf_spin_unlock_bh(&pool->flow_pool_lock);
                        }
                        ol_tx_desc_sanity_checks(pdev, tx_desc);
                        ol_tx_desc_compute_delay(tx_desc);
                } else {
                        cdf_spin_unlock_bh(&pool->flow_pool_lock);
                        pdev->pool_stats.pkt_drop_no_desc++;
                }
        } else {
                pdev->pool_stats.pkt_drop_no_pool++;
        }

        return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info
 *
 * Return: tx descriptor or NULL
 */
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
                         struct ol_txrx_vdev_t *vdev,
                         struct ol_txrx_msdu_info_t *msdu_info)
{
        if (cdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt))
                return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool);
        else
                return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#else
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
                         struct ol_txrx_vdev_t *vdev,
                         struct ol_txrx_msdu_info_t *msdu_info)
{
        return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#endif
#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_free() - put descriptor to freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
        cdf_spin_lock_bh(&pdev->tx_mutex);
#if defined(FEATURE_TSO)
        if (tx_desc->pkt_type == ol_tx_frm_tso) {
                if (cdf_unlikely(tx_desc->tso_desc == NULL))
                        cdf_print("%s %d TSO desc is NULL!\n",
                                  __func__, __LINE__);
                else
                        ol_tso_free_segment(pdev, tx_desc->tso_desc);
        }
#endif
        ol_tx_desc_reset_pkt_type(tx_desc);
        ol_tx_desc_reset_timestamp(tx_desc);
        ol_tx_put_desc_global_pool(pdev, tx_desc);
        cdf_spin_unlock_bh(&pdev->tx_mutex);
}

#else
/**
 * ol_tx_desc_free() - put descriptor to pool freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
        struct ol_tx_flow_pool_t *pool = tx_desc->pool;

#if defined(FEATURE_TSO)
        if (tx_desc->pkt_type == ol_tx_frm_tso) {
                if (cdf_unlikely(tx_desc->tso_desc == NULL))
                        cdf_print("%s %d TSO desc is NULL!\n",
                                  __func__, __LINE__);
                else
                        ol_tso_free_segment(pdev, tx_desc->tso_desc);
        }
#endif
        ol_tx_desc_reset_pkt_type(tx_desc);
        ol_tx_desc_reset_timestamp(tx_desc);

        cdf_spin_lock_bh(&pool->flow_pool_lock);
        ol_tx_put_desc_flow_pool(pool, tx_desc);
        switch (pool->status) {
        case FLOW_POOL_ACTIVE_PAUSED:
                if (pool->avail_desc > pool->start_th) {
                        pdev->pause_cb(pool->member_flow_id,
                                       WLAN_WAKE_ALL_NETIF_QUEUE,
                                       WLAN_DATA_FLOW_CONTROL);
                        pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
                }
                break;
        case FLOW_POOL_INVALID:
                if (pool->avail_desc == pool->flow_pool_size) {
                        cdf_spin_unlock_bh(&pool->flow_pool_lock);
                        ol_tx_free_invalid_flow_pool(pool);
                        cdf_print("%s %d pool is INVALID State!!\n",
                                  __func__, __LINE__);
                        return;
                }
                break;
        case FLOW_POOL_ACTIVE_UNPAUSED:
                break;
        default:
                cdf_print("%s %d pool is INACTIVE State!!\n",
                          __func__, __LINE__);
                break;
        }
        cdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#endif

extern void
dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc);
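
/**
 * dump_pkt() - print the contents of a network buffer
 * @nbuf: network buffer to dump
 * @nbuf_paddr: physical address of the buffer
 * @len: number of bytes to dump
 *
 * Return: None
 */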
void
dump_pkt(cdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
{
        cdf_print("%s: Pkt: VA 0x%p PA 0x%x len %d\n", __func__,
                  cdf_nbuf_data(nbuf), nbuf_paddr, len);
        print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_NONE, 16, 4,
                       cdf_nbuf_data(nbuf), len, true);
}
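
/* map HTT tx frame types to the corresponding tx_pkt_type values */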
const uint32_t htt_to_ce_pkt_type[] = {
        [htt_pkt_type_raw] = tx_pkt_type_raw,
        [htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
        [htt_pkt_type_ethernet] = tx_pkt_type_802_3,
        [htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
        [htt_pkt_type_eth2] = tx_pkt_type_eth2,
        [htt_pkt_num_types] = 0xffffffff
};
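
/**
 * ol_tx_desc_ll() - allocate and initialize a tx descriptor for an LL frame
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @netbuf: network buffer holding the frame
 * @msdu_info: tx MSDU meta-data
 *
 * Allocates a SW tx descriptor, initializes the corresponding HTT tx
 * descriptor, and populates the fragmentation descriptor (or the TSO
 * info, for TSO frames).
 *
 * Return: tx descriptor or NULL
 */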
struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
                                   struct ol_txrx_vdev_t *vdev,
                                   cdf_nbuf_t netbuf,
                                   struct ol_txrx_msdu_info_t *msdu_info)
{
        struct ol_tx_desc_t *tx_desc;
        unsigned int i;
        uint32_t num_frags;

        msdu_info->htt.info.vdev_id = vdev->vdev_id;
        msdu_info->htt.action.cksum_offload = cdf_nbuf_get_tx_cksum(netbuf);
        switch (cdf_nbuf_get_exemption_type(netbuf)) {
        case CDF_NBUF_EXEMPT_NO_EXEMPTION:
        case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
                /* We want to encrypt this frame */
                msdu_info->htt.action.do_encrypt = 1;
                break;
        case CDF_NBUF_EXEMPT_ALWAYS:
                /* We don't want to encrypt this frame */
                msdu_info->htt.action.do_encrypt = 0;
                break;
        default:
                cdf_assert(0);
                break;
        }

        /* allocate the descriptor */
        tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
        if (!tx_desc)
                return NULL;

        /* initialize the SW tx descriptor */
        tx_desc->netbuf = netbuf;
        if (msdu_info->tso_info.is_tso) {
                tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
                tx_desc->pkt_type = ol_tx_frm_tso;
                TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
        } else {
                tx_desc->pkt_type = ol_tx_frm_std;
        }

        /* initialize the HW tx descriptor */
        htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
                         tx_desc->htt_tx_desc_paddr,
                         ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
                         &msdu_info->tso_info,
                         NULL, vdev->opmode == wlan_op_mode_ocb);

        /*
         * Initialize the fragmentation descriptor.
         * Skip the prefix fragment (HTT tx descriptor) that was added
         * during the call to htt_tx_desc_init above.
         */
        num_frags = cdf_nbuf_get_num_frags(netbuf);
        /* num_frags is expected to be 2 at most */
        num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS)
                    ? CVG_NBUF_MAX_EXTRA_FRAGS
                    : num_frags;
#if defined(HELIUMPLUS_PADDR64)
        /*
         * Use num_frags - 1, since 1 frag is used to store
         * the HTT/HTC descriptor.
         * Refer to htt_tx_desc_init().
         */
        htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
                              num_frags - 1);
#else /* !defined(HELIUMPLUS_PADDR64) */
        htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
                              num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */

        if (msdu_info->tso_info.is_tso) {
                htt_tx_desc_fill_tso_info(pdev->htt_pdev,
                                          tx_desc->htt_frag_desc,
                                          &msdu_info->tso_info);
                TXRX_STATS_TSO_SEG_UPDATE(pdev,
                                          msdu_info->tso_info.curr_seg->seg);
        } else {
                for (i = 1; i < num_frags; i++) {
                        cdf_size_t frag_len;
                        uint32_t frag_paddr;

                        frag_len = cdf_nbuf_get_frag_len(netbuf, i);
                        frag_paddr = cdf_nbuf_get_frag_paddr_lo(netbuf, i);
#if defined(HELIUMPLUS_PADDR64)
                        htt_tx_desc_frag(pdev->htt_pdev,
                                         tx_desc->htt_frag_desc, i - 1,
                                         frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
                        cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
                                  __func__, __LINE__, tx_desc->htt_frag_desc,
                                  frag_paddr, frag_len);
                        dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* !defined(HELIUMPLUS_PADDR64) */
                        htt_tx_desc_frag(pdev->htt_pdev,
                                         tx_desc->htt_tx_desc, i - 1,
                                         frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
                }
        }

#if defined(HELIUMPLUS_DEBUG)
        dump_frag_desc("ol_tx_desc_ll()", tx_desc);
#endif
        return tx_desc;
}
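
/**
 * ol_tx_desc_frame_list_free() - free a list of tx descriptors and netbufs
 * @pdev: pdev handle
 * @tx_descs: list of tx descriptors to free
 * @had_error: whether any of the frames had a tx error
 *
 * Unmaps each frame, returns its tx descriptor to the free pool, and
 * frees the network buffers as a single batch.
 *
 * Return: None
 */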
void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
                                ol_tx_desc_list *tx_descs, int had_error)
{
        struct ol_tx_desc_t *tx_desc, *tmp;
        cdf_nbuf_t msdus = NULL;

        TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
                cdf_nbuf_t msdu = tx_desc->netbuf;

                cdf_atomic_init(&tx_desc->ref_cnt);     /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
                /* restore original hdr offset */
                OL_TX_RESTORE_HDR(tx_desc, msdu);
#endif
                cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_TO_DEVICE);
                /* free the tx desc */
                ol_tx_desc_free(pdev, tx_desc);
                /* link the netbuf into a list to free as a batch */
                cdf_nbuf_set_next(msdu, msdus);
                msdus = msdu;
        }
        /* free the netbufs as a batch */
        cdf_nbuf_tx_free(msdus, had_error);
}
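
/**
 * ol_tx_desc_frame_free_nonstd() - free the tx descriptor of a non-std frame
 * @pdev: pdev handle
 * @tx_desc: tx descriptor to free
 * @had_error: whether the frame had a tx error
 *
 * Handles the special cases (no-free frames handed back to the tx data
 * callback, management frames with OTA ack callbacks) before returning
 * the descriptor to the free pool.
 *
 * Return: None
 */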
void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
                                  struct ol_tx_desc_t *tx_desc, int had_error)
{
        int mgmt_type;
        ol_txrx_mgmt_tx_cb ota_ack_cb;
        char *trace_str;

        cdf_atomic_init(&tx_desc->ref_cnt);     /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        /* restore original hdr offset */
        OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
#endif
        trace_str = (had_error) ? "OT:C:F:" : "OT:C:S:";
        cdf_nbuf_trace_update(tx_desc->netbuf, trace_str);
        if (tx_desc->pkt_type == ol_tx_frm_no_free) {
                /* free the tx desc but don't unmap or free the frame */
                if (pdev->tx_data_callback.func) {
                        cdf_nbuf_set_next(tx_desc->netbuf, NULL);
                        pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
                                                    tx_desc->netbuf, had_error);
                        ol_tx_desc_free(pdev, tx_desc);
                        return;
                }
                /* let the code below unmap and free the frame */
        }
        cdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, CDF_DMA_TO_DEVICE);
        /* check the frame type to see what kind of special steps are needed */
        if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
            (tx_desc->pkt_type != 0xff)) {
                uint32_t frag_desc_paddr_lo = 0;

#if defined(HELIUMPLUS_PADDR64)
                frag_desc_paddr_lo = tx_desc->htt_frag_desc_paddr;
                /* FIX THIS -
                 * The FW currently has trouble using the host's fragments
                 * table for management frames. Until this is fixed,
                 * rather than specifying the fragment table to the FW,
                 * the host SW will specify just the address of the initial
                 * fragment.
                 * Now that the mgmt frame is done, the HTT tx desc's frags
                 * table pointer needs to be reset.
                 */
#if defined(HELIUMPLUS_DEBUG)
                cdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
                          __func__, __LINE__, tx_desc->id,
                          frag_desc_paddr_lo);
#endif /* HELIUMPLUS_DEBUG */
#endif /* HELIUMPLUS_PADDR64 */
                htt_tx_desc_frags_table_set(pdev->htt_pdev,
                                            tx_desc->htt_tx_desc, 0,
                                            frag_desc_paddr_lo, 1);

                mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
                /*
                 * We already checked the value when the mgmt frame was
                 * provided to the txrx layer.
                 * No need to check it a 2nd time.
                 */
                ota_ack_cb = pdev->tx_mgmt.callbacks[mgmt_type].ota_ack_cb;
                if (ota_ack_cb) {
                        void *ctxt;
                        ctxt = pdev->tx_mgmt.callbacks[mgmt_type].ctxt;
                        ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
                }
                /* free the netbuf */
                cdf_nbuf_free(tx_desc->netbuf);
        } else {
                /* single regular frame */
                cdf_nbuf_set_next(tx_desc->netbuf, NULL);
                cdf_nbuf_tx_free(tx_desc->netbuf, had_error);
        }
        /* free the tx desc */
        ol_tx_desc_free(pdev, tx_desc);
}

#if defined(FEATURE_TSO)
/**
 * ol_tso_alloc_segment() - allocate a TSO segment element
 * @pdev: txrx pdev
 *
 * Allocates a TSO segment element from the free list held in
 * the txrx pdev.
 *
 * Return: pointer to the allocated TSO segment element, or NULL if
 *         the free list is empty
 */
struct cdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
{
        struct cdf_tso_seg_elem_t *tso_seg = NULL;

        cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
        if (pdev->tso_seg_pool.freelist) {
                pdev->tso_seg_pool.num_free--;
                tso_seg = pdev->tso_seg_pool.freelist;
                pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
        }
        cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);

        return tso_seg;
}

/**
 * ol_tso_free_segment() - free a TSO segment element
 * @pdev: txrx pdev
 * @tso_seg: the TSO segment element to be freed
 *
 * Returns a TSO segment element to the free list held in the
 * txrx pdev.
 *
 * Return: none
 */
void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
                         struct cdf_tso_seg_elem_t *tso_seg)
{
        cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
        tso_seg->next = pdev->tso_seg_pool.freelist;
        pdev->tso_seg_pool.freelist = tso_seg;
        pdev->tso_seg_pool.num_free++;
        cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
}
#endif