/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */
/**
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* TODO: determine whether a ring ID field is also needed in the cookie */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
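
/*
 * Illustrative sketch (an editor's example, not part of the driver API):
 * unpacking a descriptor ID into the pool/page/offset fields defined by
 * the masks and shifts above.  The helper name is hypothetical.
 */
static inline void dp_tx_desc_id_unpack_example(uint32_t tx_desc_id,
		uint8_t *pool_id, uint16_t *page_id, uint16_t *offset)
{
	/* isolate each field with its mask, then shift it down to bit 0 */
	*pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	*page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
			DP_TX_DESC_ID_PAGE_OS;
	*offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
			DP_TX_DESC_ID_OFFSET_OS;
}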
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

#define MAX_POOL_BUFF_COUNT 10000
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *soc);
void dp_tx_flow_control_deinit(struct dp_soc *soc);
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
		tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
		uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
		uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
		uint8_t flow_pool_id, uint16_t flow_pool_size);
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
		uint8_t flow_type, uint8_t flow_pool_id,
		uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
		uint8_t flow_type, uint8_t flow_pool_id);
/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}
/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
		struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}
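
/*
 * Usage sketch (illustrative only): both helpers above assume the
 * caller already holds flow_pool_lock and has validated the pool, e.g.:
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc) {
 *		struct dp_tx_desc_s *desc = dp_tx_get_desc_flow_pool(pool);
 *
 *		... use desc, then return it ...
 *		dp_tx_put_desc_flow_pool(pool, desc);
 *	}
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */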
/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id from which the descriptor should be allocated
 *
 * Return: Tx descriptor, or NULL if the pool is exhausted
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					      WLAN_STOP_ALL_NETIF_QUEUE,
					      WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: pool id to which the descriptor belongs
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				      WLAN_WAKE_ALL_NETIF_QUEUE,
				      WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			pool->status = FLOW_POOL_INACTIVE;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!\n",
				  __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!\n",
			  __func__, __LINE__);
		break;
	}
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
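
/*
 * Call-pattern sketch (illustrative, not driver code): with flow
 * control V2, dp_tx_desc_alloc() pauses the netif queues through
 * soc->pause_cb once avail_desc drops below stop_th, and
 * dp_tx_desc_free() wakes them again once it climbs past start_th:
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc(soc, pool_id);
 *
 *	if (!desc)
 *		return QDF_STATUS_E_RESOURCES;
 *	... hand desc to the hardware; on Tx completion ...
 *	dp_tx_desc_free(soc, desc, pool_id);
 */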
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
		uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
		uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
		uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}
/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id from which the descriptor should be allocated
 *
 * Return: Tx descriptor, or NULL if the pool is exhausted
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;
	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return tx_desc;
}
/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 * from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id from which the descriptors should be allocated
 * @num_requested: number of descriptors required
 *
 * Allocates multiple tx descriptors and links them into a chain.
 *
 * Return: h_desc, pointer to the first descriptor in the chain,
 *	   or NULL if the pool cannot satisfy the request
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
	    (soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s, No Free Desc: Available(%d) num_requested(%d)",
			  __func__, soc->tx_desc[desc_pool_id].num_free,
			  num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free >= num_requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the last descriptor of the chain allocated as well, and
	 * account for all num_requested descriptors (the original code
	 * skipped the last element and undercounted by one)
	 */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}
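
/*
 * Consumption sketch (illustrative; 'n' is hypothetical): the chain
 * returned above is NULL-terminated, so it can be walked through the
 * descriptors' own next pointers:
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc_multiple(soc, id, n);
 *
 *	while (desc) {
 *		struct dp_tx_desc_s *next = desc->next;
 *
 *		... program and send desc ...
 *		desc = next;
 *	}
 */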
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: pool id to which the descriptor is returned
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_desc_find() - find dp tx descriptor from cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: pool id decoded from the descriptor ID
 * @page_id: page id decoded from the descriptor ID
 * @offset: descriptor offset within the page, decoded from the descriptor ID
 *
 * Use the fields of a tx descriptor ID (cookie) to find the corresponding
 * descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
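
/*
 * Lookup sketch (illustrative): a completion cookie can be resolved back
 * to its descriptor with the ID fields defined at the top of this file,
 * e.g. via the dp_tx_desc_id_unpack_example() helper sketched there:
 *
 *	uint8_t pool_id;
 *	uint16_t page_id, offset;
 *
 *	dp_tx_desc_id_unpack_example(tx_desc_id, &pool_id, &page_id, &offset);
 *	tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 */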
/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: tx extension descriptor, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
		uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}
/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}
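
/*
 * Pairing sketch (illustrative): extension descriptors follow the same
 * alloc/free discipline as the main software descriptors:
 *
 *	struct dp_tx_ext_desc_elem_s *ext = dp_tx_ext_desc_alloc(soc, id);
 *
 *	if (ext) {
 *		... populate the extension element ...
 *		dp_tx_ext_desc_free(soc, ext, id);
 *	}
 */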
/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 * attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: pool id to which the descriptors are returned
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
		uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes;
	 * assert on elem here (the original asserted on head before it was
	 * initialized)
	 */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO descriptor should be allocated
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer, or NULL if the pool
 *	   is exhausted
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}
/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to which the TSO descriptor is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}
/**
 * dp_tso_num_seg_alloc() - allocate a tso num-seg element from the pool
 * @soc: device soc instance
 * @pool_id: pool id from which the element should be allocated
 *
 * Return: tso_num_seg element, or NULL if the pool is exhausted
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

/**
 * dp_tso_num_seg_free() - return a tso num-seg element to the pool
 * @soc: device soc instance
 * @pool_id: pool id to which the element is returned
 * @tso_num_seg: element to be freed
 *
 * Return: none
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
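
/*
 * TSO pool sketch (illustrative): segment elements are taken from and
 * returned to the per-pool freelists above with the usual pairing:
 *
 *	struct qdf_tso_seg_elem_t *seg = dp_tx_tso_desc_alloc(soc, id);
 *
 *	if (seg) {
 *		... fill seg with the segment's fragment info ...
 *		dp_tx_tso_desc_free(soc, id, seg);
 *	}
 */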
#endif /* FEATURE_TSO */
/**
 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: dp_tx_me_buf_t (buf), or NULL if the pool is exhausted
 */
static inline struct dp_tx_me_buf_t *
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}
/**
 * dp_tx_me_free_buf() - Free me descriptor and add it to pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
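
/*
 * ME pool sketch (illustrative): ME buffers come from the pdev-level
 * freelist guarded by tx_mutex, with the usual alloc/free pairing:
 *
 *	struct dp_tx_me_buf_t *mc_buf = dp_tx_me_alloc_buf(pdev);
 *
 *	if (mc_buf) {
 *		... use mc_buf ...
 *		dp_tx_me_free_buf(pdev, mc_buf);
 *	}
 */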
#endif /* DP_TX_DESC_H */