dp_tx_desc.h

  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef DP_TX_DESC_H
  20. #define DP_TX_DESC_H
  21. #include "dp_types.h"
  22. #include "dp_tx.h"
  23. #include "dp_internal.h"
  24. /**
  25. * 21 bits cookie
  26. * 2 bits pool id 0 ~ 3,
  27. * 10 bits page id 0 ~ 1023
  28. * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
  29. */
  30. /* ???Ring ID needed??? */
  31. #define DP_TX_DESC_ID_POOL_MASK 0x018000
  32. #define DP_TX_DESC_ID_POOL_OS 15
  33. #define DP_TX_DESC_ID_PAGE_MASK 0x007FE0
  34. #define DP_TX_DESC_ID_PAGE_OS 5
  35. #define DP_TX_DESC_ID_OFFSET_MASK 0x00001F
  36. #define DP_TX_DESC_ID_OFFSET_OS 0
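
/*
 * Illustrative sketch (not part of the original header): how the mask/shift
 * macros above are expected to combine pool id, page id and offset into a
 * single descriptor ID (cookie). The helper name below is hypothetical and
 * exists only to make the bit layout concrete.
 */
static inline uint32_t dp_tx_desc_id_compose_example(uint8_t pool_id,
                                                     uint16_t page_id,
                                                     uint16_t offset)
{
        return ((pool_id << DP_TX_DESC_ID_POOL_OS) & DP_TX_DESC_ID_POOL_MASK) |
               ((page_id << DP_TX_DESC_ID_PAGE_OS) & DP_TX_DESC_ID_PAGE_MASK) |
               ((offset << DP_TX_DESC_ID_OFFSET_OS) &
                DP_TX_DESC_ID_OFFSET_MASK);
}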
  37. /**
  38. * Compile-time assert on the tx desc size
  39. *
  40. * If this assert is hit, update POOL_MASK and
  41. * PAGE_MASK according to the new descriptor size.
  42. *
  43. * For the current PAGE mask, the allowed tx_desc size
  44. * range is 128 to 256 bytes.
  45. */
  46. QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
  47. ((sizeof(struct dp_tx_desc_s)) <=
  48. (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
  49. ((sizeof(struct dp_tx_desc_s)) >
  50. (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
  51. );
  52. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  53. #define TX_DESC_LOCK_CREATE(lock)
  54. #define TX_DESC_LOCK_DESTROY(lock)
  55. #define TX_DESC_LOCK_LOCK(lock)
  56. #define TX_DESC_LOCK_UNLOCK(lock)
  57. #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
  58. ((pool)->status == FLOW_POOL_INACTIVE)
  59. #ifdef QCA_AC_BASED_FLOW_CONTROL
  60. #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
  61. dp_tx_flow_pool_member_clean(_tx_desc_pool)
  62. #else /* !QCA_AC_BASED_FLOW_CONTROL */
  63. #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
  64. do { \
  65. (_tx_desc_pool)->elem_size = 0; \
  66. (_tx_desc_pool)->freelist = NULL; \
  67. (_tx_desc_pool)->pool_size = 0; \
  68. (_tx_desc_pool)->avail_desc = 0; \
  69. (_tx_desc_pool)->start_th = 0; \
  70. (_tx_desc_pool)->stop_th = 0; \
  71. (_tx_desc_pool)->status = FLOW_POOL_INACTIVE; \
  72. } while (0)
  73. #endif /* QCA_AC_BASED_FLOW_CONTROL */
  74. #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
  75. #define TX_DESC_LOCK_CREATE(lock) qdf_spinlock_create(lock)
  76. #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
  77. #define TX_DESC_LOCK_LOCK(lock) qdf_spin_lock_bh(lock)
  78. #define TX_DESC_LOCK_UNLOCK(lock) qdf_spin_unlock_bh(lock)
  79. #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
  80. #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
  81. do { \
  82. (_tx_desc_pool)->elem_size = 0; \
  83. (_tx_desc_pool)->num_allocated = 0; \
  84. (_tx_desc_pool)->freelist = NULL; \
  85. (_tx_desc_pool)->elem_count = 0; \
  86. (_tx_desc_pool)->num_free = 0; \
  87. } while (0)
  88. #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
  89. #define MAX_POOL_BUFF_COUNT 10000
  90. #ifdef DP_TX_TRACKING
  91. static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
  92. uint32_t magic_pattern)
  93. {
  94. tx_desc->magic = magic_pattern;
  95. }
  96. #else
  97. static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
  98. uint32_t magic_pattern)
  99. {
  100. }
  101. #endif
  102. QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
  103. uint32_t num_elem);
  104. QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
  105. uint32_t num_elem);
  106. void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
  107. void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
  108. QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
  109. uint32_t num_elem);
  110. QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
  111. uint32_t num_elem);
  112. void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
  113. void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
  114. QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
  115. uint32_t num_elem);
  116. QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
  117. uint32_t num_elem);
  118. void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
  119. void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
  120. QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
  121. uint32_t num_elem);
  122. QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
  123. uint32_t num_elem);
  124. void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
  125. void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
  126. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  127. void dp_tx_flow_control_init(struct dp_soc *);
  128. void dp_tx_flow_control_deinit(struct dp_soc *);
  129. QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
  130. tx_pause_callback pause_cb);
  131. QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
  132. uint8_t vdev_id);
  133. void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
  134. uint8_t vdev_id);
  135. void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
  136. struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
  137. uint8_t flow_pool_id, uint32_t flow_pool_size);
  138. QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
  139. uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size);
  140. void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
  141. uint8_t flow_type, uint8_t flow_pool_id);
  142. /**
  143. * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
  144. * @pool: flow pool
  145. *
  146. * Caller needs to take lock and do sanity checks.
  147. *
  148. * Return: tx descriptor
  149. */
  150. static inline
  151. struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
  152. {
  153. struct dp_tx_desc_s *tx_desc = pool->freelist;
  154. pool->freelist = pool->freelist->next;
  155. pool->avail_desc--;
  156. return tx_desc;
  157. }
  158. /**
  159. * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
  160. * @pool: flow pool
  161. * @tx_desc: tx descriptor
  162. *
  163. * Caller needs to take lock and do sanity checks.
  164. *
  165. * Return: none
  166. */
  167. static inline
  168. void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
  169. struct dp_tx_desc_s *tx_desc)
  170. {
  171. tx_desc->next = pool->freelist;
  172. pool->freelist = tx_desc;
  173. pool->avail_desc++;
  174. }
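
/*
 * Illustrative sketch (not part of the original header): the two helpers
 * above take no lock and do no sanity checks themselves, so callers such as
 * dp_tx_desc_alloc()/dp_tx_desc_free() hold flow_pool_lock and validate the
 * freelist first. A minimal, hypothetical wrapper showing that pattern:
 */
static inline struct dp_tx_desc_s *
dp_tx_get_desc_flow_pool_locked_example(struct dp_tx_desc_pool_s *pool)
{
        struct dp_tx_desc_s *tx_desc = NULL;

        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (pool->freelist)
                tx_desc = dp_tx_get_desc_flow_pool(pool);
        qdf_spin_unlock_bh(&pool->flow_pool_lock);

        return tx_desc;
}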
  175. #ifdef QCA_AC_BASED_FLOW_CONTROL
  176. /**
  177. * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
  178. *
  179. * @pool: flow pool
  180. *
  181. * Return: None
  182. */
  183. static inline void
  184. dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
  185. {
  186. pool->elem_size = 0;
  187. pool->freelist = NULL;
  188. pool->pool_size = 0;
  189. pool->avail_desc = 0;
  190. qdf_mem_zero(pool->start_th, FL_TH_MAX);
  191. qdf_mem_zero(pool->stop_th, FL_TH_MAX);
  192. pool->status = FLOW_POOL_INACTIVE;
  193. }
  194. /**
  195. * dp_tx_is_threshold_reached() - Check if the current avail desc count meets a stop threshold
  196. *
  197. * @pool: flow pool
  198. * @avail_desc: available descriptor number
  199. *
  200. * Return: true if threshold is met, false if not
  201. */
  202. static inline bool
  203. dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
  204. {
  205. if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
  206. return true;
  207. else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
  208. return true;
  209. else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
  210. return true;
  211. else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
  212. return true;
  213. else
  214. return false;
  215. }
  216. /**
  217. * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
  218. *
  219. * @soc: dp soc
  220. * @pool: flow pool
  221. */
  222. static inline void
  223. dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
  224. struct dp_tx_desc_pool_s *pool)
  225. {
  226. if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
  227. pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
  228. return;
  229. } else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
  230. pool->avail_desc > pool->stop_th[DP_TH_VI]) {
  231. pool->status = FLOW_POOL_BE_BK_PAUSED;
  232. } else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
  233. pool->avail_desc > pool->stop_th[DP_TH_VO]) {
  234. pool->status = FLOW_POOL_VI_PAUSED;
  235. } else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
  236. pool->avail_desc > pool->stop_th[DP_TH_HI]) {
  237. pool->status = FLOW_POOL_VO_PAUSED;
  238. } else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
  239. pool->status = FLOW_POOL_ACTIVE_PAUSED;
  240. }
  241. switch (pool->status) {
  242. case FLOW_POOL_ACTIVE_PAUSED:
  243. soc->pause_cb(pool->flow_pool_id,
  244. WLAN_NETIF_PRIORITY_QUEUE_OFF,
  245. WLAN_DATA_FLOW_CTRL_PRI);
  246. fallthrough;
  247. case FLOW_POOL_VO_PAUSED:
  248. soc->pause_cb(pool->flow_pool_id,
  249. WLAN_NETIF_VO_QUEUE_OFF,
  250. WLAN_DATA_FLOW_CTRL_VO);
  251. fallthrough;
  252. case FLOW_POOL_VI_PAUSED:
  253. soc->pause_cb(pool->flow_pool_id,
  254. WLAN_NETIF_VI_QUEUE_OFF,
  255. WLAN_DATA_FLOW_CTRL_VI);
  256. fallthrough;
  257. case FLOW_POOL_BE_BK_PAUSED:
  258. soc->pause_cb(pool->flow_pool_id,
  259. WLAN_NETIF_BE_BK_QUEUE_OFF,
  260. WLAN_DATA_FLOW_CTRL_BE_BK);
  261. break;
  262. default:
  263. dp_err("Invalid pool status:%u to adjust", pool->status);
  264. }
  265. }
  266. /**
  267. * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
  268. *
  269. * @soc: Handle to DP SoC structure
  270. * @desc_pool_id: ID of the flow control pool
  271. *
  272. * Return: TX descriptor allocated or NULL
  273. */
  274. static inline struct dp_tx_desc_s *
  275. dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
  276. {
  277. struct dp_tx_desc_s *tx_desc = NULL;
  278. struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
  279. bool is_pause = false;
  280. enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
  281. enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
  282. enum netif_reason_type reason;
  283. if (qdf_likely(pool)) {
  284. qdf_spin_lock_bh(&pool->flow_pool_lock);
  285. if (qdf_likely(pool->avail_desc &&
  286. pool->status != FLOW_POOL_INVALID &&
  287. pool->status != FLOW_POOL_INACTIVE)) {
  288. tx_desc = dp_tx_get_desc_flow_pool(pool);
  289. tx_desc->pool_id = desc_pool_id;
  290. tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
  291. dp_tx_desc_set_magic(tx_desc,
  292. DP_TX_MAGIC_PATTERN_INUSE);
  293. is_pause = dp_tx_is_threshold_reached(pool,
  294. pool->avail_desc);
  295. if (qdf_unlikely(pool->status ==
  296. FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
  297. dp_tx_adjust_flow_pool_state(soc, pool);
  298. is_pause = false;
  299. }
  300. if (qdf_unlikely(is_pause)) {
  301. switch (pool->status) {
  302. case FLOW_POOL_ACTIVE_UNPAUSED:
  303. /* pause network BE/BK queues */
  304. act = WLAN_NETIF_BE_BK_QUEUE_OFF;
  305. reason = WLAN_DATA_FLOW_CTRL_BE_BK;
  306. level = DP_TH_BE_BK;
  307. pool->status = FLOW_POOL_BE_BK_PAUSED;
  308. break;
  309. case FLOW_POOL_BE_BK_PAUSED:
  310. /* pause network VI queue */
  311. act = WLAN_NETIF_VI_QUEUE_OFF;
  312. reason = WLAN_DATA_FLOW_CTRL_VI;
  313. level = DP_TH_VI;
  314. pool->status = FLOW_POOL_VI_PAUSED;
  315. break;
  316. case FLOW_POOL_VI_PAUSED:
  317. /* pause network VO queue */
  318. act = WLAN_NETIF_VO_QUEUE_OFF;
  319. reason = WLAN_DATA_FLOW_CTRL_VO;
  320. level = DP_TH_VO;
  321. pool->status = FLOW_POOL_VO_PAUSED;
  322. break;
  323. case FLOW_POOL_VO_PAUSED:
  324. /* pause network HI PRI queue */
  325. act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
  326. reason = WLAN_DATA_FLOW_CTRL_PRI;
  327. level = DP_TH_HI;
  328. pool->status = FLOW_POOL_ACTIVE_PAUSED;
  329. break;
  330. case FLOW_POOL_ACTIVE_PAUSED:
  331. act = WLAN_NETIF_ACTION_TYPE_NONE;
  332. break;
  333. default:
  334. dp_err_rl("pool status is %d!",
  335. pool->status);
  336. break;
  337. }
  338. if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
  339. pool->latest_pause_time[level] =
  340. qdf_get_system_timestamp();
  341. soc->pause_cb(desc_pool_id,
  342. act,
  343. reason);
  344. }
  345. }
  346. } else {
  347. pool->pkt_drop_no_desc++;
  348. }
  349. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  350. } else {
  351. soc->pool_stats.pkt_drop_no_pool++;
  352. }
  353. return tx_desc;
  354. }
  355. /**
  356. * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
  357. *
  358. * @soc: Handle to DP SoC structure
  359. * @tx_desc: the tx descriptor to be freed
  360. * @desc_pool_id: ID of the flow control pool
  361. *
  362. * Return: None
  363. */
  364. static inline void
  365. dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
  366. uint8_t desc_pool_id)
  367. {
  368. struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
  369. qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
  370. enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
  371. enum netif_reason_type reason;
  372. qdf_spin_lock_bh(&pool->flow_pool_lock);
  373. tx_desc->vdev_id = DP_INVALID_VDEV_ID;
  374. tx_desc->nbuf = NULL;
  375. tx_desc->flags = 0;
  376. dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
  377. dp_tx_put_desc_flow_pool(pool, tx_desc);
  378. switch (pool->status) {
  379. case FLOW_POOL_ACTIVE_PAUSED:
  380. if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
  381. act = WLAN_NETIF_PRIORITY_QUEUE_ON;
  382. reason = WLAN_DATA_FLOW_CTRL_PRI;
  383. pool->status = FLOW_POOL_VO_PAUSED;
  384. /* Update maximum pause duration for HI queue */
  385. pause_dur = unpause_time -
  386. pool->latest_pause_time[DP_TH_HI];
  387. if (pool->max_pause_time[DP_TH_HI] < pause_dur)
  388. pool->max_pause_time[DP_TH_HI] = pause_dur;
  389. }
  390. break;
  391. case FLOW_POOL_VO_PAUSED:
  392. if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
  393. act = WLAN_NETIF_VO_QUEUE_ON;
  394. reason = WLAN_DATA_FLOW_CTRL_VO;
  395. pool->status = FLOW_POOL_VI_PAUSED;
  396. /* Update maximum pause duration for VO queue */
  397. pause_dur = unpause_time -
  398. pool->latest_pause_time[DP_TH_VO];
  399. if (pool->max_pause_time[DP_TH_VO] < pause_dur)
  400. pool->max_pause_time[DP_TH_VO] = pause_dur;
  401. }
  402. break;
  403. case FLOW_POOL_VI_PAUSED:
  404. if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
  405. act = WLAN_NETIF_VI_QUEUE_ON;
  406. reason = WLAN_DATA_FLOW_CTRL_VI;
  407. pool->status = FLOW_POOL_BE_BK_PAUSED;
  408. /* Update maximum pause duration for VI queue */
  409. pause_dur = unpause_time -
  410. pool->latest_pause_time[DP_TH_VI];
  411. if (pool->max_pause_time[DP_TH_VI] < pause_dur)
  412. pool->max_pause_time[DP_TH_VI] = pause_dur;
  413. }
  414. break;
  415. case FLOW_POOL_BE_BK_PAUSED:
  416. if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
  417. act = WLAN_NETIF_BE_BK_QUEUE_ON;
  418. reason = WLAN_DATA_FLOW_CTRL_BE_BK;
  419. pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
  420. /* Update maximum pause duration for BE_BK queue */
  421. pause_dur = unpause_time -
  422. pool->latest_pause_time[DP_TH_BE_BK];
  423. if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
  424. pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
  425. }
  426. break;
  427. case FLOW_POOL_INVALID:
  428. if (pool->avail_desc == pool->pool_size) {
  429. dp_tx_desc_pool_deinit(soc, desc_pool_id);
  430. dp_tx_desc_pool_free(soc, desc_pool_id);
  431. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  432. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  433. "%s %d pool is freed!!",
  434. __func__, __LINE__);
  435. return;
  436. }
  437. break;
  438. case FLOW_POOL_ACTIVE_UNPAUSED:
  439. break;
  440. default:
  441. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  442. "%s %d pool is INACTIVE State!!",
  443. __func__, __LINE__);
  444. break;
  445. };
  446. if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
  447. soc->pause_cb(pool->flow_pool_id,
  448. act, reason);
  449. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  450. }
  451. #else /* QCA_AC_BASED_FLOW_CONTROL */
  452. static inline bool
  453. dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
  454. {
  455. if (qdf_unlikely(avail_desc < pool->stop_th))
  456. return true;
  457. else
  458. return false;
  459. }
  460. /**
  461. * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
  462. *
  463. * @soc: Handle to DP SoC structure
  464. * @desc_pool_id: ID of the flow control pool
  465. *
  466. * Return: TX descriptor allocated or NULL
  467. */
  468. static inline struct dp_tx_desc_s *
  469. dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
  470. {
  471. struct dp_tx_desc_s *tx_desc = NULL;
  472. struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
  473. if (pool) {
  474. qdf_spin_lock_bh(&pool->flow_pool_lock);
  475. if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
  476. pool->avail_desc) {
  477. tx_desc = dp_tx_get_desc_flow_pool(pool);
  478. tx_desc->pool_id = desc_pool_id;
  479. tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
  480. dp_tx_desc_set_magic(tx_desc,
  481. DP_TX_MAGIC_PATTERN_INUSE);
  482. if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
  483. pool->status = FLOW_POOL_ACTIVE_PAUSED;
  484. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  485. /* pause network queues */
  486. soc->pause_cb(desc_pool_id,
  487. WLAN_STOP_ALL_NETIF_QUEUE,
  488. WLAN_DATA_FLOW_CONTROL);
  489. } else {
  490. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  491. }
  492. } else {
  493. pool->pkt_drop_no_desc++;
  494. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  495. }
  496. } else {
  497. soc->pool_stats.pkt_drop_no_pool++;
  498. }
  499. return tx_desc;
  500. }
  501. /**
  502. * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
  503. *
  504. * @soc: Handle to DP SoC structure
  505. * @tx_desc: the tx descriptor to be freed
  506. * @desc_pool_id: ID of the flow control pool
  507. *
  508. * Return: None
  509. */
  510. static inline void
  511. dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
  512. uint8_t desc_pool_id)
  513. {
  514. struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
  515. qdf_spin_lock_bh(&pool->flow_pool_lock);
  516. tx_desc->vdev_id = DP_INVALID_VDEV_ID;
  517. tx_desc->nbuf = NULL;
  518. tx_desc->flags = 0;
  519. dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
  520. dp_tx_put_desc_flow_pool(pool, tx_desc);
  521. switch (pool->status) {
  522. case FLOW_POOL_ACTIVE_PAUSED:
  523. if (pool->avail_desc > pool->start_th) {
  524. soc->pause_cb(pool->flow_pool_id,
  525. WLAN_WAKE_ALL_NETIF_QUEUE,
  526. WLAN_DATA_FLOW_CONTROL);
  527. pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
  528. }
  529. break;
  530. case FLOW_POOL_INVALID:
  531. if (pool->avail_desc == pool->pool_size) {
  532. dp_tx_desc_pool_deinit(soc, desc_pool_id);
  533. dp_tx_desc_pool_free(soc, desc_pool_id);
  534. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  535. qdf_print("%s %d pool is freed!!",
  536. __func__, __LINE__);
  537. return;
  538. }
  539. break;
  540. case FLOW_POOL_ACTIVE_UNPAUSED:
  541. break;
  542. default:
  543. qdf_print("%s %d pool is INACTIVE State!!",
  544. __func__, __LINE__);
  545. break;
  546. };
  547. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  548. }
  549. #endif /* QCA_AC_BASED_FLOW_CONTROL */
  550. static inline bool
  551. dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
  552. {
  553. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  554. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  555. DP_MOD_ID_CDP);
  556. struct dp_tx_desc_pool_s *pool;
  557. bool status;
  558. if (!vdev)
  559. return false;
  560. pool = vdev->pool;
  561. status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
  562. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  563. return status;
  564. }
  565. #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
  566. static inline void dp_tx_flow_control_init(struct dp_soc *handle)
  567. {
  568. }
  569. static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
  570. {
  571. }
  572. static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
  573. uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
  574. uint32_t flow_pool_size)
  575. {
  576. return QDF_STATUS_SUCCESS;
  577. }
  578. static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
  579. uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
  580. {
  581. }
  582. #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
  583. static inline
  584. void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
  585. {
  586. if (tx_desc)
  587. prefetch(tx_desc);
  588. }
  589. #else
  590. static inline
  591. void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
  592. {
  593. }
  594. #endif
  595. /**
  596. * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
  597. *
  598. * @soc: Handle to DP SoC structure
  599. * @desc_pool_id: ID of the descriptor pool
  600. *
  601. * Return: TX descriptor allocated or NULL
  602. */
  603. static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
  604. uint8_t desc_pool_id)
  605. {
  606. struct dp_tx_desc_s *tx_desc = NULL;
  607. struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
  608. TX_DESC_LOCK_LOCK(&pool->lock);
  609. tx_desc = pool->freelist;
  610. /* Pool is exhausted */
  611. if (!tx_desc) {
  612. TX_DESC_LOCK_UNLOCK(&pool->lock);
  613. return NULL;
  614. }
  615. pool->freelist = pool->freelist->next;
  616. pool->num_allocated++;
  617. pool->num_free--;
  618. dp_tx_prefetch_desc(pool->freelist);
  619. tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
  620. TX_DESC_LOCK_UNLOCK(&pool->lock);
  621. return tx_desc;
  622. }
  623. /**
  624. * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
  625. * from given pool
  626. * @soc: Handle to DP SoC structure
  627. * @desc_pool_id: ID of the pool from which descriptors are picked
  628. * @num_requested: number of descriptors required
  629. *
  630. * Allocate multiple tx descriptors and link them into a list.
  631. *
  632. * Return: h_desc, pointer to the first descriptor in the list
  633. */
  634. static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
  635. struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
  636. {
  637. struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
  638. uint8_t count;
  639. struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
  640. TX_DESC_LOCK_LOCK(&pool->lock);
  641. if ((num_requested == 0) ||
  642. (pool->num_free < num_requested)) {
  643. TX_DESC_LOCK_UNLOCK(&pool->lock);
  644. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  645. "%s, No Free Desc: Available(%d) num_requested(%d)",
  646. __func__, pool->num_free,
  647. num_requested);
  648. return NULL;
  649. }
  650. h_desc = pool->freelist;
  651. /* h_desc should never be NULL since num_free >= num_requested */
  652. qdf_assert_always(h_desc);
  653. c_desc = h_desc;
  654. for (count = 0; count < (num_requested - 1); count++) {
  655. c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
  656. c_desc = c_desc->next;
  657. }
  /* mark the last descriptor too and account for all num_requested entries */
  c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
  658. pool->num_free -= num_requested;
  659. pool->num_allocated += num_requested;
  660. pool->freelist = c_desc->next;
  661. c_desc->next = NULL;
  662. TX_DESC_LOCK_UNLOCK(&pool->lock);
  663. return h_desc;
  664. }
  665. /**
  666. * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
  667. *
  668. * @soc: Handle to DP SoC structure
  669. * @desc_pool_id: ID of the pool to which the descriptor is returned
  670. * @tx_desc: the tx descriptor to be freed
  671. */
  672. static inline void
  673. dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
  674. uint8_t desc_pool_id)
  675. {
  676. struct dp_tx_desc_pool_s *pool = NULL;
  677. tx_desc->vdev_id = DP_INVALID_VDEV_ID;
  678. tx_desc->nbuf = NULL;
  679. tx_desc->flags = 0;
  680. pool = &soc->tx_desc[desc_pool_id];
  681. TX_DESC_LOCK_LOCK(&pool->lock);
  682. tx_desc->next = pool->freelist;
  683. pool->freelist = tx_desc;
  684. pool->num_allocated--;
  685. pool->num_free++;
  686. TX_DESC_LOCK_UNLOCK(&pool->lock);
  687. }
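
/*
 * Illustrative usage (not part of the original header): a caller that takes a
 * batch of descriptors and then has to give them back. The function name is
 * hypothetical; it only shows how the NULL-terminated list returned by
 * dp_tx_desc_alloc_multiple() pairs with dp_tx_desc_free().
 */
static inline void
dp_tx_desc_alloc_release_example(struct dp_soc *soc, uint8_t desc_pool_id,
                                 uint8_t num_requested)
{
        struct dp_tx_desc_s *desc_list;
        struct dp_tx_desc_s *next;

        desc_list = dp_tx_desc_alloc_multiple(soc, desc_pool_id,
                                              num_requested);
        if (!desc_list)
                return;

        /* walk the list and return every descriptor to the pool */
        while (desc_list) {
                next = desc_list->next;
                dp_tx_desc_free(soc, desc_list, desc_pool_id);
                desc_list = next;
        }
}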
  688. #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
  689. #ifdef QCA_DP_TX_DESC_ID_CHECK
  690. /**
  691. * dp_tx_is_desc_id_valid() - check whether the tx desc id is valid
  692. *
  693. * @soc: Handle to DP SoC structure
  694. * @tx_desc_id: tx descriptor id (cookie) to validate
  695. *
  696. * Return: true or false
  697. */
  698. static inline bool
  699. dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
  700. {
  701. uint8_t pool_id;
  702. uint16_t page_id, offset;
  703. struct dp_tx_desc_pool_s *pool;
  704. pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
  705. DP_TX_DESC_ID_POOL_OS;
  706. /* Pool ID is out of limit */
  707. if (pool_id > wlan_cfg_get_num_tx_desc_pool(
  708. soc->wlan_cfg_ctx)) {
  709. QDF_TRACE(QDF_MODULE_ID_DP,
  710. QDF_TRACE_LEVEL_FATAL,
  711. "%s:Tx Comp pool id %d not valid",
  712. __func__,
  713. pool_id);
  714. goto warn_exit;
  715. }
  716. pool = &soc->tx_desc[pool_id];
  717. /* the pool is freed */
  718. if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
  719. QDF_TRACE(QDF_MODULE_ID_DP,
  720. QDF_TRACE_LEVEL_FATAL,
  721. "%s:the pool %d has been freed",
  722. __func__,
  723. pool_id);
  724. goto warn_exit;
  725. }
  726. page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
  727. DP_TX_DESC_ID_PAGE_OS;
  728. /* the page id is out of limit */
  729. if (page_id >= pool->desc_pages.num_pages) {
  730. QDF_TRACE(QDF_MODULE_ID_DP,
  731. QDF_TRACE_LEVEL_FATAL,
  732. "%s:the page id %d invalid, pool id %d, num_page %d",
  733. __func__,
  734. page_id,
  735. pool_id,
  736. pool->desc_pages.num_pages);
  737. goto warn_exit;
  738. }
  739. offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
  740. DP_TX_DESC_ID_OFFSET_OS;
  741. /* the offset is out of limit */
  742. if (offset >= pool->desc_pages.num_element_per_page) {
  743. QDF_TRACE(QDF_MODULE_ID_DP,
  744. QDF_TRACE_LEVEL_FATAL,
  745. "%s:offset %d invalid, pool%d,num_elem_per_page %d",
  746. __func__,
  747. offset,
  748. pool_id,
  749. pool->desc_pages.num_element_per_page);
  750. goto warn_exit;
  751. }
  752. return true;
  753. warn_exit:
  754. QDF_TRACE(QDF_MODULE_ID_DP,
  755. QDF_TRACE_LEVEL_FATAL,
  756. "%s:Tx desc id 0x%x not valid",
  757. __func__,
  758. tx_desc_id);
  759. qdf_assert_always(0);
  760. return false;
  761. }
  762. #else
  763. static inline bool
  764. dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
  765. {
  766. return true;
  767. }
  768. #endif /* QCA_DP_TX_DESC_ID_CHECK */
  769. #ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
  770. static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
  771. struct dp_tx_desc_s *desc,
  772. uint8_t allow_fast_comp)
  773. {
  774. if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
  775. qdf_likely(allow_fast_comp)) {
  776. desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
  777. }
  778. }
  779. #else
  780. static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
  781. struct dp_tx_desc_s *desc,
  782. uint8_t allow_fast_comp)
  783. {
  784. }
  785. #endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */
  786. /**
  787. * dp_tx_desc_find() - find dp tx descriptor from cookie fields
  788. * @soc: handle for the device sending the data
  789. * @pool_id: pool id extracted from the descriptor ID
  790. * @page_id: page id extracted from the descriptor ID
  791. * @offset: offset extracted from the descriptor ID
  792. *
  793. * Use the fields decoded from a tx descriptor ID (cookie) to find the
  794. * corresponding descriptor object.
  */
  795. static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
  796. uint8_t pool_id, uint16_t page_id, uint16_t offset)
  797. {
  798. struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];
  799. return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
  800. tx_desc_pool->elem_size * offset;
  801. }
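
/*
 * Illustrative sketch (not part of the original header): recovering a
 * descriptor from a completion cookie by splitting the ID with the masks at
 * the top of this file and then looking it up with dp_tx_desc_find(). The
 * wrapper name is hypothetical.
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_find_by_id_example(struct dp_soc *soc, uint32_t tx_desc_id)
{
        uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
                                DP_TX_DESC_ID_POOL_OS;
        uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
                                DP_TX_DESC_ID_PAGE_OS;
        uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
                                DP_TX_DESC_ID_OFFSET_OS;

        if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
                return NULL;

        return dp_tx_desc_find(soc, pool_id, page_id, offset);
}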
  802. /**
  803. * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
  804. * @soc: handle for the device sending the data
  805. * @pool_id: target pool id
  806. *
  807. * Return: ext descriptor pointer, or NULL if the pool is exhausted
  808. */
  809. static inline
  810. struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
  811. uint8_t desc_pool_id)
  812. {
  813. struct dp_tx_ext_desc_elem_s *c_elem;
  814. qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
  815. if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
  816. qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
  817. return NULL;
  818. }
  819. c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
  820. soc->tx_ext_desc[desc_pool_id].freelist =
  821. soc->tx_ext_desc[desc_pool_id].freelist->next;
  822. soc->tx_ext_desc[desc_pool_id].num_free--;
  823. qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
  824. return c_elem;
  825. }
  826. /**
  827. * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
  828. * @soc: handle for the device sending the data
  829. * @pool_id: target pool id
  830. * @elem: ext descriptor to be released
  831. *
  832. * Return: None
  833. */
  834. static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
  835. struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
  836. {
  837. qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
  838. elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
  839. soc->tx_ext_desc[desc_pool_id].freelist = elem;
  840. soc->tx_ext_desc[desc_pool_id].num_free++;
  841. qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
  842. return;
  843. }
  844. /**
  845. * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
  846. * attach them to the free list
  847. * @soc: Handle to DP SoC structure
  848. * @desc_pool_id: pool id to which the descriptors are returned
  849. * @elem: head of the list of ext descriptors to be freed
  850. * @num_free: number of descriptors to be freed
  851. *
  852. * Return: none
  853. */
  854. static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
  855. struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
  856. uint8_t num_free)
  857. {
  858. struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
  859. uint8_t freed = num_free;
  860. /* caller should always guarantee a list of at least num_free nodes */
  861. qdf_assert_always(elem);
  862. head = elem;
  863. c_elem = head;
  864. tail = head;
  865. while (c_elem && freed) {
  866. tail = c_elem;
  867. c_elem = c_elem->next;
  868. freed--;
  869. }
  870. /* caller should always guarantee a list of at least num_free nodes */
  871. qdf_assert_always(tail);
  872. qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
  873. tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
  874. soc->tx_ext_desc[desc_pool_id].freelist = head;
  875. soc->tx_ext_desc[desc_pool_id].num_free += num_free;
  876. qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
  877. return;
  878. }
  879. #if defined(FEATURE_TSO)
  880. /**
  881. * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
  882. * @soc: device soc instance
  883. * @pool_id: pool id from which the tso descriptor should be allocated
  884. *
  885. * Allocates a TSO segment element from the free list held in
  886. * the soc
  887. *
  888. * Return: tso_seg, tso segment memory pointer
  889. */
  890. static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
  891. struct dp_soc *soc, uint8_t pool_id)
  892. {
  893. struct qdf_tso_seg_elem_t *tso_seg = NULL;
  894. qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
  895. if (soc->tx_tso_desc[pool_id].freelist) {
  896. soc->tx_tso_desc[pool_id].num_free--;
  897. tso_seg = soc->tx_tso_desc[pool_id].freelist;
  898. soc->tx_tso_desc[pool_id].freelist =
  899. soc->tx_tso_desc[pool_id].freelist->next;
  900. }
  901. qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
  902. return tso_seg;
  903. }
  904. /**
  905. * dp_tx_tso_desc_free() - function to free a TSO segment
  906. * @soc: device soc instance
  907. * @pool_id: pool id to which the tso descriptor is returned
  908. * @tso_seg: tso segment memory pointer
  909. *
  910. * Returns a TSO segment element to the free list held in the
  911. * soc
  912. *
  913. * Return: none
  914. */
  915. static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
  916. uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
  917. {
  918. qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
  919. tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
  920. soc->tx_tso_desc[pool_id].freelist = tso_seg;
  921. soc->tx_tso_desc[pool_id].num_free++;
  922. qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
  923. }
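
/*
 * Illustrative usage (not part of the original header): the allocate/free
 * pair above is symmetric, so a caller that cannot finish its TSO setup
 * simply returns the segment to the same pool. The helper name is
 * hypothetical and assumes the caller already knows the pool id.
 */
static inline bool
dp_tx_tso_seg_try_example(struct dp_soc *soc, uint8_t pool_id)
{
        struct qdf_tso_seg_elem_t *tso_seg;

        tso_seg = dp_tx_tso_desc_alloc(soc, pool_id);
        if (!tso_seg)
                return false;

        /* the segment would normally be filled and attached to a tx_desc */

        /* on a failure path, hand the segment back to the pool */
        dp_tx_tso_desc_free(soc, pool_id, tso_seg);
        return true;
}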
  924. static inline
  925. struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
  926. uint8_t pool_id)
  927. {
  928. struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
  929. qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
  930. if (soc->tx_tso_num_seg[pool_id].freelist) {
  931. soc->tx_tso_num_seg[pool_id].num_free--;
  932. tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
  933. soc->tx_tso_num_seg[pool_id].freelist =
  934. soc->tx_tso_num_seg[pool_id].freelist->next;
  935. }
  936. qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
  937. return tso_num_seg;
  938. }
  939. static inline
  940. void dp_tso_num_seg_free(struct dp_soc *soc,
  941. uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
  942. {
  943. qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
  944. tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
  945. soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
  946. soc->tx_tso_num_seg[pool_id].num_free++;
  947. qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
  948. }
  949. #endif
  950. /*
  951. * dp_tx_me_alloc_buf() - Allocate a descriptor from the ME pool
  952. * @pdev: DP_PDEV handle for datapath
  953. *
  954. * Return: dp_tx_me_buf_t (buf), or NULL if the pool is exhausted
  955. */
  956. static inline struct dp_tx_me_buf_t*
  957. dp_tx_me_alloc_buf(struct dp_pdev *pdev)
  958. {
  959. struct dp_tx_me_buf_t *buf = NULL;
  960. qdf_spin_lock_bh(&pdev->tx_mutex);
  961. if (pdev->me_buf.freelist) {
  962. buf = pdev->me_buf.freelist;
  963. pdev->me_buf.freelist = pdev->me_buf.freelist->next;
  964. pdev->me_buf.buf_in_use++;
  965. } else {
  966. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  967. "Error allocating memory in pool");
  968. qdf_spin_unlock_bh(&pdev->tx_mutex);
  969. return NULL;
  970. }
  971. qdf_spin_unlock_bh(&pdev->tx_mutex);
  972. return buf;
  973. }
  974. /*
  975. * dp_tx_me_free_buf() - Unmap the buffer holding the dest
  976. * address, free me descriptor and add it to the free-pool
  977. * @pdev: DP_PDEV handle for datapath
  978. * @buf: Allocated ME BUF
  979. *
  980. * Return: void
  981. */
  982. static inline void
  983. dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
  984. {
  985. /*
  986. * If the buf containing mac address was mapped,
  987. * it must be unmapped before freeing the me_buf.
  988. * The "paddr_macbuf" member in the me_buf structure
  989. * holds the mapped physical address and it must be
  990. * set to 0 after unmapping.
  991. */
  992. if (buf->paddr_macbuf) {
  993. qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
  994. buf->paddr_macbuf,
  995. QDF_DMA_TO_DEVICE,
  996. QDF_MAC_ADDR_SIZE);
  997. buf->paddr_macbuf = 0;
  998. }
  999. qdf_spin_lock_bh(&pdev->tx_mutex);
  1000. buf->next = pdev->me_buf.freelist;
  1001. pdev->me_buf.freelist = buf;
  1002. pdev->me_buf.buf_in_use--;
  1003. qdf_spin_unlock_bh(&pdev->tx_mutex);
  1004. }
  1005. #endif /* DP_TX_DESC_H */