dp_tx_desc.h

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"
  24. /**
  25. * 21 bits cookie
  26. * 2 bits pool id 0 ~ 3,
  27. * 10 bits page id 0 ~ 1023
  28. * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
  29. */
  30. /* ???Ring ID needed??? */
  31. #define DP_TX_DESC_ID_POOL_MASK 0x018000
  32. #define DP_TX_DESC_ID_POOL_OS 15
  33. #define DP_TX_DESC_ID_PAGE_MASK 0x007FE0
  34. #define DP_TX_DESC_ID_PAGE_OS 5
  35. #define DP_TX_DESC_ID_OFFSET_MASK 0x00001F
  36. #define DP_TX_DESC_ID_OFFSET_OS 0
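
/*
 * Illustrative sketch (not part of this header): how the masks above
 * compose a descriptor cookie from its pool/page/offset fields. The
 * helper name dp_tx_desc_id_compose() is hypothetical and is shown only
 * to clarify the bit layout; the inverse split is done in
 * dp_tx_is_desc_id_valid() further down.
 *
 *	static inline uint32_t
 *	dp_tx_desc_id_compose(uint8_t pool_id, uint16_t page_id,
 *			      uint16_t offset)
 *	{
 *		return ((pool_id << DP_TX_DESC_ID_POOL_OS) &
 *			DP_TX_DESC_ID_POOL_MASK) |
 *		       ((page_id << DP_TX_DESC_ID_PAGE_OS) &
 *			DP_TX_DESC_ID_PAGE_MASK) |
 *		       ((offset << DP_TX_DESC_ID_OFFSET_OS) &
 *			DP_TX_DESC_ID_OFFSET_MASK);
 *	}
 */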
/**
 * Compile-time assert on the tx descriptor size.
 *
 * If this assert is hit, update POOL_MASK and PAGE_MASK according to
 * the new descriptor size.
 *
 * With the current PAGE mask, the allowed size range of a tx_desc
 * is between 128 and 256.
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
			((sizeof(struct dp_tx_desc_s)) <=
			 (PAGE_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
			((sizeof(struct dp_tx_desc_s)) >
			 (PAGE_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1))));
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)
#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
do { \
	(_tx_desc_pool)->elem_size = 0; \
	(_tx_desc_pool)->freelist = NULL; \
	(_tx_desc_pool)->pool_size = 0; \
	(_tx_desc_pool)->avail_desc = 0; \
	(_tx_desc_pool)->start_th = 0; \
	(_tx_desc_pool)->stop_th = 0; \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE; \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock) qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock) qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock) qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
do { \
	(_tx_desc_pool)->elem_size = 0; \
	(_tx_desc_pool)->num_allocated = 0; \
	(_tx_desc_pool)->freelist = NULL; \
	(_tx_desc_pool)->elem_count = 0; \
	(_tx_desc_pool)->num_free = 0; \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

#ifdef DP_TX_TRACKING
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
	tx_desc->magic = magic_pattern;
}
#else
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
}
#endif
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem);
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem);
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint32_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint32_t num_elem);
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint32_t num_elem);
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				    uint32_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
					uint32_t num_elem);
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
				       uint32_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
				     tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);

struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint32_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);
/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			      struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}
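
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * the two helpers above assume the caller already holds the pool lock
 * and has checked that a descriptor is actually available, e.g.:
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc) {
 *		tx_desc = dp_tx_get_desc_flow_pool(pool);
 *		... use tx_desc ...
 *		dp_tx_put_desc_flow_pool(pool, tx_desc);
 *	}
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */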
#ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}
/**
 * dp_tx_is_threshold_reached() - Check if the available descriptor count
 *				  has hit a pause threshold
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}
/**
 * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
 * @soc: dp soc
 * @pool: flow pool
 */
static inline void
dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
			     struct dp_tx_desc_pool_s *pool)
{
	if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		return;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
		pool->status = FLOW_POOL_BE_BK_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
		   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
		pool->status = FLOW_POOL_VI_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
		   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_VO_PAUSED;
	} else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
		pool->status = FLOW_POOL_ACTIVE_PAUSED;
	}

	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_PRIORITY_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_PRI);
		/* fallthrough */
	case FLOW_POOL_VO_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VO_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VO);
		/* fallthrough */
	case FLOW_POOL_VI_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VI_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_VI);
		/* fallthrough */
	case FLOW_POOL_BE_BK_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_BE_BK_QUEUE_OFF,
			      WLAN_DATA_FLOW_CTRL_BE_BK);
		break;
	default:
		dp_err("Invalid pool status:%u to adjust", pool->status);
	}
}
/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
	enum netif_reason_type reason;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc &&
			       pool->status != FLOW_POOL_INVALID &&
			       pool->status != FLOW_POOL_INACTIVE)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(pool->status ==
					 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
				dp_tx_adjust_flow_pool_state(soc, pool);
				is_pause = false;
			}

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_BE_BK;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VI;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_VO;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					reason = WLAN_DATA_FLOW_CTRL_PRI;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      reason);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
	enum netif_reason_type reason;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	tx_desc->timestamp = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_PRI;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VO;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_VI;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_NETIF_BE_BK_QUEUE_ON;
			reason = WLAN_DATA_FLOW_CTRL_BE_BK;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, reason);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			dp_tx_desc_set_magic(tx_desc,
					     DP_TX_MAGIC_PATTERN_INUSE);
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					      WLAN_STOP_ALL_NETIF_QUEUE,
					      WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(
				soc->hif_handle,
				RTPM_ID_DP_TX_DESC_ALLOC_FREE);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
	tx_desc->timestamp = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				      WLAN_WAKE_ALL_NETIF_QUEUE,
				      WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_deinit(soc, desc_pool_id);
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement PM usage count if the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle,
			   RTPM_ID_DP_TX_DESC_ALLOC_FREE);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */
static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_tx_desc_pool_s *pool;
	bool status;

	if (!vdev)
		return false;

	pool = vdev->pool;
	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return status;
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint32_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc)
		prefetch(tx_desc);
}
#else
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id from which the descriptor should be allocated
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						    uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	tx_desc = pool->freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		return NULL;
	}

	pool->freelist = pool->freelist->next;
	pool->num_allocated++;
	pool->num_free--;
	dp_tx_prefetch_desc(pool->freelist);

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return tx_desc;
}
/**
 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
 *				 from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id from which the descriptors should be allocated
 * @num_requested: number of descriptors required
 *
 * Allocate multiple tx descriptors and link them into a list.
 *
 * Return: h_desc, pointer to the first descriptor in the list
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);

	if ((num_requested == 0) ||
	    (pool->num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&pool->lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s, No Free Desc: Available(%d) num_requested(%d)",
			  __func__, pool->num_free,
			  num_requested);
		return NULL;
	}

	h_desc = pool->freelist;

	/* h_desc should never be NULL since num_free > requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	pool->num_free -= count;
	pool->num_allocated += count;
	pool->freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return h_desc;
}
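
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * the chain returned by dp_tx_desc_alloc_multiple() is linked through
 * ->next and NULL-terminated, so it can be walked like this:
 *
 *	struct dp_tx_desc_s *desc, *next;
 *
 *	desc = dp_tx_desc_alloc_multiple(soc, desc_pool_id, num_requested);
 *	if (!desc)
 *		return QDF_STATUS_E_RESOURCES;
 *	while (desc) {
 *		next = desc->next;
 *		... fill and enqueue desc ...
 *		desc = next;
 *	}
 */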
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to be freed
 * @desc_pool_id: pool id the descriptor should be returned to
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = NULL;

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;

	pool = &soc->tx_desc[desc_pool_id];
	TX_DESC_LOCK_LOCK(&pool->lock);
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->num_allocated--;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
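
/*
 * Illustrative sketch (hypothetical transmit path, not part of this
 * header): whichever flow-control variant above is compiled in,
 * dp_tx_desc_alloc()/dp_tx_desc_free() are used as a pair around a
 * send attempt.
 *
 *	struct dp_tx_desc_s *tx_desc;
 *
 *	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *	if (!tx_desc)
 *		return QDF_STATUS_E_RESOURCES;
 *
 *	... program tx_desc and hand it to the hardware ring ...
 *
 *	On completion (or if the enqueue fails):
 *	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
 */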
#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check if the tx desc id is valid
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: tx descriptor id (cookie) to be validated
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id;
	uint16_t page_id, offset;
	struct dp_tx_desc_pool_s *pool;

	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
	/* Pool ID is out of limit */
	if (pool_id > wlan_cfg_get_num_tx_desc_pool(
				soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:Tx Comp pool id %d not valid",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	pool = &soc->tx_desc[pool_id];
	/* the pool is freed */
	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the pool %d has been freed",
			  __func__,
			  pool_id);
		goto warn_exit;
	}

	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
			DP_TX_DESC_ID_PAGE_OS;
	/* the page id is out of limit */
	if (page_id >= pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:the page id %d invalid, pool id %d, num_page %d",
			  __func__,
			  page_id,
			  pool_id,
			  pool->desc_pages.num_pages);
		goto warn_exit;
	}

	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
			DP_TX_DESC_ID_OFFSET_OS;
	/* the offset is out of limit */
	if (offset >= pool->desc_pages.num_element_per_page) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_FATAL,
			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
			  __func__,
			  offset,
			  pool_id,
			  pool->desc_pages.num_element_per_page);
		goto warn_exit;
	}

	return true;

warn_exit:
	QDF_TRACE(QDF_MODULE_ID_DP,
		  QDF_TRACE_LEVEL_FATAL,
		  "%s:Tx desc id 0x%x not valid",
		  __func__,
		  tx_desc_id);
	qdf_assert_always(0);
	return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */
#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
	    qdf_likely(allow_fast_comp)) {
		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
	}
}
#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */
/**
 * dp_tx_desc_find() - find dp tx descriptor from the cookie fields
 * @soc: handle for the device sending the data
 * @pool_id: pool id extracted from the descriptor cookie
 * @page_id: page id extracted from the descriptor cookie
 * @offset: offset within the page, extracted from the descriptor cookie
 *
 * Use the fields of a tx descriptor ID (cookie) to find the
 * corresponding descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
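
/*
 * Illustrative sketch (hypothetical completion handler, not part of
 * this header): a descriptor ID read back from a Tx completion can be
 * split with the cookie masks at the top of this file and resolved to
 * its software descriptor, mirroring dp_tx_is_desc_id_valid() above:
 *
 *	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *			DP_TX_DESC_ID_POOL_OS;
 *	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *			DP_TX_DESC_ID_PAGE_OS;
 *	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *			DP_TX_DESC_ID_OFFSET_OS;
 *	tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 */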
/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: allocated extension descriptor, or NULL if the pool is empty
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
						    uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return;
}
/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *				    attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the tx extension descriptor list to be freed
 * @desc_pool_id: pool id the descriptors should be returned to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
	uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);

	return;
}
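
/*
 * Illustrative sketch (hypothetical scatter-gather path, not part of
 * this header): an extension descriptor is borrowed from the per-pool
 * freelist for the lifetime of one frame and returned on completion.
 *
 *	struct dp_tx_ext_desc_elem_s *ext_desc;
 *
 *	ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
 *	if (!ext_desc)
 *		return QDF_STATUS_E_RESOURCES;
 *	... fill the extension descriptor (fragments, MSDU extension) ...
 *	dp_tx_ext_desc_free(soc, ext_desc, desc_pool_id);
 */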
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which the TSO descriptor should be picked
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id the TSO descriptor should be returned to
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif
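
/*
 * Illustrative sketch (hypothetical TSO send path, not part of this
 * header): a segment element and its companion num-seg element are
 * taken from the per-pool freelists and returned once the TSO frame
 * completes.
 *
 *	struct qdf_tso_seg_elem_t *tso_seg;
 *	struct qdf_tso_num_seg_elem_t *tso_num_seg;
 *
 *	tso_seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *	tso_num_seg = dp_tso_num_seg_alloc(soc, pool_id);
 *	if (!tso_seg || !tso_num_seg)
 *		... free whichever was allocated and bail out ...
 *
 *	... segment the frame, transmit, then on completion: ...
 *	dp_tx_tso_desc_free(soc, pool_id, tso_seg);
 *	dp_tso_num_seg_free(soc, pool_id, tso_num_seg);
 */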
/*
 * dp_tx_me_alloc_buf() - Allocate a descriptor from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: dp_tx_me_buf_t (buf), or NULL if the pool is exhausted
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	return buf;
}
/*
 * dp_tx_me_free_buf() - Unmap the buffer holding the dest
 * address, free the me descriptor and add it to the free pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME buffer
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	/*
	 * If the buf containing the mac address was mapped,
	 * it must be unmapped before freeing the me_buf.
	 * The "paddr_macbuf" member in the me_buf structure
	 * holds the mapped physical address and it must be
	 * set to 0 after unmapping.
	 */
	if (buf->paddr_macbuf) {
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
					    buf->paddr_macbuf,
					    QDF_DMA_TO_DEVICE,
					    QDF_MAC_ADDR_SIZE);
		buf->paddr_macbuf = 0;
	}
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
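
/*
 * Illustrative sketch (hypothetical multicast-echo path, not part of
 * this header; field usage assumed): an ME buffer holds one destination
 * MAC address. If the caller DMA-maps that address, paddr_macbuf
 * records the mapping so that dp_tx_me_free_buf() can undo it.
 *
 *	struct dp_tx_me_buf_t *mc_uc_buf;
 *
 *	mc_uc_buf = dp_tx_me_alloc_buf(pdev);
 *	if (!mc_uc_buf)
 *		return QDF_STATUS_E_RESOURCES;
 *	... copy the destination MAC into the buffer, optionally map it
 *	    and store the physical address in paddr_macbuf, transmit ...
 *	dp_tx_me_free_buf(pdev, mc_uc_buf);
 */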
#endif /* DP_TX_DESC_H */