dp_tx_flow_control.c
/*
 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>	/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>	/* qdf_atomic_read, etc. */
#include <qdf_util.h>	/* qdf_unlikely */

#include "dp_types.h"
#include "dp_tx_desc.h"
#include <cdp_txrx_handle.h>
#include "dp_internal.h"

#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3

#ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * dp_tx_initialize_threshold() - Initialize flow pool thresholds
 * @pool: flow_pool
 * @start_threshold: start threshold of a certain AC, in percent
 * @stop_threshold: stop threshold of a certain AC, in percent
 * @flow_pool_size: flow pool size
 *
 * Return: none
 */
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
			   uint32_t start_threshold,
			   uint32_t stop_threshold,
			   uint16_t flow_pool_size)
{
	/* BE_BK threshold is the same as the legacy (non-AC) threshold */
	pool->start_th[DP_TH_BE_BK] =
		(start_threshold * flow_pool_size) / 100;
	pool->stop_th[DP_TH_BE_BK] =
		(stop_threshold * flow_pool_size) / 100;

	/* Update VI threshold based on the BE_BK threshold */
	pool->start_th[DP_TH_VI] =
		(pool->start_th[DP_TH_BE_BK] * FL_TH_VI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VI] =
		(pool->stop_th[DP_TH_BE_BK] * FL_TH_VI_PERCENTAGE) / 100;

	/* Update VO threshold based on the BE_BK threshold */
	pool->start_th[DP_TH_VO] =
		(pool->start_th[DP_TH_BE_BK] * FL_TH_VO_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VO] =
		(pool->stop_th[DP_TH_BE_BK] * FL_TH_VO_PERCENTAGE) / 100;

	/* Update high-priority threshold based on the BE_BK threshold */
	pool->start_th[DP_TH_HI] =
		(pool->start_th[DP_TH_BE_BK] * FL_TH_HI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_HI] =
		(pool->stop_th[DP_TH_BE_BK] * FL_TH_HI_PERCENTAGE) / 100;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: tx flow control threshold is set, pool size is %d",
		  __func__, flow_pool_size);
}
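/*
 * Worked example (illustrative numbers, not the INI defaults): with
 * flow_pool_size = 1024, start_threshold = 15 and stop_threshold = 10,
 * the BE_BK thresholds come out as start = (15 * 1024) / 100 = 153 and
 * stop = (10 * 1024) / 100 = 102 descriptors. The VI/VO/HI thresholds
 * are then derived from BE_BK by scaling with the FL_TH_*_PERCENTAGE
 * factors, so each higher-priority AC keeps transmitting below the
 * point at which a lower-priority AC has already been paused.
 */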
/**
 * dp_tx_flow_pool_reattach() - Reattach flow_pool
 * @pool: flow_pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: flow pool already allocated, attached %d times",
		  __func__, pool->pool_create_cnt);

	if (pool->avail_desc > pool->start_th[DP_TH_BE_BK])
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	else if (pool->avail_desc <= pool->start_th[DP_TH_BE_BK] &&
		 pool->avail_desc > pool->start_th[DP_TH_VI])
		pool->status = FLOW_POOL_BE_BK_PAUSED;
	else if (pool->avail_desc <= pool->start_th[DP_TH_VI] &&
		 pool->avail_desc > pool->start_th[DP_TH_VO])
		pool->status = FLOW_POOL_VI_PAUSED;
	else if (pool->avail_desc <= pool->start_th[DP_TH_VO] &&
		 pool->avail_desc > pool->start_th[DP_TH_HI])
		pool->status = FLOW_POOL_VO_PAUSED;
	else
		pool->status = FLOW_POOL_ACTIVE_PAUSED;

	pool->pool_create_cnt++;
}
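/*
 * Note on the ladder above: since the start thresholds descend
 * monotonically (BE_BK >= VI >= VO >= HI), the avail_desc ranges are
 * disjoint and exactly one status is selected. For instance, assuming
 * the illustrative thresholds 153/122/91/61 for BE_BK/VI/VO/HI (i.e.
 * FL_TH_*_PERCENTAGE factors of 80/60/40, an assumption for this
 * sketch), avail_desc = 100 falls in (91, 122] and the pool reattaches
 * in FLOW_POOL_VI_PAUSED.
 */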
/**
 * dp_tx_flow_pool_dump_threshold() - Dump thresholds of the flow_pool
 * @pool: flow_pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	int i;

	for (i = 0; i < FL_TH_MAX; i++) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Level %d :: Start threshold %d :: Stop threshold %d",
			  i, pool->start_th[i], pool->stop_th[i]);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Level %d :: Maximum pause time %lu ms",
			  i, pool->max_pause_time[i]);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Level %d :: Latest pause timestamp %lu",
			  i, pool->latest_pause_time[i]);
	}
}
#else
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
			   uint32_t start_threshold,
			   uint32_t stop_threshold,
			   uint16_t flow_pool_size)
{
	/* INI is in percentage so divide by 100 */
	pool->start_th = (start_threshold * flow_pool_size) / 100;
	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
}

static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: flow pool already allocated, attached %d times",
		  __func__, pool->pool_create_cnt);

	if (pool->avail_desc > pool->start_th)
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	else
		pool->status = FLOW_POOL_ACTIVE_PAUSED;

	pool->pool_create_cnt++;
}

static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Start threshold %d :: Stop threshold %d",
		  pool->start_th, pool->stop_th);
}
#endif
/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 * @soc_hdl: Handle to struct cdp_soc_t
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool unmap received %d", pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Pkt dropped due to unavailability of pool %d",
		  pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin locks.
	 * Always acquire in the order below:
	 * flow_pool_array_lock -> flow_pool_lock
	 */
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Flow_pool_id %d :: status %d",
			  tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Total %d :: Available %d",
			  tmp_pool.pool_size, tmp_pool.avail_desc);
		dp_tx_flow_pool_dump_threshold(&tmp_pool);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Member flow_id %d :: flow_type %d",
			  tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Pkt dropped due to unavailability of descriptors %d",
			  tmp_pool.pkt_drop_no_desc);
		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}
/**
 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
{
	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: soc is null", __func__);
		return;
	}
	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
}
/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
						 uint8_t flow_pool_id,
						 uint16_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return NULL;
	}
	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
		dp_tx_flow_pool_reattach(pool);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
		       pool->status, pool->pool_create_cnt);
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) {
		dp_tx_desc_pool_free(soc, flow_pool_id);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
				   flow_pool_size);
	pool->pool_create_cnt++;

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}
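/*
 * Usage sketch: a pool is normally created from
 * dp_tx_flow_pool_map_handler() below, when the target sends
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP, roughly:
 *
 *	pool = dp_tx_create_flow_pool(soc, flow_pool_id, flow_pool_size);
 *	if (!pool)
 *		return QDF_STATUS_E_RESOURCES;
 *
 * A repeated create for the same id does not allocate again; it goes
 * through dp_tx_flow_pool_reattach() and bumps pool_create_cnt, so every
 * create must be balanced by a dp_tx_delete_flow_pool() call.
 */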
/**
 * dp_tx_delete_flow_pool() - delete flow pool
 * @soc: Handle to struct dp_soc
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 for success or error
 */
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
			   bool force)
{
	struct dp_vdev *vdev;

	if (!soc || !pool) {
		dp_err("pool or soc is NULL");
		QDF_ASSERT(0);
		return -ENOMEM;
	}

	dp_info("pool create_cnt=%d, avail_desc=%d, size=%d, status=%d",
		pool->pool_create_cnt, pool->avail_desc,
		pool->pool_size, pool->status);

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (!pool->pool_create_cnt) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("flow pool either not created or already deleted");
		return -ENOENT;
	}
	pool->pool_create_cnt--;
	if (pool->pool_create_cnt) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("pool is still attached, pending detach %d",
		       pool->pool_create_cnt);
		return -EAGAIN;
	}

	if (pool->avail_desc < pool->pool_size) {
		pool->status = FLOW_POOL_INVALID;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* Reset TX descriptors associated with this vdev to NULL */
		vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id);
		if (vdev) {
			dp_tx_desc_flush(vdev->pdev, vdev, false);
			dp_vdev_unref_delete(soc, vdev);
		}
		dp_err("avail desc less than pool size");
		return -EAGAIN;
	}

	/* We have all the descriptors for the pool, we can delete the pool */
	dp_tx_desc_pool_deinit(soc, pool->flow_pool_id);
	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	return 0;
}
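/*
 * Note: the @force parameter is documented above but never consulted in
 * this body; the non-forced path is always taken, and a pool with
 * outstanding descriptors is parked in FLOW_POOL_INVALID until the
 * remaining descriptors are returned.
 */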
/**
 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id / vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
				     struct dp_tx_desc_pool_s *pool,
				     uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: invalid vdev_id %d",
			  __func__, vdev_id);
		return;
	}

	vdev->pool = pool;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->pool_owner_ctx = soc;
	pool->flow_pool_id = vdev_id;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	dp_vdev_unref_delete(soc, vdev);
}
/**
 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id / vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
				       struct dp_tx_desc_pool_s *pool,
				       uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: invalid vdev_id %d",
			  __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
	dp_vdev_unref_delete(soc, vdev);
}
/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
				       uint8_t flow_type, uint8_t flow_pool_id,
				       uint16_t flow_pool_size)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
		flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!soc)) {
		dp_err("soc is NULL");
		return QDF_STATUS_E_FAULT;
	}
	soc->pool_stats.pool_map_count++;

	pool = dp_tx_create_flow_pool(soc, flow_pool_id, flow_pool_size);
	if (!pool) {
		dp_err("creation of flow_pool %d size %d failed",
		       flow_pool_id, flow_pool_size);
		return QDF_STATUS_E_RESOURCES;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
		break;
	default:
		dp_err("flow type %d not supported", type);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
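/*
 * Message flow, for illustration: the target announces a descriptor pool
 * for a flow with HTT_T2H_MSG_TYPE_FLOW_POOL_MAP carrying (flow_id,
 * flow_type, flow_pool_id, flow_pool_size); the handler above creates the
 * pool and, for FLOW_TYPE_VDEV, binds it to the vdev. The matching
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP is handled by
 * dp_tx_flow_pool_unmap_handler() below, which unbinds the pool and
 * deletes it once all descriptors have been returned.
 */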
/**
 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
				   uint8_t flow_type, uint8_t flow_pool_id)
{
	struct dp_soc *soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: flow_id %d flow_type %d flow_pool_id %d",
		  __func__, flow_id, flow_type, flow_pool_id);

	/* Check pdev before dereferencing it for soc */
	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pdev is NULL", __func__);
		return;
	}
	soc = pdev->soc;
	soc->pool_stats.pool_unmap_count++;

	pool = &soc->tx_desc[flow_pool_id];
	if (!pool) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: flow_pool not available flow_pool_id %d",
			  __func__, flow_pool_id);
		return;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: flow type %d not supported !!!",
			  __func__, type);
		return;
	}

	/* only delete if all descriptors are available */
	dp_tx_delete_flow_pool(soc, pool, false);
}
/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_desc_pool_dealloc() - De-allocate tx desc pools
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	int i;

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &((soc)->tx_desc[i]);
		if (!tx_desc_pool->desc_pages.num_pages)
			continue;

		dp_tx_desc_pool_deinit(soc, i);
		dp_tx_desc_pool_free(soc, i);
	}
}

/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	dp_tx_desc_pool_dealloc(soc);
	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}
/**
 * dp_txrx_register_pause_cb() - Register pause callback
 * @handle: Handle to struct cdp_soc_t
 * @pause_cb: Tx pause_cb
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
				     tx_pause_callback pause_cb)
{
	struct dp_soc *soc = (struct dp_soc *)handle;

	if (!soc || !pause_cb) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("soc or pause_cb is NULL"));
		return QDF_STATUS_E_INVAL;
	}
	soc->pause_cb = pause_cb;

	return QDF_STATUS_SUCCESS;
}
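/*
 * Caller sketch (hypothetical names, for illustration only): the OS
 * interface layer registers its netif pause routine once at attach time,
 * along the lines of:
 *
 *	static void my_tx_pause_cb(uint8_t vdev_id, unsigned int action,
 *				   unsigned int reason);
 *	...
 *	if (dp_txrx_register_pause_cb(soc_hdl, my_tx_pause_cb) !=
 *	    QDF_STATUS_SUCCESS)
 *		goto fail;
 *
 * The authoritative tx_pause_callback signature lives in the cdp headers;
 * the prototype shown here is an assumption for the sketch.
 */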
/**
 * dp_tx_flow_pool_map() - Map flow pool to a vdev
 * @handle: Handle to struct cdp_soc_t
 * @pdev_id: id of pdev
 * @vdev_id: vdev id
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
			       uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (!pdev) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
					   vdev_id, tx_ring_size);
}

/**
 * dp_tx_flow_pool_unmap() - Unmap flow pool from a vdev
 * @handle: Handle to struct cdp_soc_t
 * @pdev_id: id of pdev
 * @vdev_id: vdev id
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	dp_tx_flow_pool_unmap_handler(pdev, vdev_id, FLOW_TYPE_VDEV, vdev_id);
}