dp_tx_flow_control.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664
  1. /*
  2. * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <cds_api.h>
  19. /* OS abstraction libraries */
  20. #include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
  21. #include <qdf_atomic.h> /* qdf_atomic_read, etc. */
  22. #include <qdf_util.h> /* qdf_unlikely */
  23. #include "dp_types.h"
  24. #include "dp_tx_desc.h"
  25. #include <cdp_txrx_handle.h>
  26. #include "dp_internal.h"
  27. #define INVALID_FLOW_ID 0xFF
  28. #define MAX_INVALID_BIN 3
  29. #define GLOBAL_FLOW_POOL_STATS_LEN 25
  30. #define FLOW_POOL_LOG_LEN 50
  31. #ifdef QCA_AC_BASED_FLOW_CONTROL
  32. /**
  33. * dp_tx_initialize_threshold() - Threshold of flow Pool initialization
  34. * @pool: flow_pool
  35. * @stop_threshold: stop threshold of certian AC
  36. * @start_threshold: start threshold of certian AC
  37. * @flow_pool_size: flow pool size
  38. *
  39. * Return: none
  40. */
  41. static inline void
  42. dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
  43. uint32_t start_threshold,
  44. uint32_t stop_threshold,
  45. uint16_t flow_pool_size)
  46. {
  47. /* BE_BK threshold is same as previous threahold */
  48. pool->start_th[DP_TH_BE_BK] = (start_threshold
  49. * flow_pool_size) / 100;
  50. pool->stop_th[DP_TH_BE_BK] = (stop_threshold
  51. * flow_pool_size) / 100;
  52. /* Update VI threshold based on BE_BK threashold */
  53. pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
  54. * FL_TH_VI_PERCENTAGE) / 100;
  55. pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
  56. * FL_TH_VI_PERCENTAGE) / 100;
  57. /* Update VO threshold based on BE_BK threashold */
  58. pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
  59. * FL_TH_VO_PERCENTAGE) / 100;
  60. pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
  61. * FL_TH_VO_PERCENTAGE) / 100;
  62. /* Update High Priority threshold based on BE_BK threashold */
  63. pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
  64. * FL_TH_HI_PERCENTAGE) / 100;
  65. pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
  66. * FL_TH_HI_PERCENTAGE) / 100;
  67. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  68. "%s: tx flow control threshold is set, pool size is %d",
  69. __func__, flow_pool_size);
  70. }
  71. /**
  72. * dp_tx_flow_pool_reattach() - Reattach flow_pool
  73. * @pool: flow_pool
  74. *
  75. * Return: none
  76. */
  77. static inline void
  78. dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
  79. {
  80. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  81. "%s: flow pool already allocated, attached %d times",
  82. __func__, pool->pool_create_cnt);
  83. if (pool->avail_desc > pool->start_th[DP_TH_BE_BK])
  84. pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
  85. else if (pool->avail_desc <= pool->start_th[DP_TH_BE_BK] &&
  86. pool->avail_desc > pool->start_th[DP_TH_VI])
  87. pool->status = FLOW_POOL_BE_BK_PAUSED;
  88. else if (pool->avail_desc <= pool->start_th[DP_TH_VI] &&
  89. pool->avail_desc > pool->start_th[DP_TH_VO])
  90. pool->status = FLOW_POOL_VI_PAUSED;
  91. else if (pool->avail_desc <= pool->start_th[DP_TH_VO] &&
  92. pool->avail_desc > pool->start_th[DP_TH_HI])
  93. pool->status = FLOW_POOL_VO_PAUSED;
  94. else
  95. pool->status = FLOW_POOL_ACTIVE_PAUSED;
  96. pool->pool_create_cnt++;
  97. }
  98. /**
  99. * dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool
  100. * @pool: flow_pool
  101. *
  102. * Return: none
  103. */
  104. static inline void
  105. dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
  106. {
  107. int i;
  108. for (i = 0; i < FL_TH_MAX; i++) {
  109. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  110. "Level %d :: Start threshold %d :: Stop threshold %d",
  111. i, pool->start_th[i], pool->stop_th[i]);
  112. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  113. "Level %d :: Maximun pause time %lu ms",
  114. i, pool->max_pause_time[i]);
  115. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  116. "Level %d :: Latest pause timestamp %lu",
  117. i, pool->latest_pause_time[i]);
  118. }
  119. }
  120. #else
  121. static inline void
  122. dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
  123. uint32_t start_threshold,
  124. uint32_t stop_threshold,
  125. uint16_t flow_pool_size)
  126. {
  127. /* INI is in percentage so divide by 100 */
  128. pool->start_th = (start_threshold * flow_pool_size) / 100;
  129. pool->stop_th = (stop_threshold * flow_pool_size) / 100;
  130. }
  131. static inline void
  132. dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
  133. {
  134. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  135. "%s: flow pool already allocated, attached %d times",
  136. __func__, pool->pool_create_cnt);
  137. if (pool->avail_desc > pool->start_th)
  138. pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
  139. else
  140. pool->status = FLOW_POOL_ACTIVE_PAUSED;
  141. pool->pool_create_cnt++;
  142. }
/* Dump the pool's thresholds (non-AC build: one start/stop pair per pool). */
static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Start threshold %d :: Stop threshold %d",
		  pool->start_th, pool->stop_th);
}
  150. #endif
/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 * @soc_hdl: cdp handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool unmap received %d", pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Pkt dropped due to unavailablity of pool %d",
		  pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin lock.
	 * Always take in below order.
	 * flow_pool_array_lock -> flow_pool_lock
	 */
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		/* statuses above FLOW_POOL_INVALID are unused/inactive slots */
		if (pool->status > FLOW_POOL_INVALID)
			continue;
		/*
		 * Snapshot the pool under its own lock, then drop BOTH locks
		 * so no spinlock is held while tracing to the console.
		 */
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Flow_pool_id %d :: status %d",
			  tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Total %d :: Available %d",
			  tmp_pool.pool_size, tmp_pool.avail_desc);
		dp_tx_flow_pool_dump_threshold(&tmp_pool);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Member flow_id %d :: flow_type %d",
			  tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Pkt dropped due to unavailablity of descriptors %d",
			  tmp_pool.pkt_drop_no_desc);
		/* re-acquire the array lock before the next iteration */
		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}
  204. void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
  205. {
  206. struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
  207. struct dp_tx_desc_pool_s *pool = NULL;
  208. char *comb_log_str;
  209. uint32_t comb_log_str_size;
  210. int bytes_written = 0;
  211. int i;
  212. comb_log_str_size = GLOBAL_FLOW_POOL_STATS_LEN +
  213. (FLOW_POOL_LOG_LEN * MAX_TXDESC_POOLS) + 1;
  214. comb_log_str = qdf_mem_malloc(comb_log_str_size);
  215. if (!comb_log_str)
  216. return;
  217. bytes_written = qdf_snprintf(&comb_log_str[bytes_written],
  218. comb_log_str_size, "G:(%d,%d,%d) ",
  219. pool_stats->pool_map_count,
  220. pool_stats->pool_unmap_count,
  221. pool_stats->pkt_drop_no_pool);
  222. for (i = 0; i < MAX_TXDESC_POOLS; i++) {
  223. pool = &soc->tx_desc[i];
  224. if (pool->status > FLOW_POOL_INVALID)
  225. continue;
  226. bytes_written += qdf_snprintf(&comb_log_str[bytes_written],
  227. (bytes_written >= comb_log_str_size) ? 0 :
  228. comb_log_str_size - bytes_written,
  229. "| %d %d: (%d,%d,%d)",
  230. pool->flow_pool_id, pool->status,
  231. pool->pool_size, pool->avail_desc,
  232. pool->pkt_drop_no_desc);
  233. }
  234. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  235. "FLOW_POOL_STATS %s", comb_log_str);
  236. qdf_mem_free(comb_log_str);
  237. }
  238. /**
  239. * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
  240. *
  241. * @soc: Handle to struct dp_soc.
  242. *
  243. * Return: None
  244. */
  245. void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
  246. {
  247. if (!soc) {
  248. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  249. "%s: soc is null", __func__);
  250. return;
  251. }
  252. qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
  253. }
/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return NULL;
	}
	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	/*
	 * Pool already exists (e.g. repeated MAP message): re-derive its
	 * pause state, bump the attach count, and return the existing pool.
	 */
	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
		dp_tx_flow_pool_reattach(pool);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* NOTE(review): status/create_cnt are logged after the lock
		 * is dropped, so the values may be stale in the message.
		 */
		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
		       pool->status, pool->pool_create_cnt);
		return pool;
	}

	/* descriptor memory allocation, done under the pool lock */
	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) {
		/* undo the alloc if the init step fails */
		dp_tx_desc_pool_free(soc, flow_pool_id);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	/* thresholds come from config as percentages of the pool size */
	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
				   flow_pool_size);
	pool->pool_create_cnt++;

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}
  303. /**
  304. * dp_tx_delete_flow_pool() - delete flow pool
  305. * @soc: Handle to struct dp_soc
  306. * @pool: flow pool pointer
  307. * @force: free pool forcefully
  308. *
  309. * Delete flow_pool if all tx descriptors are available.
  310. * Otherwise put it in FLOW_POOL_INVALID state.
  311. * If force is set then pull all available descriptors to
  312. * global pool.
  313. *
  314. * Return: 0 for success or error
  315. */
  316. int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
  317. bool force)
  318. {
  319. struct dp_vdev *vdev;
  320. if (!soc || !pool) {
  321. dp_err("pool or soc is NULL");
  322. QDF_ASSERT(0);
  323. return ENOMEM;
  324. }
  325. dp_info("pool create_cnt=%d, avail_desc=%d, size=%d, status=%d",
  326. pool->pool_create_cnt, pool->avail_desc,
  327. pool->pool_size, pool->status);
  328. qdf_spin_lock_bh(&pool->flow_pool_lock);
  329. if (!pool->pool_create_cnt) {
  330. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  331. dp_err("flow pool either not created or alread deleted");
  332. return -ENOENT;
  333. }
  334. pool->pool_create_cnt--;
  335. if (pool->pool_create_cnt) {
  336. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  337. dp_err("pool is still attached, pending detach %d",
  338. pool->pool_create_cnt);
  339. return -EAGAIN;
  340. }
  341. if (pool->avail_desc < pool->pool_size) {
  342. pool->status = FLOW_POOL_INVALID;
  343. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  344. /* Reset TX desc associated to this Vdev as NULL */
  345. vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
  346. DP_MOD_ID_MISC);
  347. if (vdev) {
  348. dp_tx_desc_flush(vdev->pdev, vdev, false);
  349. dp_vdev_unref_delete(soc, vdev,
  350. DP_MOD_ID_MISC);
  351. }
  352. dp_err("avail desc less than pool size");
  353. return -EAGAIN;
  354. }
  355. /* We have all the descriptors for the pool, we can delete the pool */
  356. dp_tx_desc_pool_deinit(soc, pool->flow_pool_id);
  357. dp_tx_desc_pool_free(soc, pool->flow_pool_id);
  358. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  359. return 0;
  360. }
  361. /**
  362. * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
  363. * @pdev: Handle to struct dp_pdev
  364. * @pool: flow_pool
  365. * @vdev_id: flow_id /vdev_id
  366. *
  367. * Return: none
  368. */
  369. static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
  370. struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
  371. {
  372. struct dp_vdev *vdev;
  373. struct dp_soc *soc = pdev->soc;
  374. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
  375. if (!vdev) {
  376. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  377. "%s: invalid vdev_id %d",
  378. __func__, vdev_id);
  379. return;
  380. }
  381. vdev->pool = pool;
  382. qdf_spin_lock_bh(&pool->flow_pool_lock);
  383. pool->pool_owner_ctx = soc;
  384. pool->flow_pool_id = vdev_id;
  385. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  386. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  387. }
  388. /**
  389. * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
  390. * @pdev: Handle to struct dp_pdev
  391. * @pool: flow_pool
  392. * @vdev_id: flow_id /vdev_id
  393. *
  394. * Return: none
  395. */
  396. static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
  397. struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
  398. {
  399. struct dp_vdev *vdev;
  400. struct dp_soc *soc = pdev->soc;
  401. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
  402. if (!vdev) {
  403. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  404. "%s: invalid vdev_id %d",
  405. __func__, vdev_id);
  406. return;
  407. }
  408. vdev->pool = NULL;
  409. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  410. }
  411. /**
  412. * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
  413. * @pdev: Handle to struct dp_pdev
  414. * @flow_id: flow id
  415. * @flow_type: flow type
  416. * @flow_pool_id: pool id
  417. * @flow_pool_size: pool size
  418. *
  419. * Process below target to host message
  420. * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
  421. *
  422. * Return: none
  423. */
  424. QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
  425. uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
  426. {
  427. struct dp_soc *soc = pdev->soc;
  428. struct dp_tx_desc_pool_s *pool;
  429. enum htt_flow_type type = flow_type;
  430. dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
  431. flow_id, flow_type, flow_pool_id, flow_pool_size);
  432. if (qdf_unlikely(!soc)) {
  433. dp_err("soc is NULL");
  434. return QDF_STATUS_E_FAULT;
  435. }
  436. soc->pool_stats.pool_map_count++;
  437. pool = dp_tx_create_flow_pool(soc, flow_pool_id,
  438. flow_pool_size);
  439. if (!pool) {
  440. dp_err("creation of flow_pool %d size %d failed",
  441. flow_pool_id, flow_pool_size);
  442. return QDF_STATUS_E_RESOURCES;
  443. }
  444. switch (type) {
  445. case FLOW_TYPE_VDEV:
  446. dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
  447. break;
  448. default:
  449. dp_err("flow type %d not supported", type);
  450. break;
  451. }
  452. return QDF_STATUS_SUCCESS;
  453. }
  454. /**
  455. * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
  456. * @pdev: Handle to struct dp_pdev
  457. * @flow_id: flow id
  458. * @flow_type: flow type
  459. * @flow_pool_id: pool id
  460. *
  461. * Process below target to host message
  462. * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
  463. *
  464. * Return: none
  465. */
  466. void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
  467. uint8_t flow_type, uint8_t flow_pool_id)
  468. {
  469. struct dp_soc *soc = pdev->soc;
  470. struct dp_tx_desc_pool_s *pool;
  471. enum htt_flow_type type = flow_type;
  472. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  473. "%s: flow_id %d flow_type %d flow_pool_id %d",
  474. __func__, flow_id, flow_type, flow_pool_id);
  475. if (qdf_unlikely(!pdev)) {
  476. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  477. "%s: pdev is NULL", __func__);
  478. return;
  479. }
  480. soc->pool_stats.pool_unmap_count++;
  481. pool = &soc->tx_desc[flow_pool_id];
  482. if (!pool) {
  483. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  484. "%s: flow_pool not available flow_pool_id %d",
  485. __func__, type);
  486. return;
  487. }
  488. switch (type) {
  489. case FLOW_TYPE_VDEV:
  490. dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
  491. break;
  492. default:
  493. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  494. "%s: flow type %d not supported !!!",
  495. __func__, type);
  496. return;
  497. }
  498. /* only delete if all descriptors are available */
  499. dp_tx_delete_flow_pool(soc, pool, false);
  500. }
/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Creates the spinlock that protects the soc's flow pool array.
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}
  511. /**
  512. * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
  513. * @tx_desc_pool: Handle to flow_pool
  514. *
  515. * Return: none
  516. */
  517. static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
  518. {
  519. struct dp_tx_desc_pool_s *tx_desc_pool;
  520. int i;
  521. for (i = 0; i < MAX_TXDESC_POOLS; i++) {
  522. tx_desc_pool = &((soc)->tx_desc[i]);
  523. if (!tx_desc_pool->desc_pages.num_pages)
  524. continue;
  525. dp_tx_desc_pool_deinit(soc, i);
  526. dp_tx_desc_pool_free(soc, i);
  527. }
  528. }
/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Frees all tx descriptor pools, then destroys the flow pool array lock.
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	dp_tx_desc_pool_dealloc(soc);
	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}
  540. /**
  541. * dp_txrx_register_pause_cb() - Register pause callback
  542. * @ctx: Handle to struct dp_soc
  543. * @pause_cb: Tx pause_cb
  544. *
  545. * Return: none
  546. */
  547. QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
  548. tx_pause_callback pause_cb)
  549. {
  550. struct dp_soc *soc = (struct dp_soc *)handle;
  551. if (!soc || !pause_cb) {
  552. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  553. FL("soc or pause_cb is NULL"));
  554. return QDF_STATUS_E_INVAL;
  555. }
  556. soc->pause_cb = pause_cb;
  557. return QDF_STATUS_SUCCESS;
  558. }
  559. QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
  560. uint8_t vdev_id)
  561. {
  562. struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
  563. struct dp_pdev *pdev =
  564. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  565. int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
  566. if (!pdev) {
  567. dp_err("pdev is NULL");
  568. return QDF_STATUS_E_INVAL;
  569. }
  570. return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
  571. vdev_id, tx_ring_size);
  572. }
  573. void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
  574. uint8_t vdev_id)
  575. {
  576. struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
  577. struct dp_pdev *pdev =
  578. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  579. if (!pdev) {
  580. dp_err("pdev is NULL");
  581. return;
  582. }
  583. return dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
  584. FLOW_TYPE_VDEV, vdev_id);
  585. }