dp_tx_flow_control.c

/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <qdf_util.h>		/* qdf_unlikely */

#include "dp_types.h"
#include "dp_tx_desc.h"
#include <cdp_txrx_handle.h>
#include "dp_internal.h"

#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3
#define GLOBAL_FLOW_POOL_STATS_LEN 25
#define FLOW_POOL_LOG_LEN 50
#ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * dp_tx_initialize_threshold() - Initialize flow-control thresholds of a
 *	flow pool
 * @pool: flow_pool
 * @start_threshold: start threshold of a given AC
 * @stop_threshold: stop threshold of a given AC
 * @flow_pool_size: flow pool size
 *
 * Return: none
 */
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
			   uint32_t start_threshold,
			   uint32_t stop_threshold,
			   uint16_t flow_pool_size)
{
	/* BE_BK threshold is same as previous threshold */
	pool->start_th[DP_TH_BE_BK] = (start_threshold
					* flow_pool_size) / 100;
	pool->stop_th[DP_TH_BE_BK] = (stop_threshold
					* flow_pool_size) / 100;

	/* Update VI threshold based on BE_BK threshold */
	pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_VI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_VI_PERCENTAGE) / 100;

	/* Update VO threshold based on BE_BK threshold */
	pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_VO_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_VO_PERCENTAGE) / 100;

	/* Update High Priority threshold based on BE_BK threshold */
	pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_HI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_HI_PERCENTAGE) / 100;

	dp_debug("tx flow control threshold is set, pool size is %d",
		 flow_pool_size);
}
/**
 * dp_tx_flow_pool_reattach() - Reattach flow_pool
 * @pool: flow_pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: flow pool already allocated, attached %d times",
		  __func__, pool->pool_create_cnt);

	pool->status = FLOW_POOL_ACTIVE_UNPAUSED_REATTACH;
	pool->pool_create_cnt++;
}
/**
 * dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool
 * @pool: flow_pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	int i;

	for (i = 0; i < FL_TH_MAX; i++) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Level %d :: Start threshold %d :: Stop threshold %d",
			  i, pool->start_th[i], pool->stop_th[i]);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Level %d :: Maximum pause time %lu ms",
			  i, pool->max_pause_time[i]);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Level %d :: Latest pause timestamp %lu",
			  i, pool->latest_pause_time[i]);
	}
}
/**
 * dp_tx_flow_ctrl_reset_subqueues() - Reset subqueues to original state
 * @soc: dp soc
 * @pool: flow pool
 * @pool_status: flow pool status
 *
 * Return: none
 */
static inline void
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
				struct dp_tx_desc_pool_s *pool,
				enum flow_pool_status pool_status)
{
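	/*
	 * A deeper pause state implies every lower-priority netif queue
	 * was paused as well, so each case intentionally falls through
	 * and re-enables all queues from the matched level downwards.
	 */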
	switch (pool_status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_PRIORITY_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_PRI);
		fallthrough;
	case FLOW_POOL_VO_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VO_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_VO);
		fallthrough;
	case FLOW_POOL_VI_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VI_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_VI);
		fallthrough;
	case FLOW_POOL_BE_BK_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_BE_BK_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_BE_BK);
		fallthrough;
	default:
		break;
	}
}
#else
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
			   uint32_t start_threshold,
			   uint32_t stop_threshold,
			   uint16_t flow_pool_size)
{
	/* INI is in percentage so divide by 100 */
	pool->start_th = (start_threshold * flow_pool_size) / 100;
	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
}

static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: flow pool already allocated, attached %d times",
		  __func__, pool->pool_create_cnt);
	if (pool->avail_desc > pool->start_th)
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	else
		pool->status = FLOW_POOL_ACTIVE_PAUSED;

	pool->pool_create_cnt++;
}

static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Start threshold %d :: Stop threshold %d",
		  pool->start_th, pool->stop_th);
}

static inline void
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
				struct dp_tx_desc_pool_s *pool,
				enum flow_pool_status pool_status)
{
}
#endif
/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 * @soc_hdl: Handle to struct cdp_soc_t
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool unmap received %d", pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Pkt dropped due to unavailability of pool %d",
		  pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin lock.
	 * Always take in below order.
	 * flow_pool_array_lock -> flow_pool_lock
	 */
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;
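		/*
		 * Snapshot the pool under its lock, then drop both locks so
		 * the trace output below runs without any lock held.
		 */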
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Flow_pool_id %d :: status %d",
			  tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Total %d :: Available %d",
			  tmp_pool.pool_size, tmp_pool.avail_desc);
		dp_tx_flow_pool_dump_threshold(&tmp_pool);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Member flow_id %d :: flow_type %d",
			  tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Pkt dropped due to unavailability of descriptors %d",
			  tmp_pool.pkt_drop_no_desc);
		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}
void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
{
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	char *comb_log_str;
	uint32_t comb_log_str_size;
	int bytes_written = 0;
	int i;

	comb_log_str_size = GLOBAL_FLOW_POOL_STATS_LEN +
				(FLOW_POOL_LOG_LEN * MAX_TXDESC_POOLS) + 1;
	comb_log_str = qdf_mem_malloc(comb_log_str_size);
	if (!comb_log_str)
		return;

	bytes_written = qdf_snprintf(&comb_log_str[bytes_written],
				     comb_log_str_size, "G:(%d,%d,%d) ",
				     pool_stats->pool_map_count,
				     pool_stats->pool_unmap_count,
				     pool_stats->pkt_drop_no_pool);

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;
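		/*
		 * Pass size 0 once the buffer is full so further writes
		 * become no-ops instead of overflowing comb_log_str.
		 */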
		bytes_written += qdf_snprintf(&comb_log_str[bytes_written],
					      (bytes_written >= comb_log_str_size) ? 0 :
					      comb_log_str_size - bytes_written,
					      "| %d %d: (%d,%d,%d)",
					      pool->flow_pool_id, pool->status,
					      pool->pool_size, pool->avail_desc,
					      pool->pkt_drop_no_desc);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "FLOW_POOL_STATS %s", comb_log_str);

	qdf_mem_free(comb_log_str);
}
/**
 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
 * @soc: Handle to struct dp_soc
 *
 * Return: None
 */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
{
	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: soc is null", __func__);
		return;
	}
	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
}
/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint32_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return NULL;
	}

	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
		dp_tx_flow_pool_reattach(pool);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
		       pool->status, pool->pool_create_cnt);
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("dp_tx_desc_pool_alloc failed flow_pool_id: %d",
		       flow_pool_id);
		return NULL;
	}

	if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) {
		dp_tx_desc_pool_free(soc, flow_pool_id);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("dp_tx_desc_pool_init failed flow_pool_id: %d",
		       flow_pool_id);
		return NULL;
	}
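	/*
	 * Both INI values are percentages of the pool size: the netif
	 * queue is stopped when available descriptors fall to the stop
	 * threshold and restarted once they recover to stop + offset.
	 */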
	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
				   flow_pool_size);
	pool->pool_create_cnt++;

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}
/**
 * dp_is_tx_flow_pool_delete_allowed() - Can the flow pool be deleted
 * @soc: Handle to struct dp_soc
 * @vdev_id: vdev_id corresponding to flow pool
 *
 * Check if it is OK to go ahead and delete the flow pool. One such case is
 * MLO, where it is not OK to delete the flow pool when a link switch happens.
 *
 * Return: true if the delete is allowed, false otherwise
 */
static bool dp_is_tx_flow_pool_delete_allowed(struct dp_soc *soc,
					      uint8_t vdev_id)
{
	struct dp_vdev *vdev = NULL;
	bool is_allow = true;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MISC);

	/* only check for sta mode */
	if (!vdev || vdev->opmode != wlan_op_mode_sta)
		goto comp_ret;

	/*
	 * Deleting the pool is only disallowed while the vdev belongs to
	 * an MLO connection that is still connected; for a legacy
	 * connection it is always allowed.
	 */
	is_allow = policy_mgr_is_mlo_sta_disconnected(
			(struct wlan_objmgr_psoc *)soc->ctrl_psoc,
			vdev_id);
comp_ret:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);

	return is_allow;
}
/**
 * dp_tx_delete_flow_pool() - delete flow pool
 * @soc: Handle to struct dp_soc
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 for success or error
 */
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
			   bool force)
{
	struct dp_vdev *vdev;
	enum flow_pool_status pool_status;

	if (!soc || !pool) {
		dp_err("pool or soc is NULL");
		QDF_ASSERT(0);
		return -ENOMEM;
	}

	dp_info("pool_id %d create_cnt=%d, avail_desc=%d, size=%d, status=%d",
		pool->flow_pool_id, pool->pool_create_cnt, pool->avail_desc,
		pool->pool_size, pool->status);

	if (!dp_is_tx_flow_pool_delete_allowed(soc, pool->flow_pool_id)) {
		dp_info("skip pool id %d delete as it's not allowed",
			pool->flow_pool_id);
		return -EAGAIN;
	}

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (!pool->pool_create_cnt) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("flow pool either not created or already deleted");
		return -ENOENT;
	}
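	/*
	 * pool_create_cnt acts as a reference count across repeated
	 * map/unmap requests; the pool is torn down only when the last
	 * reference goes away.
	 */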
	pool->pool_create_cnt--;
	if (pool->pool_create_cnt) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("pool is still attached, pending detach %d",
		       pool->pool_create_cnt);
		return -EAGAIN;
	}

	if (pool->avail_desc < pool->pool_size) {
		pool_status = pool->status;
		pool->status = FLOW_POOL_INVALID;
		dp_tx_flow_ctrl_reset_subqueues(soc, pool, pool_status);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* Reset TX desc associated to this Vdev as NULL */
		vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
					     DP_MOD_ID_MISC);
		if (vdev) {
			dp_tx_desc_flush(vdev->pdev, vdev, false);
			dp_vdev_unref_delete(soc, vdev,
					     DP_MOD_ID_MISC);
		}
		dp_err("avail desc less than pool size");
		return -EAGAIN;
	}

	/* We have all the descriptors for the pool, we can delete the pool */
	dp_tx_desc_pool_deinit(soc, pool->flow_pool_id);
	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	return 0;
}
/**
 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
				     struct dp_tx_desc_pool_s *pool,
				     uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: invalid vdev_id %d", __func__, vdev_id);
		return;
	}

	vdev->pool = pool;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->pool_owner_ctx = soc;
	pool->flow_pool_id = vdev_id;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
/**
 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
				       struct dp_tx_desc_pool_s *pool,
				       uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: invalid vdev_id %d", __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
		flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!soc)) {
		dp_err("soc is NULL");
		return QDF_STATUS_E_FAULT;
	}
	soc->pool_stats.pool_map_count++;

	pool = dp_tx_create_flow_pool(soc, flow_pool_id,
				      flow_pool_size);
	if (!pool) {
		dp_err("creation of flow_pool %d size %d failed",
		       flow_pool_id, flow_pool_size);
		return QDF_STATUS_E_RESOURCES;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
		break;
	default:
		dp_err("flow type %d not supported", type);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
				   uint8_t flow_type, uint8_t flow_pool_id)
{
	struct dp_soc *soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	dp_info("flow_id %d flow_type %d flow_pool_id %d", flow_id, flow_type,
		flow_pool_id);

	/* check pdev before dereferencing it for soc */
	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return;
	}
	soc = pdev->soc;
	soc->pool_stats.pool_unmap_count++;

	pool = &soc->tx_desc[flow_pool_id];
	dp_info("pool status: %d", pool->status);

	if (pool->status == FLOW_POOL_INACTIVE) {
		dp_err("flow pool id: %d is inactive, ignore unmap",
		       flow_pool_id);
		return;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: flow type %d not supported", __func__, type);
		return;
	}

	/* only delete if all descriptors are available */
	dp_tx_delete_flow_pool(soc, pool, false);
}
/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}
/**
 * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	int i;

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &((soc)->tx_desc[i]);
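		/* pools with no descriptor pages were never allocated */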
		if (!tx_desc_pool->desc_pages.num_pages)
			continue;

		dp_tx_desc_pool_deinit(soc, i);
		dp_tx_desc_pool_free(soc, i);
	}
}
/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	dp_tx_desc_pool_dealloc(soc);

	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}
/**
 * dp_txrx_register_pause_cb() - Register pause callback
 * @handle: Handle to struct cdp_soc_t
 * @pause_cb: Tx pause callback
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
				     tx_pause_callback pause_cb)
{
	struct dp_soc *soc = (struct dp_soc *)handle;

	if (!soc || !pause_cb) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("soc or pause_cb is NULL"));
		return QDF_STATUS_E_INVAL;
	}
	soc->pause_cb = pause_cb;

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
			       uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (!pdev) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}
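	/* for FLOW_TYPE_VDEV the vdev_id doubles as flow_id and pool id */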
	return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
					   vdev_id, tx_ring_size);
}

void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
}