dp_tx_flow_control.c

/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>    /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>  /* qdf_atomic_read, etc. */
#include <qdf_util.h>    /* qdf_unlikely */

#include "dp_types.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include <cdp_txrx_handle.h>
#include "dp_internal.h"

#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3
#define GLOBAL_FLOW_POOL_STATS_LEN 25
#define FLOW_POOL_LOG_LEN 50
#ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * dp_tx_initialize_threshold() - Initialize flow pool thresholds
 * @pool: flow_pool
 * @start_threshold: start threshold of certain AC
 * @stop_threshold: stop threshold of certain AC
 * @flow_pool_size: flow pool size
 *
 * Return: none
 */
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
                           uint32_t start_threshold,
                           uint32_t stop_threshold,
                           uint16_t flow_pool_size)
{
    /* BE_BK threshold is same as the legacy (non-AC) threshold */
    pool->start_th[DP_TH_BE_BK] = (start_threshold
                                   * flow_pool_size) / 100;
    pool->stop_th[DP_TH_BE_BK] = (stop_threshold
                                  * flow_pool_size) / 100;

    /* Update VI threshold based on BE_BK threshold */
    pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
                                * FL_TH_VI_PERCENTAGE) / 100;
    pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
                               * FL_TH_VI_PERCENTAGE) / 100;

    /* Update VO threshold based on BE_BK threshold */
    pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
                                * FL_TH_VO_PERCENTAGE) / 100;
    pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
                               * FL_TH_VO_PERCENTAGE) / 100;

    /* Update High Priority threshold based on BE_BK threshold */
    pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
                                * FL_TH_HI_PERCENTAGE) / 100;
    pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
                               * FL_TH_HI_PERCENTAGE) / 100;

    dp_debug("tx flow control threshold is set, pool size is %d",
             flow_pool_size);
}
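/*
 * Worked example (illustrative only; it assumes start_threshold = 15 and
 * stop_threshold = 10 from the INI, and FL_TH_VI_PERCENTAGE = 80,
 * FL_TH_VO_PERCENTAGE = 60, FL_TH_HI_PERCENTAGE = 40 as typically defined
 * in dp_types.h -- actual values may differ per build):
 *
 *   flow_pool_size  = 1024
 *   start_th[BE_BK] = (15 * 1024) / 100 = 153 descriptors
 *   stop_th[BE_BK]  = (10 * 1024) / 100 = 102 descriptors
 *   start_th[VI]    = (153 * 80) / 100  = 122
 *   start_th[VO]    = (153 * 60) / 100  = 91
 *   start_th[HI]    = (153 * 40) / 100  = 61
 *
 * Since stop_th[BE_BK] > stop_th[VI] > stop_th[VO] > stop_th[HI], the
 * BE/BK queues are paused first as the pool drains, and VO/high-priority
 * traffic keeps flowing the longest.
 */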
/**
 * dp_tx_flow_pool_reattach() - Reattach flow_pool
 * @pool: flow_pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "%s: flow pool already allocated, attached %d times",
              __func__, pool->pool_create_cnt);

    pool->status = FLOW_POOL_ACTIVE_UNPAUSED_REATTACH;
    pool->pool_create_cnt++;
}
/**
 * dp_tx_flow_pool_dump_threshold() - Dump thresholds of the flow_pool
 * @pool: flow_pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
    int i;

    for (i = 0; i < FL_TH_MAX; i++) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Level %d :: Start threshold %d :: Stop threshold %d",
                  i, pool->start_th[i], pool->stop_th[i]);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Level %d :: Maximum pause time %lu ms",
                  i, pool->max_pause_time[i]);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Level %d :: Latest pause timestamp %lu",
                  i, pool->latest_pause_time[i]);
    }
}
/**
 * dp_tx_flow_ctrl_reset_subqueues() - Reset subqueues to original state
 * @soc: dp soc
 * @pool: flow pool
 * @pool_status: flow pool status
 *
 * Return: none
 */
static inline void
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
                                struct dp_tx_desc_pool_s *pool,
                                enum flow_pool_status pool_status)
{
    switch (pool_status) {
    case FLOW_POOL_ACTIVE_PAUSED:
        soc->pause_cb(pool->flow_pool_id,
                      WLAN_NETIF_PRIORITY_QUEUE_ON,
                      WLAN_DATA_FLOW_CTRL_PRI);
        fallthrough;
    case FLOW_POOL_VO_PAUSED:
        soc->pause_cb(pool->flow_pool_id,
                      WLAN_NETIF_VO_QUEUE_ON,
                      WLAN_DATA_FLOW_CTRL_VO);
        fallthrough;
    case FLOW_POOL_VI_PAUSED:
        soc->pause_cb(pool->flow_pool_id,
                      WLAN_NETIF_VI_QUEUE_ON,
                      WLAN_DATA_FLOW_CTRL_VI);
        fallthrough;
    case FLOW_POOL_BE_BK_PAUSED:
        soc->pause_cb(pool->flow_pool_id,
                      WLAN_NETIF_BE_BK_QUEUE_ON,
                      WLAN_DATA_FLOW_CTRL_BE_BK);
        fallthrough;
    default:
        break;
    }
}
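/*
 * Note on the deliberate fallthrough chain above: pause states are ordered
 * by severity, so resuming from a given state also resumes every
 * less-severe level below it. For example (illustrative scenario), if the
 * pool was in FLOW_POOL_VO_PAUSED, the VO, VI and BE_BK cases all execute
 * and turn those netif queues back on, while the priority queue -- paused
 * only in FLOW_POOL_ACTIVE_PAUSED -- is left untouched.
 */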
#else
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
                           uint32_t start_threshold,
                           uint32_t stop_threshold,
                           uint16_t flow_pool_size)
{
    /* INI is in percentage so divide by 100 */
    pool->start_th = (start_threshold * flow_pool_size) / 100;
    pool->stop_th = (stop_threshold * flow_pool_size) / 100;
}

static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "%s: flow pool already allocated, attached %d times",
              __func__, pool->pool_create_cnt);

    if (pool->avail_desc > pool->start_th)
        pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
    else
        pool->status = FLOW_POOL_ACTIVE_PAUSED;

    pool->pool_create_cnt++;
}

static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "Start threshold %d :: Stop threshold %d",
              pool->start_th, pool->stop_th);
}

static inline void
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
                                struct dp_tx_desc_pool_s *pool,
                                enum flow_pool_status pool_status)
{
}
#endif
/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 * @soc_hdl: Handle to struct cdp_soc_t
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
    struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
    struct dp_tx_desc_pool_s *pool = NULL;
    struct dp_tx_desc_pool_s tmp_pool;
    int i;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "No of pool map received %d", pool_stats->pool_map_count);
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "No of pool unmap received %d", pool_stats->pool_unmap_count);
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "Pkt dropped due to unavailability of pool %d",
              pool_stats->pkt_drop_no_pool);

    /*
     * Nested spin lock.
     * Always take in below order.
     * flow_pool_array_lock -> flow_pool_lock
     */
    qdf_spin_lock_bh(&soc->flow_pool_array_lock);
    for (i = 0; i < MAX_TXDESC_POOLS; i++) {
        pool = &soc->tx_desc[i];
        if (pool->status > FLOW_POOL_INVALID)
            continue;
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        qdf_spin_unlock_bh(&soc->flow_pool_array_lock);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Flow_pool_id %d :: status %d",
                  tmp_pool.flow_pool_id, tmp_pool.status);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Total %d :: Available %d",
                  tmp_pool.pool_size, tmp_pool.avail_desc);
        dp_tx_flow_pool_dump_threshold(&tmp_pool);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Member flow_id %d :: flow_type %d",
                  tmp_pool.flow_pool_id, tmp_pool.flow_type);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Pkt dropped due to unavailability of descriptors %d",
                  tmp_pool.pkt_drop_no_desc);

        qdf_spin_lock_bh(&soc->flow_pool_array_lock);
    }
    qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}
void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
{
    struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
    struct dp_tx_desc_pool_s *pool = NULL;
    char *comb_log_str;
    uint32_t comb_log_str_size;
    int bytes_written = 0;
    int i;

    comb_log_str_size = GLOBAL_FLOW_POOL_STATS_LEN +
                        (FLOW_POOL_LOG_LEN * MAX_TXDESC_POOLS) + 1;
    comb_log_str = qdf_mem_malloc(comb_log_str_size);
    if (!comb_log_str)
        return;

    bytes_written = qdf_snprintf(&comb_log_str[bytes_written],
                                 comb_log_str_size, "G:(%d,%d,%d) ",
                                 pool_stats->pool_map_count,
                                 pool_stats->pool_unmap_count,
                                 pool_stats->pkt_drop_no_pool);

    for (i = 0; i < MAX_TXDESC_POOLS; i++) {
        pool = &soc->tx_desc[i];
        if (pool->status > FLOW_POOL_INVALID)
            continue;
        bytes_written += qdf_snprintf(&comb_log_str[bytes_written],
                        (bytes_written >= comb_log_str_size) ? 0 :
                        comb_log_str_size - bytes_written,
                        "| %d %d: (%d,%d,%d)",
                        pool->flow_pool_id, pool->status,
                        pool->pool_size, pool->avail_desc,
                        pool->pkt_drop_no_desc);
    }

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
              "FLOW_POOL_STATS %s", comb_log_str);

    qdf_mem_free(comb_log_str);
}
/**
 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
 * @soc: Handle to struct dp_soc
 *
 * Return: None
 */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
{
    if (!soc) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: soc is null", __func__);
        return;
    }
    qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
}
/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
    uint8_t flow_pool_id, uint32_t flow_pool_size)
{
    struct dp_tx_desc_pool_s *pool;
    uint32_t stop_threshold;
    uint32_t start_threshold;

    if (flow_pool_id >= MAX_TXDESC_POOLS) {
        dp_err("invalid flow_pool_id %d", flow_pool_id);
        return NULL;
    }

    pool = &soc->tx_desc[flow_pool_id];
    qdf_spin_lock_bh(&pool->flow_pool_lock);
    if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
        dp_tx_flow_pool_reattach(pool);
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        dp_err("cannot alloc desc, status=%d, create_cnt=%d",
               pool->status, pool->pool_create_cnt);
        return pool;
    }

    if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        dp_err("dp_tx_desc_pool_alloc failed flow_pool_id: %d",
               flow_pool_id);
        return NULL;
    }

    if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) {
        dp_tx_desc_pool_free(soc, flow_pool_id);
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        dp_err("dp_tx_desc_pool_init failed flow_pool_id: %d",
               flow_pool_id);
        return NULL;
    }

    stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
    start_threshold = stop_threshold +
        wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

    pool->flow_pool_id = flow_pool_id;
    pool->pool_size = flow_pool_size;
    pool->avail_desc = flow_pool_size;
    pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
    dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
                               flow_pool_size);
    pool->pool_create_cnt++;

    qdf_spin_unlock_bh(&pool->flow_pool_lock);

    return pool;
}
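/*
 * Illustrative call sequence (example values, not a prescribed flow): a
 * first dp_tx_create_flow_pool(soc, 0, 1024) allocates and initializes
 * pool 0 with 1024 descriptors and pool_create_cnt = 1; a second call for
 * the same id takes the reattach path, leaving the descriptors untouched
 * and bumping pool_create_cnt to 2. The pool is only truly freed once
 * dp_tx_delete_flow_pool() has been called a matching number of times.
 */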
/**
 * dp_is_tx_flow_pool_delete_allowed() - Can flow pool be deleted
 * @soc: Handle to struct dp_soc
 * @vdev_id: vdev_id corresponding to flow pool
 *
 * Check if it is OK to go ahead and delete the flow pool. One such case
 * is MLO, where it is not OK to delete the flow pool when a link switch
 * happens.
 *
 * Return: true if the flow pool can be deleted, false otherwise
 */
static bool dp_is_tx_flow_pool_delete_allowed(struct dp_soc *soc,
                                              uint8_t vdev_id)
{
    struct dp_peer *peer;
    struct dp_peer *tmp_peer;
    struct dp_vdev *vdev = NULL;
    bool is_allow = true;

    vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MISC);

    /* only check for sta mode */
    if (!vdev || vdev->opmode != wlan_op_mode_sta)
        goto comp_ret;

    /*
     * Deletion is disallowed only while the current vdev belongs to an
     * MLO connection and is still connected; for a legacy connection it
     * is always allowed.
     */
    qdf_spin_lock_bh(&vdev->peer_list_lock);
    TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                       peer_list_elem,
                       tmp_peer) {
        if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
            QDF_STATUS_SUCCESS) {
            if (peer->valid && !peer->sta_self_peer)
                is_allow = false;
            dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
        }
    }
    qdf_spin_unlock_bh(&vdev->peer_list_lock);

comp_ret:
    if (vdev)
        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);

    return is_allow;
}
/**
 * dp_tx_delete_flow_pool() - delete flow pool
 * @soc: Handle to struct dp_soc
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 for success or error
 */
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
                           bool force)
{
    struct dp_vdev *vdev;
    enum flow_pool_status pool_status;

    if (!soc || !pool) {
        dp_err("pool or soc is NULL");
        QDF_ASSERT(0);
        return -ENOMEM;
    }

    dp_info("pool_id %d create_cnt=%d, avail_desc=%d, size=%d, status=%d",
            pool->flow_pool_id, pool->pool_create_cnt, pool->avail_desc,
            pool->pool_size, pool->status);

    if (!dp_is_tx_flow_pool_delete_allowed(soc, pool->flow_pool_id)) {
        dp_info("skip pool id %d delete as it's not allowed",
                pool->flow_pool_id);
        return -EAGAIN;
    }

    qdf_spin_lock_bh(&pool->flow_pool_lock);
    if (!pool->pool_create_cnt) {
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        dp_err("flow pool either not created or already deleted");
        return -ENOENT;
    }
    pool->pool_create_cnt--;
    if (pool->pool_create_cnt) {
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        dp_err("pool is still attached, pending detach %d",
               pool->pool_create_cnt);
        return -EAGAIN;
    }

    if (pool->avail_desc < pool->pool_size) {
        pool_status = pool->status;
        pool->status = FLOW_POOL_INVALID;
        dp_tx_flow_ctrl_reset_subqueues(soc, pool, pool_status);

        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        /* Reset TX desc associated to this vdev as NULL */
        vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
                                     DP_MOD_ID_MISC);
        if (vdev) {
            dp_tx_desc_flush(vdev->pdev, vdev, false);
            dp_vdev_unref_delete(soc, vdev,
                                 DP_MOD_ID_MISC);
        }
        dp_err("avail desc less than pool size");
        return -EAGAIN;
    }

    /* We have all the descriptors for the pool, we can delete the pool */
    dp_tx_desc_pool_deinit(soc, pool->flow_pool_id);
    dp_tx_desc_pool_free(soc, pool->flow_pool_id);
    qdf_spin_unlock_bh(&pool->flow_pool_lock);
    return 0;
}
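/*
 * Illustrative outcome matrix for dp_tx_delete_flow_pool() (example
 * values, not exhaustive): with pool_create_cnt = 2 the call only
 * decrements the count and returns -EAGAIN; with pool_create_cnt = 1 and
 * avail_desc == pool_size the descriptors are deinitialized and freed
 * (returns 0); with descriptors still in flight the pool is parked in
 * FLOW_POOL_INVALID, its netif queues are restored via
 * dp_tx_flow_ctrl_reset_subqueues(), and -EAGAIN is returned until the
 * outstanding descriptors come back.
 */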
/**
 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
    struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
    struct dp_vdev *vdev;
    struct dp_soc *soc = pdev->soc;

    vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
    if (!vdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: invalid vdev_id %d",
                  __func__, vdev_id);
        return;
    }

    vdev->pool = pool;
    qdf_spin_lock_bh(&pool->flow_pool_lock);
    pool->pool_owner_ctx = soc;
    pool->flow_pool_id = vdev_id;
    qdf_spin_unlock_bh(&pool->flow_pool_lock);
    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
/**
 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
    struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
    struct dp_vdev *vdev;
    struct dp_soc *soc = pdev->soc;

    vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
    if (!vdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: invalid vdev_id %d",
                  __func__, vdev_id);
        return;
    }

    vdev->pool = NULL;
    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process the below target-to-host message:
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
    uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size)
{
    struct dp_soc *soc = pdev->soc;
    struct dp_tx_desc_pool_s *pool;
    enum htt_flow_type type = flow_type;

    dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
            flow_id, flow_type, flow_pool_id, flow_pool_size);

    if (qdf_unlikely(!soc)) {
        dp_err("soc is NULL");
        return QDF_STATUS_E_FAULT;
    }
    soc->pool_stats.pool_map_count++;

    pool = dp_tx_create_flow_pool(soc, flow_pool_id,
                                  flow_pool_size);
    if (!pool) {
        dp_err("creation of flow_pool %d size %d failed",
               flow_pool_id, flow_pool_size);
        return QDF_STATUS_E_RESOURCES;
    }

    switch (type) {
    case FLOW_TYPE_VDEV:
        dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
        break;
    default:
        dp_err("flow type %d not supported", type);
        break;
    }

    return QDF_STATUS_SUCCESS;
}
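/*
 * Example message flow (illustrative values): when the target sends
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP with flow_id = 1, flow_type =
 * FLOW_TYPE_VDEV, flow_pool_id = 1 and flow_pool_size = 1024, the handler
 * above creates (or reattaches) descriptor pool 1 and points vdev 1 at
 * it, so subsequent transmits on that vdev draw descriptors from the
 * mapped pool instead of the global pool.
 */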
/**
 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process the below target-to-host message:
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
    uint8_t flow_type, uint8_t flow_pool_id)
{
    struct dp_soc *soc;
    struct dp_tx_desc_pool_s *pool;
    enum htt_flow_type type = flow_type;

    dp_info("flow_id %d flow_type %d flow_pool_id %d", flow_id, flow_type,
            flow_pool_id);

    if (qdf_unlikely(!pdev)) {
        dp_err("pdev is NULL");
        return;
    }
    soc = pdev->soc;
    soc->pool_stats.pool_unmap_count++;

    pool = &soc->tx_desc[flow_pool_id];
    dp_info("pool status: %d", pool->status);

    if (pool->status == FLOW_POOL_INACTIVE) {
        dp_err("flow pool id: %d is inactive, ignore unmap",
               flow_pool_id);
        return;
    }

    switch (type) {
    case FLOW_TYPE_VDEV:
        dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
        break;
    default:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: flow type %d not supported !!!",
                  __func__, type);
        return;
    }

    /* only delete if all descriptors are available */
    dp_tx_delete_flow_pool(soc, pool, false);
}
/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
    qdf_spinlock_create(&soc->flow_pool_array_lock);
}
/**
 * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
{
    struct dp_tx_desc_pool_s *tx_desc_pool;
    int i;

    for (i = 0; i < MAX_TXDESC_POOLS; i++) {
        tx_desc_pool = &((soc)->tx_desc[i]);
        if (!tx_desc_pool->desc_pages.num_pages)
            continue;
        dp_tx_desc_pool_deinit(soc, i);
        dp_tx_desc_pool_free(soc, i);
    }
}
/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
    dp_tx_desc_pool_dealloc(soc);
    qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}
/**
 * dp_txrx_register_pause_cb() - Register pause callback
 * @handle: Handle to struct cdp_soc_t
 * @pause_cb: Tx pause callback
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on error
 */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
                                     tx_pause_callback pause_cb)
{
    struct dp_soc *soc = (struct dp_soc *)handle;

    if (!soc || !pause_cb) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("soc or pause_cb is NULL"));
        return QDF_STATUS_E_INVAL;
    }
    soc->pause_cb = pause_cb;

    return QDF_STATUS_SUCCESS;
}
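/*
 * Usage sketch (hedged; the actual caller lives in the upper layers and
 * is not part of this file): the host registers a callback matching the
 * tx_pause_callback typedef once at attach time, and the DP layer later
 * invokes soc->pause_cb(flow_pool_id, action, reason) from the
 * flow-control paths above, e.g. with WLAN_NETIF_VO_QUEUE_ON /
 * WLAN_DATA_FLOW_CTRL_VO as seen in dp_tx_flow_ctrl_reset_subqueues().
 */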
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
                               uint8_t vdev_id)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
    int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

    if (!pdev) {
        dp_err("pdev is NULL");
        return QDF_STATUS_E_INVAL;
    }

    return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
                                       vdev_id, tx_ring_size);
}
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
                           uint8_t vdev_id)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

    if (!pdev) {
        dp_err("pdev is NULL");
        return;
    }

    dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
                                  FLOW_TYPE_VDEV, vdev_id);
}
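/*
 * End-to-end lifecycle sketch (illustrative values): when vdev 2 on
 * pdev 0 comes up, the upper layer calls
 * dp_tx_flow_pool_map(soc_hdl, 0, 2), which sizes the pool from
 * wlan_cfg_get_num_tx_desc() and maps it to the vdev; on teardown,
 * dp_tx_flow_pool_unmap(soc_hdl, 0, 2) unmaps the vdev, and the pool is
 * freed once every outstanding tx descriptor has been returned.
 */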