/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */

#include "dp_types.h"
#include "dp_tx_desc.h"
#include <cdp_txrx_handle.h>
#include "dp_internal.h"

#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3
/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 *
 * @ctx: Handle to struct dp_soc.
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(void *ctx)
{
        struct dp_soc *soc = ctx;
        struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
        struct dp_tx_desc_pool_s *pool = NULL;
        struct dp_tx_desc_pool_s tmp_pool;
        int i;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "No of pool map received %d", pool_stats->pool_map_count);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "No of pool unmap received %d", pool_stats->pool_unmap_count);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "Pkt dropped due to unavailability of pool %d",
                pool_stats->pkt_drop_no_pool);

        /*
         * Nested spin lock.
         * Always take in below order.
         * flow_pool_array_lock -> flow_pool_lock
         */
        qdf_spin_lock_bh(&soc->flow_pool_array_lock);
        for (i = 0; i < MAX_TXDESC_POOLS; i++) {
                pool = &soc->tx_desc[i];
                if (pool->status > FLOW_POOL_INVALID)
                        continue;
                qdf_spin_lock_bh(&pool->flow_pool_lock);
                qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "Flow_pool_id %d :: status %d",
                        tmp_pool.flow_pool_id, tmp_pool.status);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "Total %d :: Available %d",
                        tmp_pool.pool_size, tmp_pool.avail_desc);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "Start threshold %d :: Stop threshold %d",
                        tmp_pool.start_th, tmp_pool.stop_th);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "Member flow_id %d :: flow_type %d",
                        tmp_pool.flow_pool_id, tmp_pool.flow_type);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "Pkt dropped due to unavailability of descriptors %d",
                        tmp_pool.pkt_drop_no_desc);
                qdf_spin_lock_bh(&soc->flow_pool_array_lock);
        }
        qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}
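
/*
 * Note on the loop above: each pool is snapshotted into tmp_pool while
 * flow_pool_lock is held, and the QDF_TRACE logging then runs on the
 * snapshot with both locks dropped, so slow trace output never extends
 * the lock hold time. Both locks are re-taken before the next iteration,
 * preserving the flow_pool_array_lock -> flow_pool_lock ordering.
 */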
/**
 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
 *
 * @soc: Handle to struct dp_soc.
 *
 * Return: None
 */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
{
        if (!soc) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: soc is null\n", __func__);
                return;
        }
        qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
}
/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
        uint8_t flow_pool_id, uint16_t flow_pool_size)
{
        struct dp_tx_desc_pool_s *pool;
        uint32_t stop_threshold;
        uint32_t start_threshold;

        if (flow_pool_id >= MAX_TXDESC_POOLS) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: invalid flow_pool_id %d", __func__, flow_pool_id);
                return NULL;
        }

        pool = &soc->tx_desc[flow_pool_id];
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: flow pool already allocated, attached %d times\n",
                        __func__, pool->pool_create_cnt);
                if (pool->avail_desc > pool->start_th)
                        pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
                else
                        pool->status = FLOW_POOL_ACTIVE_PAUSED;
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                return pool;
        }

        if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                return NULL;
        }

        stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
        start_threshold = stop_threshold +
                wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

        pool->flow_pool_id = flow_pool_id;
        pool->pool_size = flow_pool_size;
        pool->avail_desc = flow_pool_size;
        pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
        /* INI is in percentage so divide by 100 */
        pool->start_th = (start_threshold * flow_pool_size) / 100;
        pool->stop_th = (stop_threshold * flow_pool_size) / 100;
        pool->pool_create_cnt++;

        qdf_spin_unlock_bh(&pool->flow_pool_lock);

        return pool;
}
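
/*
 * Threshold arithmetic, worked through with hypothetical INI values
 * (the percentages below are illustrative, not the driver defaults):
 *
 *   stop_queue_th = 15 (%), start_queue_offset = 10 (%),
 *   flow_pool_size = 1024 descriptors
 *
 *   stop_th  = (15 * 1024) / 100        = 153 -> pause the queue once
 *              avail_desc falls below this watermark
 *   start_th = ((15 + 10) * 1024) / 100 = 256 -> unpause only after
 *              avail_desc climbs back above this higher watermark
 *
 * The gap between the two watermarks provides hysteresis, so the queue
 * does not flap between paused and unpaused around a single threshold.
 */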
/**
 * dp_tx_delete_flow_pool() - delete flow pool
 * @soc: Handle to struct dp_soc
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 for success or error
 */
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
        bool force)
{
        if (!soc || !pool) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: pool or soc is NULL\n", __func__);
                QDF_ASSERT(0);
                return -ENOMEM;
        }

        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (!pool->pool_create_cnt) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "flow pool either not created or already deleted");
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                return -ENOENT;
        }
        pool->pool_create_cnt--;
        if (pool->pool_create_cnt) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: pool is still attached, pending detach %d\n",
                        __func__, pool->pool_create_cnt);
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                return -EAGAIN;
        }

        if (pool->avail_desc < pool->pool_size) {
                pool->status = FLOW_POOL_INVALID;
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                return -EAGAIN;
        }

        /* We have all the descriptors for the pool, we can delete the pool */
        dp_tx_desc_pool_free(soc, pool->flow_pool_id);
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        return 0;
}
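
/*
 * Deletion is deferred when descriptors are still in flight: the pool is
 * parked in FLOW_POOL_INVALID and -EAGAIN is returned. The expectation
 * (an assumption about the descriptor-free path, see dp_tx_desc_free())
 * is that returning the last outstanding descriptor to an invalid pool
 * completes the teardown once avail_desc reaches pool_size again.
 */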
/**
 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
        struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
        struct dp_vdev *vdev;
        struct dp_soc *soc = pdev->soc;

        vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
                        (struct cdp_pdev *)pdev, vdev_id);
        if (!vdev) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: invalid vdev_id %d\n",
                        __func__, vdev_id);
                return;
        }

        vdev->pool = pool;
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        pool->pool_owner_ctx = soc;
        pool->flow_pool_id = vdev_id;
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
/**
 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
        struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
        struct dp_vdev *vdev;
        struct dp_soc *soc = pdev->soc;

        vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
                        (struct cdp_pdev *)pdev, vdev_id);
        if (!vdev) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: invalid vdev_id %d\n",
                        __func__, vdev_id);
                return;
        }

        vdev->pool = NULL;
}
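
/*
 * Note the asymmetry between map and unmap: map records the owner soc
 * and flow_pool_id in the pool under flow_pool_lock, while unmap only
 * clears vdev->pool. The pool-side state is left for
 * dp_tx_delete_flow_pool(), which reclaims the pool itself.
 */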
/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: QDF_STATUS_SUCCESS or QDF error code
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
        uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
{
        struct dp_soc *soc = pdev->soc;
        struct dp_tx_desc_pool_s *pool;
        enum htt_flow_type type = flow_type;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                "%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
                __func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

        if (qdf_unlikely(!soc)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: soc is NULL", __func__);
                return QDF_STATUS_E_FAULT;
        }
        soc->pool_stats.pool_map_count++;

        pool = dp_tx_create_flow_pool(soc, flow_pool_id,
                        flow_pool_size);
        if (pool == NULL) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: creation of flow_pool %d size %d failed\n",
                        __func__, flow_pool_id, flow_pool_size);
                return QDF_STATUS_E_RESOURCES;
        }

        switch (type) {
        case FLOW_TYPE_VDEV:
                dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
                break;
        default:
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: flow type %d not supported !!!\n",
                        __func__, type);
                break;
        }

        return QDF_STATUS_SUCCESS;
}
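
/*
 * A typical invocation, sketched with hypothetical values (the HTT T2H
 * dispatch site lives outside this file): on receiving
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP for vdev 0 with a 1024-entry pool, the
 * dispatcher would call
 *
 *   dp_tx_flow_pool_map_handler(pdev, 0, FLOW_TYPE_VDEV, 0, 1024);
 *
 * which creates tx_desc pool 0 and points vdev 0 at it.
 */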
/**
 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
        uint8_t flow_type, uint8_t flow_pool_id)
{
        struct dp_soc *soc;
        struct dp_tx_desc_pool_s *pool;
        enum htt_flow_type type = flow_type;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                "%s: flow_id %d flow_type %d flow_pool_id %d\n",
                __func__, flow_id, flow_type, flow_pool_id);

        if (qdf_unlikely(!pdev)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: pdev is NULL", __func__);
                return;
        }
        soc = pdev->soc;
        soc->pool_stats.pool_unmap_count++;

        if (flow_pool_id >= MAX_TXDESC_POOLS) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: flow_pool not available flow_pool_id %d\n",
                        __func__, flow_pool_id);
                return;
        }
        pool = &soc->tx_desc[flow_pool_id];

        switch (type) {
        case FLOW_TYPE_VDEV:
                dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
                break;
        default:
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: flow type %d not supported !!!\n",
                        __func__, type);
                return;
        }

        /* only delete if all descriptors are available */
        dp_tx_delete_flow_pool(soc, pool, false);
}
/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
        qdf_spinlock_create(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
        qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}
/**
 * dp_txrx_register_pause_cb() - Register pause callback
 * @handle: Handle to struct cdp_soc_t
 * @pause_cb: Tx pause callback
 *
 * Return: QDF_STATUS_SUCCESS or QDF error code
 */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
        tx_pause_callback pause_cb)
{
        struct dp_soc *soc = (struct dp_soc *)handle;

        if (!soc || !pause_cb) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        FL("soc or pause_cb is NULL"));
                return QDF_STATUS_E_INVAL;
        }
        soc->pause_cb = pause_cb;

        return QDF_STATUS_SUCCESS;
}
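
/*
 * A minimal sketch of a pause callback a caller might register here,
 * assuming the cdp tx_pause_callback signature (vdev id, netif action,
 * netif reason). The names example_tx_pause_cb and example_netif_action
 * are hypothetical stand-ins for an OS-shim implementation:
 *
 *   static void example_tx_pause_cb(uint8_t vdev_id,
 *                                   enum netif_action_type action,
 *                                   enum netif_reason_type reason)
 *   {
 *           example_netif_action(vdev_id, action, reason);
 *   }
 *
 *   dp_txrx_register_pause_cb(cdp_soc, example_tx_pause_cb);
 */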
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, struct cdp_pdev *pdev,
        uint8_t vdev_id)
{
        struct dp_soc *soc = (struct dp_soc *)handle;
        int tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);

        return dp_tx_flow_pool_map_handler((struct dp_pdev *)pdev, vdev_id,
                        FLOW_TYPE_VDEV, vdev_id, tx_ring_size);
}

void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
        uint8_t vdev_id)
{
        dp_tx_flow_pool_unmap_handler((struct dp_pdev *)pdev, vdev_id,
                        FLOW_TYPE_VDEV, vdev_id);
}
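
/*
 * End-to-end lifecycle, sketched for a single vdev (the attach/detach
 * call sites and the cdp_soc/cdp_pdev handles are assumptions; only the
 * functions defined above are real):
 *
 *   dp_txrx_register_pause_cb(cdp_soc, example_tx_pause_cb);
 *   dp_tx_flow_pool_map(cdp_soc, cdp_pdev, vdev_id);      vdev attach
 *   ... traffic flows; the pool pauses/unpauses around stop_th/start_th
 *   via the registered callback ...
 *   dp_tx_flow_pool_unmap(cdp_soc, cdp_pdev, vdev_id);    vdev detach
 */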