dp_tx_flow_control.c

/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <qdf_util.h>		/* qdf_unlikely */

#include "dp_types.h"
#include "dp_tx_desc.h"
#include <cdp_txrx_handle.h>
#include "dp_internal.h"

#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3

/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 * @ctx: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(void *ctx)
{
	struct dp_soc *soc = ctx;
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool unmap received %d", pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Pkt dropped due to unavailability of pool %d",
		  pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin lock.
	 * Always take in below order:
	 * flow_pool_array_lock -> flow_pool_lock
	 */
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;

		/* Snapshot the pool under its lock, then drop both locks
		 * before tracing so no spinlock is held while logging.
		 */
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Flow_pool_id %d :: status %d",
			  tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Total %d :: Available %d",
			  tmp_pool.pool_size, tmp_pool.avail_desc);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Start threshold %d :: Stop threshold %d",
			  tmp_pool.start_th, tmp_pool.stop_th);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Member flow_id %d :: flow_type %d",
			  tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Pkt dropped due to unavailability of descriptors %d",
			  tmp_pool.pkt_drop_no_desc);

		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
{
	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: soc is NULL\n", __func__);
		return;
	}

	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
}

/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: soc is NULL\n", __func__);
		return NULL;
	}

	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->status == FLOW_POOL_INVALID) {
		/* A delete was requested while descriptors were still in
		 * flight; the pool memory is intact, so just revive it.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: flow pool already allocated\n", __func__);
		if (pool->avail_desc > pool->start_th)
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		else
			pool->status = FLOW_POOL_ACTIVE_PAUSED;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	/* INI values are percentages, so scale by pool size / 100 */
	pool->start_th = (start_threshold * flow_pool_size) / 100;
	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}
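
/*
 * Illustrative sketch only, kept out of the build with #if 0: a worked
 * example of the threshold arithmetic above. The numbers here (a
 * 1024-descriptor pool, a 15% stop threshold, a 10% start offset) are
 * hypothetical, not values taken from any INI file.
 */
#if 0
static void dp_tx_threshold_example(void)
{
	uint16_t flow_pool_size = 1024;	/* hypothetical pool size */
	uint32_t stop_threshold = 15;	/* hypothetical stop percentage */
	uint32_t start_threshold = stop_threshold + 10; /* + hypothetical offset */

	/* Same scaling as dp_tx_create_flow_pool() */
	uint16_t stop_th = (stop_threshold * flow_pool_size) / 100;   /* 153 */
	uint16_t start_th = (start_threshold * flow_pool_size) / 100; /* 256 */

	/* The pool pauses tx once avail_desc falls to stop_th (153) and
	 * unpauses once avail_desc climbs back to start_th (256).
	 */
}
#endif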

/**
 * dp_tx_delete_flow_pool() - delete flow pool
 * @soc: Handle to struct dp_soc
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 for success or error
 */
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
	bool force)
{
	if (!soc || !pool) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pool or soc is NULL\n", __func__);
		QDF_ASSERT(0);
		return ENOMEM;
	}

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->avail_desc < pool->pool_size) {
		/* Descriptors still outstanding: mark the pool invalid so it
		 * is freed once the last descriptor comes back.
		 */
		pool->status = FLOW_POOL_INVALID;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return EAGAIN;
	}

	/* We have all the descriptors for the pool, we can delete the pool */
	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	return 0;
}
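
/*
 * Illustrative sketch only (not compiled): the two outcomes of a
 * non-forced delete. Everything except dp_tx_delete_flow_pool() itself
 * is hypothetical caller context.
 */
#if 0
{
	int ret = dp_tx_delete_flow_pool(soc, pool, false);

	if (ret == EAGAIN) {
		/* Descriptors still outstanding: the pool is now in
		 * FLOW_POOL_INVALID state and will be freed once the last
		 * descriptor is returned.
		 */
	} else if (ret == 0) {
		/* All descriptors were available: the pool memory is freed. */
	}
}
#endif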

/**
 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id/vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
	struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
					(struct cdp_pdev *)pdev, vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: invalid vdev_id %d\n", __func__, vdev_id);
		return;
	}

	vdev->pool = pool;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->pool_owner_ctx = soc;
	pool->flow_pool_id = vdev_id;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id/vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
	struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
					(struct cdp_pdev *)pdev, vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: invalid vdev_id %d\n", __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
}

/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Processes the HTT_T2H_MSG_TYPE_FLOW_POOL_MAP target-to-host message.
 *
 * Return: QDF_STATUS_SUCCESS for success, error status otherwise
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
		  __func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: soc is NULL", __func__);
		return QDF_STATUS_E_FAULT;
	}
	soc->pool_stats.pool_map_count++;

	pool = dp_tx_create_flow_pool(soc, flow_pool_id, flow_pool_size);
	if (!pool) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: creation of flow_pool %d size %d failed\n",
			  __func__, flow_pool_id, flow_pool_size);
		return QDF_STATUS_E_RESOURCES;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: flow type %d not supported !!!\n",
			  __func__, type);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
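
/*
 * Illustrative sketch only (not compiled): how a caller might invoke the
 * map handler on receipt of HTT_T2H_MSG_TYPE_FLOW_POOL_MAP. The field
 * values (flow/vdev 0, pool 0, 1024 descriptors) are hypothetical.
 */
#if 0
{
	QDF_STATUS status;

	/* Map flow/vdev 0 onto descriptor pool 0 with 1024 descriptors */
	status = dp_tx_flow_pool_map_handler(pdev, 0, FLOW_TYPE_VDEV, 0, 1024);
	if (QDF_IS_STATUS_ERROR(status))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "flow pool map failed");
}
#endif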

/**
 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Processes the HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP target-to-host message.
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id)
{
	struct dp_soc *soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: flow_id %d flow_type %d flow_pool_id %d\n",
		  __func__, flow_id, flow_type, flow_pool_id);

	/* Validate pdev before dereferencing it for soc */
	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pdev is NULL", __func__);
		return;
	}
	soc = pdev->soc;
	soc->pool_stats.pool_unmap_count++;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: flow_pool not available flow_pool_id %d\n",
			  __func__, flow_pool_id);
		return;
	}
	pool = &soc->tx_desc[flow_pool_id];

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: flow type %d not supported !!!\n",
			  __func__, type);
		return;
	}

	/* only delete if all descriptors are available */
	dp_tx_delete_flow_pool(soc, pool, false);
}

/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}

/**
 * dp_txrx_register_pause_cb() - Register pause callback
 * @handle: Handle to struct cdp_soc_t
 * @pause_cb: Tx pause callback
 *
 * Return: QDF_STATUS_SUCCESS for success, error status otherwise
 */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
	tx_pause_callback pause_cb)
{
	struct dp_soc *soc = (struct dp_soc *)handle;

	if (!soc || !pause_cb) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("soc or pause_cb is NULL"));
		return QDF_STATUS_E_INVAL;
	}
	soc->pause_cb = pause_cb;

	return QDF_STATUS_SUCCESS;
}
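
/*
 * Illustrative sketch only (not compiled): registering a pause callback.
 * dp_example_pause_cb and soc_handle are hypothetical, and the prototype
 * assumes the legacy tx_pause_callback signature (vdev_id, netif action,
 * netif reason); check cdp_txrx_flow_ctrl_legacy.h for the exact types.
 */
#if 0
static void dp_example_pause_cb(uint8_t vdev_id,
				enum netif_action_type action,
				enum netif_reason_type reason)
{
	/* e.g. stop or wake the OS netif queues for this vdev */
}

{
	if (dp_txrx_register_pause_cb(soc_handle, dp_example_pause_cb) !=
	    QDF_STATUS_SUCCESS)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "pause cb registration failed");
}
#endif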

/**
 * dp_tx_flow_pool_map() - Map flow pool to a vdev
 * @handle: Handle to struct cdp_soc_t
 * @pdev: Handle to struct cdp_pdev
 * @vdev_id: vdev_id of the vdev
 *
 * Return: QDF_STATUS_SUCCESS for success, error status otherwise
 */
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, struct cdp_pdev *pdev,
	uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)handle;
	int tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);

	return dp_tx_flow_pool_map_handler((struct dp_pdev *)pdev, vdev_id,
					   FLOW_TYPE_VDEV, vdev_id,
					   tx_ring_size);
}

/**
 * dp_tx_flow_pool_unmap() - Unmap flow pool from a vdev
 * @soc: Handle to struct cdp_soc_t
 * @pdev: Handle to struct cdp_pdev
 * @vdev_id: vdev_id of the vdev
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
	uint8_t vdev_id)
{
	dp_tx_flow_pool_unmap_handler((struct dp_pdev *)pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
}
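
/*
 * Illustrative sketch only (not compiled): the overall per-vdev flow pool
 * lifecycle as driven through the cdp wrappers above. soc_handle and
 * pdev_handle are hypothetical caller-owned handles.
 */
#if 0
{
	uint8_t vdev_id = 0;

	/* vdev up: create a pool sized to the tx ring and map it */
	dp_tx_flow_pool_map(soc_handle, pdev_handle, vdev_id);

	/* ... during traffic, the pool pauses/unpauses the netif queues
	 * via soc->pause_cb as avail_desc crosses stop_th/start_th ...
	 */

	/* vdev down: unmap; the pool is deleted once all of its
	 * descriptors have been returned.
	 */
	dp_tx_flow_pool_unmap(soc_handle, pdev_handle, vdev_id);
}
#endif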