dp_tx_desc.h

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"
/*
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* ???Ring ID needed??? */
/* TODO: Need to revisit this change for Rhine */
#ifdef WLAN_SOFTUMAC_SUPPORT
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FF0
#define DP_TX_DESC_ID_PAGE_OS      4
#define DP_TX_DESC_ID_OFFSET_MASK  0x00000F
#define DP_TX_DESC_ID_OFFSET_OS    0
#else
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
#endif /* WLAN_SOFTUMAC_SUPPORT */
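
/*
 * Example: composing a descriptor cookie from the fields above. The
 * names pool_id, page_id and offset are illustrative here; the inverse
 * decode is what dp_tx_is_desc_id_valid() performs further below.
 *
 *     tx_desc_id = (pool_id << DP_TX_DESC_ID_POOL_OS) |
 *                  (page_id << DP_TX_DESC_ID_PAGE_OS) |
 *                  (offset << DP_TX_DESC_ID_OFFSET_OS);
 */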
/*
 * Compilation assert on tx desc size
 *
 * if assert is hit please update POOL_MASK,
 * PAGE_MASK according to updated size
 *
 * for current PAGE mask allowed size range of tx_desc
 * is between 128 and 256
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
                        ((sizeof(struct dp_tx_desc_s)) <=
                         (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
                        ((sizeof(struct dp_tx_desc_s)) >
                         (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
                        );
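
/*
 * The bound works out as follows: the assert pins
 *     DP_BLOCKMEM_SIZE >> (PAGE_OS + 1) < sizeof(struct dp_tx_desc_s)
 *                                       <= DP_BLOCKMEM_SIZE >> PAGE_OS
 * so, assuming a DP_BLOCKMEM_SIZE of 8192 bytes and the default
 * PAGE_OS of 5, the descriptor must be larger than 128 bytes and at
 * most 256 bytes, which is the 128..256 window quoted above.
 */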
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
    ((pool)->status == FLOW_POOL_INACTIVE)

#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
    dp_tx_flow_pool_member_clean(_tx_desc_pool)
#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
do { \
    (_tx_desc_pool)->elem_size = 0; \
    (_tx_desc_pool)->freelist = NULL; \
    (_tx_desc_pool)->pool_size = 0; \
    (_tx_desc_pool)->avail_desc = 0; \
    (_tx_desc_pool)->start_th = 0; \
    (_tx_desc_pool)->stop_th = 0; \
    (_tx_desc_pool)->status = FLOW_POOL_INACTIVE; \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */

#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock) qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock) qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock) qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
do { \
    (_tx_desc_pool)->elem_size = 0; \
    (_tx_desc_pool)->num_allocated = 0; \
    (_tx_desc_pool)->freelist = NULL; \
    (_tx_desc_pool)->elem_count = 0; \
    (_tx_desc_pool)->num_free = 0; \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

#ifdef DP_TX_TRACKING
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
                                        uint32_t magic_pattern)
{
    tx_desc->magic = magic_pattern;
}
#else
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
                                        uint32_t magic_pattern)
{
}
#endif
/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required will be large since,
 * based on the number of clients (1024 clients x 3 radios), the
 * outstanding MSDUs stored in TQM queues and LMAC queues will be
 * significantly large.
 *
 * To avoid allocating a large contiguous memory region, it uses the
 * multi_page_alloc qdf function to allocate memory in multiple pages.
 * It then iterates through the memory allocated across pages and links
 * each descriptor to the next descriptor, taking care of page
 * boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring. This minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs. Alternately, multiple
 * pools can be used for multiple VDEVs for VDEV-level flow control.
 *
 * Return: Status code. 0 for success.
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
                                 uint32_t num_elem);
/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
                                uint32_t num_elem);

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
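
/*
 * Typical pool lifecycle, sketched under the assumption that the caller
 * owns one pool per ring (pool_id and num_elem values are illustrative):
 *
 *     if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem) !=
 *         QDF_STATUS_SUCCESS)
 *         return QDF_STATUS_E_NOMEM;
 *     if (dp_tx_desc_pool_init(soc, pool_id, num_elem) !=
 *         QDF_STATUS_SUCCESS) {
 *         dp_tx_desc_pool_free(soc, pool_id);
 *         return QDF_STATUS_E_FAULT;
 *     }
 *     ...
 *     dp_tx_desc_pool_deinit(soc, pool_id);
 *     dp_tx_desc_pool_free(soc, pool_id);
 */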
/**
 * dp_tx_ext_desc_pool_alloc_by_id() - allocate TX extension Descriptor pool
 *                                     based on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc,
                                           uint32_t num_elem,
                                           uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_init_by_id() - initialize Tx extension Descriptor pool
 *                                    based on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                          uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_free_by_id() - free TX extension Descriptor pool
 *                                    based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_ext_desc_pool_deinit_by_id() - deinit Tx extension Descriptor pool
 *                                      based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
/**
 * dp_tx_tso_desc_pool_alloc_by_id() - allocate TSO Descriptor pool based
 *                                     on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
                                           uint32_t num_elem,
                                           uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_init_by_id() - initialize TSO Descriptor pool
 *                                    based on pool ID
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                          uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_free_by_id() - free TSO Descriptor pool based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_desc_pool_deinit_by_id() - deinitialize TSO Descriptor pool
 *                                      based on pool ID
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
/**
 * dp_tx_tso_num_seg_pool_alloc_by_id() - Allocate descriptors that track the
 *                                        fragments in each tso segment, based
 *                                        on pool ID
 * @soc: handle to dp soc structure
 * @num_elem: total number of descriptors to be allocated
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
                                              uint32_t num_elem,
                                              uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_init_by_id() - Initialize descriptors that track the
 *                                       fragments in each tso segment, based
 *                                       on pool ID
 * @soc: handle to dp soc structure
 * @num_elem: total number of descriptors to be initialized
 * @pool_id: Pool ID
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc,
                                             uint32_t num_elem,
                                             uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_free_by_id() - free descriptors that track the
 *                                       fragments in each tso segment, based
 *                                       on pool ID
 * @soc: handle to dp soc structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_num_seg_pool_deinit_by_id() - de-initialize descriptors that track
 *                                         the fragments in each tso segment,
 *                                         based on pool ID
 * @soc: handle to dp soc structure
 * @pool_id: Pool ID
 */
void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track the
 *                                   fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: none
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: nbuf list for delayed free
 */
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
#endif
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
                                     tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
                               uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
                           uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
                                                 uint8_t flow_pool_id,
                                                 uint32_t flow_pool_size);
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
                                       uint8_t flow_type, uint8_t flow_pool_id,
                                       uint32_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
                                   uint8_t flow_type, uint8_t flow_pool_id);
/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
    struct dp_tx_desc_s *tx_desc = pool->freelist;

    pool->freelist = pool->freelist->next;
    pool->avail_desc--;
    return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
                              struct dp_tx_desc_s *tx_desc)
{
    tx_desc->next = pool->freelist;
    pool->freelist = tx_desc;
    pool->avail_desc++;
}
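
/*
 * Usage sketch for the two helpers above: both assume the caller holds
 * pool->flow_pool_lock and has validated the freelist, exactly as
 * dp_tx_desc_alloc()/dp_tx_desc_free() below do:
 *
 *     qdf_spin_lock_bh(&pool->flow_pool_lock);
 *     if (pool->avail_desc)
 *         tx_desc = dp_tx_get_desc_flow_pool(pool);
 *     qdf_spin_unlock_bh(&pool->flow_pool_lock);
 *     ...
 *     qdf_spin_lock_bh(&pool->flow_pool_lock);
 *     dp_tx_put_desc_flow_pool(pool, tx_desc);
 *     qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */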
#ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
    pool->elem_size = 0;
    pool->freelist = NULL;
    pool->pool_size = 0;
    pool->avail_desc = 0;
    qdf_mem_zero(pool->start_th, FL_TH_MAX);
    qdf_mem_zero(pool->stop_th, FL_TH_MAX);
    pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if current avail desc meets a threshold
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
    if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
        return true;
    else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
        return true;
    else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
        return true;
    else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
        return true;
    else
        return false;
}
/**
 * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
 * @soc: dp soc
 * @pool: flow pool
 */
static inline void
dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
                             struct dp_tx_desc_pool_s *pool)
{
    if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
        pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
        return;
    } else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
               pool->avail_desc > pool->stop_th[DP_TH_VI]) {
        pool->status = FLOW_POOL_BE_BK_PAUSED;
    } else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
               pool->avail_desc > pool->stop_th[DP_TH_VO]) {
        pool->status = FLOW_POOL_VI_PAUSED;
    } else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
               pool->avail_desc > pool->stop_th[DP_TH_HI]) {
        pool->status = FLOW_POOL_VO_PAUSED;
    } else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
        pool->status = FLOW_POOL_ACTIVE_PAUSED;
    }

    switch (pool->status) {
    case FLOW_POOL_ACTIVE_PAUSED:
        soc->pause_cb(pool->flow_pool_id,
                      WLAN_NETIF_PRIORITY_QUEUE_OFF,
                      WLAN_DATA_FLOW_CTRL_PRI);
        fallthrough;
    case FLOW_POOL_VO_PAUSED:
        soc->pause_cb(pool->flow_pool_id,
                      WLAN_NETIF_VO_QUEUE_OFF,
                      WLAN_DATA_FLOW_CTRL_VO);
        fallthrough;
    case FLOW_POOL_VI_PAUSED:
        soc->pause_cb(pool->flow_pool_id,
                      WLAN_NETIF_VI_QUEUE_OFF,
                      WLAN_DATA_FLOW_CTRL_VI);
        fallthrough;
    case FLOW_POOL_BE_BK_PAUSED:
        soc->pause_cb(pool->flow_pool_id,
                      WLAN_NETIF_BE_BK_QUEUE_OFF,
                      WLAN_DATA_FLOW_CTRL_BE_BK);
        break;
    default:
        dp_err("Invalid pool status:%u to adjust", pool->status);
    }
}
/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
    struct dp_tx_desc_s *tx_desc = NULL;
    struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
    bool is_pause = false;
    enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
    enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
    enum netif_reason_type reason;

    if (qdf_likely(pool)) {
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (qdf_likely(pool->avail_desc &&
                       pool->status != FLOW_POOL_INVALID &&
                       pool->status != FLOW_POOL_INACTIVE)) {
            tx_desc = dp_tx_get_desc_flow_pool(pool);
            tx_desc->pool_id = desc_pool_id;
            tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
            dp_tx_desc_set_magic(tx_desc,
                                 DP_TX_MAGIC_PATTERN_INUSE);
            is_pause = dp_tx_is_threshold_reached(pool,
                                                  pool->avail_desc);

            if (qdf_unlikely(pool->status ==
                             FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
                dp_tx_adjust_flow_pool_state(soc, pool);
                is_pause = false;
            }

            if (qdf_unlikely(is_pause)) {
                switch (pool->status) {
                case FLOW_POOL_ACTIVE_UNPAUSED:
                    /* pause network BE/BK queue */
                    act = WLAN_NETIF_BE_BK_QUEUE_OFF;
                    reason = WLAN_DATA_FLOW_CTRL_BE_BK;
                    level = DP_TH_BE_BK;
                    pool->status = FLOW_POOL_BE_BK_PAUSED;
                    break;
                case FLOW_POOL_BE_BK_PAUSED:
                    /* pause network VI queue */
                    act = WLAN_NETIF_VI_QUEUE_OFF;
                    reason = WLAN_DATA_FLOW_CTRL_VI;
                    level = DP_TH_VI;
                    pool->status = FLOW_POOL_VI_PAUSED;
                    break;
                case FLOW_POOL_VI_PAUSED:
                    /* pause network VO queue */
                    act = WLAN_NETIF_VO_QUEUE_OFF;
                    reason = WLAN_DATA_FLOW_CTRL_VO;
                    level = DP_TH_VO;
                    pool->status = FLOW_POOL_VO_PAUSED;
                    break;
                case FLOW_POOL_VO_PAUSED:
                    /* pause network HI PRI queue */
                    act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
                    reason = WLAN_DATA_FLOW_CTRL_PRI;
                    level = DP_TH_HI;
                    pool->status = FLOW_POOL_ACTIVE_PAUSED;
                    break;
                case FLOW_POOL_ACTIVE_PAUSED:
                    act = WLAN_NETIF_ACTION_TYPE_NONE;
                    break;
                default:
                    dp_err_rl("pool status is %d!",
                              pool->status);
                    break;
                }

                if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
                    pool->latest_pause_time[level] =
                        qdf_get_system_timestamp();
                    soc->pause_cb(desc_pool_id,
                                  act,
                                  reason);
                }
            }
        } else {
            pool->pkt_drop_no_desc++;
        }
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
    } else {
        dp_err_rl("NULL desc pool pool_id %d", desc_pool_id);
        soc->pool_stats.pkt_drop_no_pool++;
    }

    return tx_desc;
}
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
                uint8_t desc_pool_id)
{
    struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
    qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
    enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
    enum netif_reason_type reason;

    qdf_spin_lock_bh(&pool->flow_pool_lock);
    tx_desc->vdev_id = DP_INVALID_VDEV_ID;
    tx_desc->nbuf = NULL;
    tx_desc->flags = 0;
    dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
    dp_tx_put_desc_flow_pool(pool, tx_desc);
    switch (pool->status) {
    case FLOW_POOL_ACTIVE_PAUSED:
        if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
            act = WLAN_NETIF_PRIORITY_QUEUE_ON;
            reason = WLAN_DATA_FLOW_CTRL_PRI;
            pool->status = FLOW_POOL_VO_PAUSED;

            /* Update maximum pause duration for HI queue */
            pause_dur = unpause_time -
                        pool->latest_pause_time[DP_TH_HI];
            if (pool->max_pause_time[DP_TH_HI] < pause_dur)
                pool->max_pause_time[DP_TH_HI] = pause_dur;
        }
        break;
    case FLOW_POOL_VO_PAUSED:
        if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
            act = WLAN_NETIF_VO_QUEUE_ON;
            reason = WLAN_DATA_FLOW_CTRL_VO;
            pool->status = FLOW_POOL_VI_PAUSED;

            /* Update maximum pause duration for VO queue */
            pause_dur = unpause_time -
                        pool->latest_pause_time[DP_TH_VO];
            if (pool->max_pause_time[DP_TH_VO] < pause_dur)
                pool->max_pause_time[DP_TH_VO] = pause_dur;
        }
        break;
    case FLOW_POOL_VI_PAUSED:
        if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
            act = WLAN_NETIF_VI_QUEUE_ON;
            reason = WLAN_DATA_FLOW_CTRL_VI;
            pool->status = FLOW_POOL_BE_BK_PAUSED;

            /* Update maximum pause duration for VI queue */
            pause_dur = unpause_time -
                        pool->latest_pause_time[DP_TH_VI];
            if (pool->max_pause_time[DP_TH_VI] < pause_dur)
                pool->max_pause_time[DP_TH_VI] = pause_dur;
        }
        break;
    case FLOW_POOL_BE_BK_PAUSED:
        if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
            act = WLAN_NETIF_BE_BK_QUEUE_ON;
            reason = WLAN_DATA_FLOW_CTRL_BE_BK;
            pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

            /* Update maximum pause duration for BE_BK queue */
            pause_dur = unpause_time -
                        pool->latest_pause_time[DP_TH_BE_BK];
            if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
                pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
        }
        break;
    case FLOW_POOL_INVALID:
        if (pool->avail_desc == pool->pool_size) {
            dp_tx_desc_pool_deinit(soc, desc_pool_id);
            dp_tx_desc_pool_free(soc, desc_pool_id);
            qdf_spin_unlock_bh(&pool->flow_pool_lock);
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "%s %d pool is freed!!",
                      __func__, __LINE__);
            return;
        }
        break;
    case FLOW_POOL_ACTIVE_UNPAUSED:
        break;
    default:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s %d pool is INACTIVE State!!",
                  __func__, __LINE__);
        break;
    }

    if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
        soc->pause_cb(pool->flow_pool_id,
                      act, reason);
    qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
    if (qdf_unlikely(avail_desc < pool->stop_th))
        return true;
    else
        return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: Tx descriptor or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
    struct dp_tx_desc_s *tx_desc = NULL;
    struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

    if (pool) {
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
            pool->avail_desc) {
            tx_desc = dp_tx_get_desc_flow_pool(pool);
            tx_desc->pool_id = desc_pool_id;
            tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
            dp_tx_desc_set_magic(tx_desc,
                                 DP_TX_MAGIC_PATTERN_INUSE);
            if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
                pool->status = FLOW_POOL_ACTIVE_PAUSED;
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                /* pause network queues */
                soc->pause_cb(desc_pool_id,
                              WLAN_STOP_ALL_NETIF_QUEUE,
                              WLAN_DATA_FLOW_CONTROL);
            } else {
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
            }
        } else {
            pool->pkt_drop_no_desc++;
            qdf_spin_unlock_bh(&pool->flow_pool_lock);
        }
    } else {
        soc->pool_stats.pkt_drop_no_pool++;
    }

    return tx_desc;
}
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: Descriptor to free
 * @desc_pool_id: Descriptor pool Id
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
                uint8_t desc_pool_id)
{
    struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

    qdf_spin_lock_bh(&pool->flow_pool_lock);
    tx_desc->vdev_id = DP_INVALID_VDEV_ID;
    tx_desc->nbuf = NULL;
    tx_desc->flags = 0;
    dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
    dp_tx_put_desc_flow_pool(pool, tx_desc);
    switch (pool->status) {
    case FLOW_POOL_ACTIVE_PAUSED:
        if (pool->avail_desc > pool->start_th) {
            soc->pause_cb(pool->flow_pool_id,
                          WLAN_WAKE_ALL_NETIF_QUEUE,
                          WLAN_DATA_FLOW_CONTROL);
            pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
        }
        break;
    case FLOW_POOL_INVALID:
        if (pool->avail_desc == pool->pool_size) {
            dp_tx_desc_pool_deinit(soc, desc_pool_id);
            dp_tx_desc_pool_free(soc, desc_pool_id);
            qdf_spin_unlock_bh(&pool->flow_pool_lock);
            qdf_print("%s %d pool is freed!!",
                      __func__, __LINE__);
            return;
        }
        break;
    case FLOW_POOL_ACTIVE_UNPAUSED:
        break;
    default:
        qdf_print("%s %d pool is INACTIVE State!!",
                  __func__, __LINE__);
        break;
    }

    qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#endif /* QCA_AC_BASED_FLOW_CONTROL */
static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                                 DP_MOD_ID_CDP);
    struct dp_tx_desc_pool_s *pool;
    bool status;

    if (!vdev)
        return false;

    pool = vdev->pool;
    status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

    return status;
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
                                                     uint8_t flow_id,
                                                     uint8_t flow_type,
                                                     uint8_t flow_pool_id,
                                                     uint32_t flow_pool_size)
{
    return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
                                                 uint8_t flow_id,
                                                 uint8_t flow_type,
                                                 uint8_t flow_pool_id)
{
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
    if (tx_desc)
        prefetch(tx_desc);
}
#else
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id
 *
 * Return: Tx Descriptor or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
                                                    uint8_t desc_pool_id)
{
    struct dp_tx_desc_s *tx_desc = NULL;
    struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

    TX_DESC_LOCK_LOCK(&pool->lock);

    tx_desc = pool->freelist;

    /* Pool is exhausted */
    if (!tx_desc) {
        TX_DESC_LOCK_UNLOCK(&pool->lock);
        return NULL;
    }

    pool->freelist = pool->freelist->next;
    pool->num_allocated++;
    pool->num_free--;
    dp_tx_prefetch_desc(pool->freelist);

    tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

    TX_DESC_LOCK_UNLOCK(&pool->lock);

    return tx_desc;
}
/**
 * dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
 *                               from the given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocates multiple tx descriptors and links them into a list.
 *
 * Return: first descriptor pointer or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
        struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
    struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
    uint8_t count;
    struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

    TX_DESC_LOCK_LOCK(&pool->lock);

    if ((num_requested == 0) ||
        (pool->num_free < num_requested)) {
        TX_DESC_LOCK_UNLOCK(&pool->lock);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s, No Free Desc: Available(%d) num_requested(%d)",
                  __func__, pool->num_free,
                  num_requested);
        return NULL;
    }

    h_desc = pool->freelist;

    /* h_desc should never be NULL since num_free >= num_requested */
    qdf_assert_always(h_desc);

    /* walk the freelist, flagging each descriptor handed out and
     * leaving c_desc on the last of the num_requested descriptors
     */
    c_desc = h_desc;
    for (count = 0; count < (num_requested - 1); count++) {
        c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
        c_desc = c_desc->next;
    }
    c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

    /* account for all num_requested descriptors removed from the list */
    pool->num_free -= num_requested;
    pool->num_allocated += num_requested;
    pool->freelist = c_desc->next;
    c_desc->next = NULL;

    TX_DESC_LOCK_UNLOCK(&pool->lock);
    return h_desc;
}
/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: ID of the free pool
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
                uint8_t desc_pool_id)
{
    struct dp_tx_desc_pool_s *pool = NULL;

    tx_desc->vdev_id = DP_INVALID_VDEV_ID;
    tx_desc->nbuf = NULL;
    tx_desc->flags = 0;

    pool = &soc->tx_desc[desc_pool_id];
    TX_DESC_LOCK_LOCK(&pool->lock);
    tx_desc->next = pool->freelist;
    pool->freelist = tx_desc;
    pool->num_allocated--;
    pool->num_free++;
    TX_DESC_LOCK_UNLOCK(&pool->lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
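
/*
 * Whichever dp_tx_desc_alloc()/dp_tx_desc_free() variant is compiled in,
 * callers pair the two entry points the same way; a minimal sketch
 * (error handling and descriptor setup elided):
 *
 *     struct dp_tx_desc_s *tx_desc;
 *
 *     tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *     if (!tx_desc)
 *         return QDF_STATUS_E_RESOURCES;
 *     ...
 *     dp_tx_desc_free(soc, tx_desc, desc_pool_id);
 */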
#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check whether the tx desc id is valid
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: tx descriptor id (cookie)
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
    uint8_t pool_id;
    uint16_t page_id, offset;
    struct dp_tx_desc_pool_s *pool;

    pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
              DP_TX_DESC_ID_POOL_OS;
    /* Pool ID is out of limit */
    if (pool_id > wlan_cfg_get_num_tx_desc_pool(
                      soc->wlan_cfg_ctx)) {
        QDF_TRACE(QDF_MODULE_ID_DP,
                  QDF_TRACE_LEVEL_FATAL,
                  "%s:Tx Comp pool id %d not valid",
                  __func__,
                  pool_id);
        goto warn_exit;
    }

    pool = &soc->tx_desc[pool_id];
    /* the pool is freed */
    if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
        QDF_TRACE(QDF_MODULE_ID_DP,
                  QDF_TRACE_LEVEL_FATAL,
                  "%s:the pool %d has been freed",
                  __func__,
                  pool_id);
        goto warn_exit;
    }

    page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
              DP_TX_DESC_ID_PAGE_OS;
    /* the page id is out of limit */
    if (page_id >= pool->desc_pages.num_pages) {
        QDF_TRACE(QDF_MODULE_ID_DP,
                  QDF_TRACE_LEVEL_FATAL,
                  "%s:the page id %d invalid, pool id %d, num_page %d",
                  __func__,
                  page_id,
                  pool_id,
                  pool->desc_pages.num_pages);
        goto warn_exit;
    }

    offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
             DP_TX_DESC_ID_OFFSET_OS;
    /* the offset is out of limit */
    if (offset >= pool->desc_pages.num_element_per_page) {
        QDF_TRACE(QDF_MODULE_ID_DP,
                  QDF_TRACE_LEVEL_FATAL,
                  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
                  __func__,
                  offset,
                  pool_id,
                  pool->desc_pages.num_element_per_page);
        goto warn_exit;
    }

    return true;

warn_exit:
    QDF_TRACE(QDF_MODULE_ID_DP,
              QDF_TRACE_LEVEL_FATAL,
              "%s:Tx desc id 0x%x not valid",
              __func__,
              tx_desc_id);
    qdf_assert_always(0);
    return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
    return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */
#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
                                                    struct dp_tx_desc_s *desc,
                                                    uint8_t allow_fast_comp)
{
    if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
        qdf_likely(allow_fast_comp)) {
        desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
    }
}
#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
                                                    struct dp_tx_desc_s *desc,
                                                    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */
/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: id of the pool the descriptor belongs to
 * @page_id: page id within the descriptor pool
 * @offset: element offset within the page
 *
 * Use page and offset to find the corresponding descriptor object in
 * the given descriptor pool.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
                                                   uint8_t pool_id,
                                                   uint16_t page_id,
                                                   uint16_t offset)
{
    struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

    return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
           tx_desc_pool->elem_size * offset;
}
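
/*
 * dp_tx_desc_find() is the inverse of the cookie layout at the top of
 * this file; a completion path can recover the descriptor from a
 * completion-ring cookie (tx_desc_id below) like so:
 *
 *     pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *               DP_TX_DESC_ID_POOL_OS;
 *     page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *               DP_TX_DESC_ID_PAGE_OS;
 *     offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *              DP_TX_DESC_ID_OFFSET_OS;
 *     tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 */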
/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: allocated extension descriptor element, or NULL if the pool
 *         is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
                                                   uint8_t desc_pool_id)
{
    struct dp_tx_ext_desc_elem_s *c_elem;

    desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
    qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
    if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
        qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
        return NULL;
    }
    c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
    soc->tx_ext_desc[desc_pool_id].freelist =
        soc->tx_ext_desc[desc_pool_id].freelist->next;
    soc->tx_ext_desc[desc_pool_id].num_free--;
    qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
    return c_elem;
}
/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
                                       struct dp_tx_ext_desc_elem_s *elem,
                                       uint8_t desc_pool_id)
{
    desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
    qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
    elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
    soc->tx_ext_desc[desc_pool_id].freelist = elem;
    soc->tx_ext_desc[desc_pool_id].num_free++;
    qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 *                                  attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the descriptor list to be freed
 * @desc_pool_id: id of the pool to release to
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
                                                struct dp_tx_ext_desc_elem_s *elem,
                                                uint8_t desc_pool_id,
                                                uint8_t num_free)
{
    struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
    uint8_t freed = num_free;

    /* caller should always guarantee a list of at least num_free nodes */
    qdf_assert_always(elem);

    head = elem;
    c_elem = head;
    tail = head;
    while (c_elem && freed) {
        tail = c_elem;
        c_elem = c_elem->next;
        freed--;
    }

    /* caller should always guarantee a list of at least num_free nodes */
    qdf_assert_always(tail);

    desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
    qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
    tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
    soc->tx_ext_desc[desc_pool_id].freelist = head;
    soc->tx_ext_desc[desc_pool_id].num_free += num_free;
    qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to pick the tso descriptor from
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
        struct dp_soc *soc, uint8_t pool_id)
{
    struct qdf_tso_seg_elem_t *tso_seg = NULL;

    qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
    if (soc->tx_tso_desc[pool_id].freelist) {
        soc->tx_tso_desc[pool_id].num_free--;
        tso_seg = soc->tx_tso_desc[pool_id].freelist;
        soc->tx_tso_desc[pool_id].freelist =
            soc->tx_tso_desc[pool_id].freelist->next;
    }
    qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

    return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to return the tso descriptor to
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
                                       uint8_t pool_id,
                                       struct qdf_tso_seg_elem_t *tso_seg)
{
    qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
    tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
    soc->tx_tso_desc[pool_id].freelist = tso_seg;
    soc->tx_tso_desc[pool_id].num_free++;
    qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
                                                    uint8_t pool_id)
{
    struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

    qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
    if (soc->tx_tso_num_seg[pool_id].freelist) {
        soc->tx_tso_num_seg[pool_id].num_free--;
        tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
        soc->tx_tso_num_seg[pool_id].freelist =
            soc->tx_tso_num_seg[pool_id].freelist->next;
    }
    qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

    return tso_num_seg;
}

static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
                         uint8_t pool_id,
                         struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
    qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
    tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
    soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
    soc->tx_tso_num_seg[pool_id].num_free++;
    qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
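
/*
 * A send path would typically take one qdf_tso_num_seg_elem_t per TSO
 * nbuf plus one qdf_tso_seg_elem_t per segment, and return both on
 * completion; a minimal pairing sketch (error handling elided):
 *
 *     struct qdf_tso_num_seg_elem_t *num_seg;
 *     struct qdf_tso_seg_elem_t *seg;
 *
 *     num_seg = dp_tso_num_seg_alloc(soc, pool_id);
 *     seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *     ...
 *     dp_tx_tso_desc_free(soc, pool_id, seg);
 *     dp_tso_num_seg_free(soc, pool_id, num_seg);
 */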
#endif /* FEATURE_TSO */

/**
 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: tx descriptor on success, NULL on error
 */
static inline struct dp_tx_me_buf_t *
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
    struct dp_tx_me_buf_t *buf = NULL;

    qdf_spin_lock_bh(&pdev->tx_mutex);
    if (pdev->me_buf.freelist) {
        buf = pdev->me_buf.freelist;
        pdev->me_buf.freelist = pdev->me_buf.freelist->next;
        pdev->me_buf.buf_in_use++;
    } else {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Error allocating memory in pool");
        qdf_spin_unlock_bh(&pdev->tx_mutex);
        return NULL;
    }
    qdf_spin_unlock_bh(&pdev->tx_mutex);
    return buf;
}
/**
 * dp_tx_me_free_buf() - Unmap the buffer holding the dest
 *                       address, free the me descriptor and add it to
 *                       the free-pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
    /*
     * If the buf containing the mac address was mapped,
     * it must be unmapped before freeing the me_buf.
     * The "paddr_macbuf" member in the me_buf structure
     * holds the mapped physical address and it must be
     * set to 0 after unmapping.
     */
    if (buf->paddr_macbuf) {
        qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
                                    buf->paddr_macbuf,
                                    QDF_DMA_TO_DEVICE,
                                    QDF_MAC_ADDR_SIZE);
        buf->paddr_macbuf = 0;
    }
    qdf_spin_lock_bh(&pdev->tx_mutex);
    buf->next = pdev->me_buf.freelist;
    pdev->me_buf.freelist = buf;
    pdev->me_buf.buf_in_use--;
    qdf_spin_unlock_bh(&pdev->tx_mutex);
}
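
/*
 * ME buffers follow the same alloc/free discipline as the descriptor
 * pools above; a minimal pairing sketch (the DMA mapping of the
 * destination address, done elsewhere in the tx path, is omitted):
 *
 *     struct dp_tx_me_buf_t *me_buf = dp_tx_me_alloc_buf(pdev);
 *
 *     if (!me_buf)
 *         return QDF_STATUS_E_NOMEM;
 *     ...
 *     dp_tx_me_free_buf(pdev, me_buf);
 */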
#endif /* DP_TX_DESC_H */