dp_tx_desc.h

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"
/*
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* ???Ring ID needed??? */
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
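
/*
 * Illustrative decomposition of a tx descriptor cookie (editor's sketch,
 * not part of the driver API); it mirrors the shifts performed in
 * dp_tx_is_desc_id_valid() further below:
 *
 *   pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *             DP_TX_DESC_ID_POOL_OS;
 *   page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *             DP_TX_DESC_ID_PAGE_OS;
 *   offset  = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *             DP_TX_DESC_ID_OFFSET_OS;
 */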

/*
 * Compile-time assert on tx desc size.
 *
 * If this assert is hit, update POOL_MASK and
 * PAGE_MASK according to the updated size.
 *
 * For the current PAGE mask, the allowed tx_desc size range
 * is between 128 and 256 bytes.
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
                        ((sizeof(struct dp_tx_desc_s)) <=
                         (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
                        ((sizeof(struct dp_tx_desc_s)) >
                         (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1))));

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
        ((pool)->status == FLOW_POOL_INACTIVE)

#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
        dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)      \
do {                                                  \
        (_tx_desc_pool)->elem_size = 0;               \
        (_tx_desc_pool)->freelist = NULL;             \
        (_tx_desc_pool)->pool_size = 0;               \
        (_tx_desc_pool)->avail_desc = 0;              \
        (_tx_desc_pool)->start_th = 0;                \
        (_tx_desc_pool)->stop_th = 0;                 \
        (_tx_desc_pool)->status = FLOW_POOL_INACTIVE; \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */

#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
do {                                             \
        (_tx_desc_pool)->elem_size = 0;          \
        (_tx_desc_pool)->num_allocated = 0;      \
        (_tx_desc_pool)->freelist = NULL;        \
        (_tx_desc_pool)->elem_count = 0;         \
        (_tx_desc_pool)->num_free = 0;           \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

#define MAX_POOL_BUFF_COUNT 10000

#ifdef DP_TX_TRACKING
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
                                        uint32_t magic_pattern)
{
        tx_desc->magic = magic_pattern;
}
#else
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
                                        uint32_t magic_pattern)
{
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within the host for the tx data path).
 * The number of tx descriptors required will be large; based on the
 * number of clients (e.g. 1024 clients x 3 radios), the outstanding
 * MSDUs stored in TQM queues and LMAC queues will be significantly
 * large.
 *
 * To avoid allocating a large contiguous memory block, the qdf
 * multi_page_alloc function is used to allocate memory in multiple
 * pages. The allocated memory is then traversed page by page and each
 * descriptor is linked to the next one, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for
 * VDEV-level flow control.
 *
 * Return: Status code. 0 for success.
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
                                 uint32_t num_elem);

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
                                uint32_t num_elem);

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);
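
/*
 * Usage sketch (editor's illustration, not part of this header): the four
 * pool APIs above follow an alloc -> init -> ... -> deinit -> free
 * lifecycle, typically driven per pool id during soc attach/detach:
 *
 *   if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem) != QDF_STATUS_SUCCESS)
 *           return QDF_STATUS_E_NOMEM;
 *   if (dp_tx_desc_pool_init(soc, pool_id, num_elem) != QDF_STATUS_SUCCESS) {
 *           dp_tx_desc_pool_free(soc, pool_id);
 *           return QDF_STATUS_E_FAULT;
 *   }
 *   ...
 *   dp_tx_desc_pool_deinit(soc, pool_id);
 *   dp_tx_desc_pool_free(soc, pool_id);
 */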

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem);

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem);

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem);

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool);

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: nbuf list for delayed free
 *
 */
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
                                     tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
                               uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
                           uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
                                                 uint8_t flow_pool_id,
                                                 uint32_t flow_pool_size);
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
                                       uint8_t flow_type, uint8_t flow_pool_id,
                                       uint32_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
                                   uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
        struct dp_tx_desc_s *tx_desc = pool->freelist;

        pool->freelist = pool->freelist->next;
        pool->avail_desc--;
        return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
                              struct dp_tx_desc_s *tx_desc)
{
        tx_desc->next = pool->freelist;
        pool->freelist = tx_desc;
        pool->avail_desc++;
}
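
/*
 * Caller pattern (editor's sketch): both helpers above assume the pool's
 * flow_pool_lock is already held and that the freelist is non-empty, as
 * done by dp_tx_desc_alloc()/dp_tx_desc_free() below:
 *
 *   qdf_spin_lock_bh(&pool->flow_pool_lock);
 *   if (pool->avail_desc)
 *           tx_desc = dp_tx_get_desc_flow_pool(pool);
 *   qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */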

#ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
        pool->elem_size = 0;
        pool->freelist = NULL;
        pool->pool_size = 0;
        pool->avail_desc = 0;
        /* zero the full threshold arrays, not just FL_TH_MAX bytes */
        qdf_mem_zero(pool->start_th, sizeof(pool->start_th));
        qdf_mem_zero(pool->stop_th, sizeof(pool->stop_th));
        pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if current avail desc meets threshold
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
        if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
                return true;
        else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
                return true;
        else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
                return true;
        else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
                return true;
        else
                return false;
}

/**
 * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
 * @soc: dp soc
 * @pool: flow pool
 */
static inline void
dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
                             struct dp_tx_desc_pool_s *pool)
{
        if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
                pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
                return;
        } else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
                   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
                pool->status = FLOW_POOL_BE_BK_PAUSED;
        } else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
                   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
                pool->status = FLOW_POOL_VI_PAUSED;
        } else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
                   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
                pool->status = FLOW_POOL_VO_PAUSED;
        } else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
                pool->status = FLOW_POOL_ACTIVE_PAUSED;
        }

        /*
         * The fallthroughs are intentional: a deeper paused state implies
         * that all lower-priority queues must be paused as well, so each
         * case also executes the cases below it.
         */
        switch (pool->status) {
        case FLOW_POOL_ACTIVE_PAUSED:
                soc->pause_cb(pool->flow_pool_id,
                              WLAN_NETIF_PRIORITY_QUEUE_OFF,
                              WLAN_DATA_FLOW_CTRL_PRI);
                fallthrough;
        case FLOW_POOL_VO_PAUSED:
                soc->pause_cb(pool->flow_pool_id,
                              WLAN_NETIF_VO_QUEUE_OFF,
                              WLAN_DATA_FLOW_CTRL_VO);
                fallthrough;
        case FLOW_POOL_VI_PAUSED:
                soc->pause_cb(pool->flow_pool_id,
                              WLAN_NETIF_VI_QUEUE_OFF,
                              WLAN_DATA_FLOW_CTRL_VI);
                fallthrough;
        case FLOW_POOL_BE_BK_PAUSED:
                soc->pause_cb(pool->flow_pool_id,
                              WLAN_NETIF_BE_BK_QUEUE_OFF,
                              WLAN_DATA_FLOW_CTRL_BE_BK);
                break;
        default:
                dp_err("Invalid pool status:%u to adjust", pool->status);
        }
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
        bool is_pause = false;
        enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
        enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
        enum netif_reason_type reason;

        if (qdf_likely(pool)) {
                qdf_spin_lock_bh(&pool->flow_pool_lock);
                if (qdf_likely(pool->avail_desc &&
                               pool->status != FLOW_POOL_INVALID &&
                               pool->status != FLOW_POOL_INACTIVE)) {
                        tx_desc = dp_tx_get_desc_flow_pool(pool);
                        tx_desc->pool_id = desc_pool_id;
                        tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
                        dp_tx_desc_set_magic(tx_desc,
                                             DP_TX_MAGIC_PATTERN_INUSE);
                        is_pause = dp_tx_is_threshold_reached(pool,
                                                              pool->avail_desc);

                        if (qdf_unlikely(pool->status ==
                                         FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
                                dp_tx_adjust_flow_pool_state(soc, pool);
                                is_pause = false;
                        }

                        if (qdf_unlikely(is_pause)) {
                                switch (pool->status) {
                                case FLOW_POOL_ACTIVE_UNPAUSED:
                                        /* pause network BE/BK queue */
                                        act = WLAN_NETIF_BE_BK_QUEUE_OFF;
                                        reason = WLAN_DATA_FLOW_CTRL_BE_BK;
                                        level = DP_TH_BE_BK;
                                        pool->status = FLOW_POOL_BE_BK_PAUSED;
                                        break;
                                case FLOW_POOL_BE_BK_PAUSED:
                                        /* pause network VI queue */
                                        act = WLAN_NETIF_VI_QUEUE_OFF;
                                        reason = WLAN_DATA_FLOW_CTRL_VI;
                                        level = DP_TH_VI;
                                        pool->status = FLOW_POOL_VI_PAUSED;
                                        break;
                                case FLOW_POOL_VI_PAUSED:
                                        /* pause network VO queue */
                                        act = WLAN_NETIF_VO_QUEUE_OFF;
                                        reason = WLAN_DATA_FLOW_CTRL_VO;
                                        level = DP_TH_VO;
                                        pool->status = FLOW_POOL_VO_PAUSED;
                                        break;
                                case FLOW_POOL_VO_PAUSED:
                                        /* pause network HI PRI queue */
                                        act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
                                        reason = WLAN_DATA_FLOW_CTRL_PRI;
                                        level = DP_TH_HI;
                                        pool->status = FLOW_POOL_ACTIVE_PAUSED;
                                        break;
                                case FLOW_POOL_ACTIVE_PAUSED:
                                        act = WLAN_NETIF_ACTION_TYPE_NONE;
                                        break;
                                default:
                                        dp_err_rl("pool status is %d!",
                                                  pool->status);
                                        break;
                                }

                                if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
                                        pool->latest_pause_time[level] =
                                                qdf_get_system_timestamp();
                                        soc->pause_cb(desc_pool_id,
                                                      act,
                                                      reason);
                                }
                        }
                } else {
                        pool->pkt_drop_no_desc++;
                }
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
        } else {
                dp_err_rl("NULL desc pool pool_id %d", desc_pool_id);
                soc->pool_stats.pkt_drop_no_pool++;
        }

        return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
                uint8_t desc_pool_id)
{
        struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
        qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
        enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
        enum netif_reason_type reason;

        qdf_spin_lock_bh(&pool->flow_pool_lock);
        tx_desc->vdev_id = DP_INVALID_VDEV_ID;
        tx_desc->nbuf = NULL;
        tx_desc->flags = 0;
        dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
        dp_tx_put_desc_flow_pool(pool, tx_desc);
        switch (pool->status) {
        case FLOW_POOL_ACTIVE_PAUSED:
                if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
                        act = WLAN_NETIF_PRIORITY_QUEUE_ON;
                        reason = WLAN_DATA_FLOW_CTRL_PRI;
                        pool->status = FLOW_POOL_VO_PAUSED;

                        /* Update maximum pause duration for HI queue */
                        pause_dur = unpause_time -
                                    pool->latest_pause_time[DP_TH_HI];
                        if (pool->max_pause_time[DP_TH_HI] < pause_dur)
                                pool->max_pause_time[DP_TH_HI] = pause_dur;
                }
                break;
        case FLOW_POOL_VO_PAUSED:
                if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
                        act = WLAN_NETIF_VO_QUEUE_ON;
                        reason = WLAN_DATA_FLOW_CTRL_VO;
                        pool->status = FLOW_POOL_VI_PAUSED;

                        /* Update maximum pause duration for VO queue */
                        pause_dur = unpause_time -
                                    pool->latest_pause_time[DP_TH_VO];
                        if (pool->max_pause_time[DP_TH_VO] < pause_dur)
                                pool->max_pause_time[DP_TH_VO] = pause_dur;
                }
                break;
        case FLOW_POOL_VI_PAUSED:
                if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
                        act = WLAN_NETIF_VI_QUEUE_ON;
                        reason = WLAN_DATA_FLOW_CTRL_VI;
                        pool->status = FLOW_POOL_BE_BK_PAUSED;

                        /* Update maximum pause duration for VI queue */
                        pause_dur = unpause_time -
                                    pool->latest_pause_time[DP_TH_VI];
                        if (pool->max_pause_time[DP_TH_VI] < pause_dur)
                                pool->max_pause_time[DP_TH_VI] = pause_dur;
                }
                break;
        case FLOW_POOL_BE_BK_PAUSED:
                if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
                        act = WLAN_NETIF_BE_BK_QUEUE_ON;
                        reason = WLAN_DATA_FLOW_CTRL_BE_BK;
                        pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

                        /* Update maximum pause duration for BE_BK queue */
                        pause_dur = unpause_time -
                                    pool->latest_pause_time[DP_TH_BE_BK];
                        if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
                                pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
                }
                break;
        case FLOW_POOL_INVALID:
                if (pool->avail_desc == pool->pool_size) {
                        dp_tx_desc_pool_deinit(soc, desc_pool_id);
                        dp_tx_desc_pool_free(soc, desc_pool_id);
                        qdf_spin_unlock_bh(&pool->flow_pool_lock);
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "%s %d pool is freed!!",
                                  __func__, __LINE__);
                        return;
                }
                break;
        case FLOW_POOL_ACTIVE_UNPAUSED:
                break;
        default:
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "%s %d pool is INACTIVE State!!",
                          __func__, __LINE__);
                break;
        }

        if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
                soc->pause_cb(pool->flow_pool_id,
                              act, reason);
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
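
/*
 * Editor's note on the scheme above: stop_th[] and start_th[] form a
 * per-AC hysteresis. dp_tx_desc_alloc() pauses a netif queue when
 * avail_desc drops to that AC's stop threshold, and dp_tx_desc_free()
 * unpauses it only once avail_desc climbs back above the corresponding
 * start threshold (configured above the stop threshold), which avoids
 * rapid pause/unpause flapping around a single watermark.
 */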

#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
        if (qdf_unlikely(avail_desc < pool->stop_th))
                return true;
        else
                return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: Tx descriptor or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

        if (pool) {
                qdf_spin_lock_bh(&pool->flow_pool_lock);
                if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
                    pool->avail_desc) {
                        tx_desc = dp_tx_get_desc_flow_pool(pool);
                        tx_desc->pool_id = desc_pool_id;
                        tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
                        dp_tx_desc_set_magic(tx_desc,
                                             DP_TX_MAGIC_PATTERN_INUSE);
                        if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
                                pool->status = FLOW_POOL_ACTIVE_PAUSED;
                                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                                /* pause network queues */
                                soc->pause_cb(desc_pool_id,
                                              WLAN_STOP_ALL_NETIF_QUEUE,
                                              WLAN_DATA_FLOW_CONTROL);
                        } else {
                                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                        }
                } else {
                        pool->pkt_drop_no_desc++;
                        qdf_spin_unlock_bh(&pool->flow_pool_lock);
                }
        } else {
                soc->pool_stats.pkt_drop_no_pool++;
        }

        return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: Descriptor to free
 * @desc_pool_id: Descriptor pool Id
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
                uint8_t desc_pool_id)
{
        struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

        qdf_spin_lock_bh(&pool->flow_pool_lock);
        tx_desc->vdev_id = DP_INVALID_VDEV_ID;
        tx_desc->nbuf = NULL;
        tx_desc->flags = 0;
        dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
        dp_tx_put_desc_flow_pool(pool, tx_desc);
        switch (pool->status) {
        case FLOW_POOL_ACTIVE_PAUSED:
                if (pool->avail_desc > pool->start_th) {
                        soc->pause_cb(pool->flow_pool_id,
                                      WLAN_WAKE_ALL_NETIF_QUEUE,
                                      WLAN_DATA_FLOW_CONTROL);
                        pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
                }
                break;
        case FLOW_POOL_INVALID:
                if (pool->avail_desc == pool->pool_size) {
                        dp_tx_desc_pool_deinit(soc, desc_pool_id);
                        dp_tx_desc_pool_free(soc, desc_pool_id);
                        qdf_spin_unlock_bh(&pool->flow_pool_lock);
                        qdf_print("%s %d pool is freed!!",
                                  __func__, __LINE__);
                        return;
                }
                break;
        case FLOW_POOL_ACTIVE_UNPAUSED:
                break;
        default:
                qdf_print("%s %d pool is INACTIVE State!!",
                          __func__, __LINE__);
                break;
        }
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#endif /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                                     DP_MOD_ID_CDP);
        struct dp_tx_desc_pool_s *pool;
        bool status;

        if (!vdev)
                return false;

        pool = vdev->pool;
        status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

        return status;
}

#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
                                                     uint8_t flow_id,
                                                     uint8_t flow_type,
                                                     uint8_t flow_pool_id,
                                                     uint32_t flow_pool_size)
{
        return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
                                                 uint8_t flow_id,
                                                 uint8_t flow_type,
                                                 uint8_t flow_pool_id)
{
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
        if (tx_desc)
                prefetch(tx_desc);
}
#else
static inline
void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id
 *
 * Return: Tx Descriptor or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
                                                    uint8_t desc_pool_id)
{
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

        TX_DESC_LOCK_LOCK(&pool->lock);

        tx_desc = pool->freelist;

        /* Pool is exhausted */
        if (!tx_desc) {
                TX_DESC_LOCK_UNLOCK(&pool->lock);
                return NULL;
        }

        pool->freelist = pool->freelist->next;
        pool->num_allocated++;
        pool->num_free--;
        dp_tx_prefetch_desc(pool->freelist);

        tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

        TX_DESC_LOCK_UNLOCK(&pool->lock);

        return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
 * from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 * @num_requested: number of descriptors required
 *
 * Allocate multiple tx descriptors and link them into a list.
 *
 * Return: first descriptor pointer or NULL
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
                struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
        struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
        uint8_t count;
        struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

        TX_DESC_LOCK_LOCK(&pool->lock);

        if ((num_requested == 0) ||
            (pool->num_free < num_requested)) {
                TX_DESC_LOCK_UNLOCK(&pool->lock);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "%s, No Free Desc: Available(%d) num_requested(%d)",
                          __func__, pool->num_free,
                          num_requested);
                return NULL;
        }

        h_desc = pool->freelist;

        /* h_desc should never be NULL since num_free >= num_requested */
        qdf_assert_always(h_desc);

        /* walk to the last requested descriptor, flagging each one */
        c_desc = h_desc;
        for (count = 0; count < (num_requested - 1); count++) {
                c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
                c_desc = c_desc->next;
        }
        c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

        /* account for all num_requested descriptors (not just the loop
         * count) and detach them from the freelist
         */
        pool->num_free -= num_requested;
        pool->num_allocated += num_requested;
        pool->freelist = c_desc->next;
        c_desc->next = NULL;

        TX_DESC_LOCK_UNLOCK(&pool->lock);
        return h_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: descriptor to free
 * @desc_pool_id: ID of the free pool
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
                uint8_t desc_pool_id)
{
        struct dp_tx_desc_pool_s *pool = NULL;

        tx_desc->vdev_id = DP_INVALID_VDEV_ID;
        tx_desc->nbuf = NULL;
        tx_desc->flags = 0;

        pool = &soc->tx_desc[desc_pool_id];
        TX_DESC_LOCK_LOCK(&pool->lock);
        tx_desc->next = pool->freelist;
        pool->freelist = tx_desc;
        pool->num_allocated--;
        pool->num_free++;
        TX_DESC_LOCK_UNLOCK(&pool->lock);
}
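
/*
 * Usage sketch (editor's illustration): in this non-flow-control build,
 * the alloc/free pair above is the whole per-packet descriptor lifecycle:
 *
 *   struct dp_tx_desc_s *desc = dp_tx_desc_alloc(soc, pool_id);
 *
 *   if (!desc)
 *           return QDF_STATUS_E_RESOURCES;   // pool exhausted
 *   // ... fill desc and hand the frame to HW ...
 *   dp_tx_desc_free(soc, desc, pool_id);     // on tx completion
 */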
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

#ifdef QCA_DP_TX_DESC_ID_CHECK
/**
 * dp_tx_is_desc_id_valid() - check if the given tx desc id is valid
 * @soc: Handle to DP SoC structure
 * @tx_desc_id: tx desc id to check
 *
 * Return: true or false
 */
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
        uint8_t pool_id;
        uint16_t page_id, offset;
        struct dp_tx_desc_pool_s *pool;

        pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
                        DP_TX_DESC_ID_POOL_OS;
        /* Pool ID is out of limit */
        if (pool_id > wlan_cfg_get_num_tx_desc_pool(
                                soc->wlan_cfg_ctx)) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_FATAL,
                          "%s:Tx Comp pool id %d not valid",
                          __func__,
                          pool_id);
                goto warn_exit;
        }

        pool = &soc->tx_desc[pool_id];
        /* the pool is freed */
        if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_FATAL,
                          "%s:the pool %d has been freed",
                          __func__,
                          pool_id);
                goto warn_exit;
        }

        page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
                        DP_TX_DESC_ID_PAGE_OS;
        /* the page id is out of limit */
        if (page_id >= pool->desc_pages.num_pages) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_FATAL,
                          "%s:the page id %d invalid, pool id %d, num_page %d",
                          __func__,
                          page_id,
                          pool_id,
                          pool->desc_pages.num_pages);
                goto warn_exit;
        }

        offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
                        DP_TX_DESC_ID_OFFSET_OS;
        /* the offset is out of limit */
        if (offset >= pool->desc_pages.num_element_per_page) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_FATAL,
                          "%s:offset %d invalid, pool%d,num_elem_per_page %d",
                          __func__,
                          offset,
                          pool_id,
                          pool->desc_pages.num_element_per_page);
                goto warn_exit;
        }

        return true;

warn_exit:
        QDF_TRACE(QDF_MODULE_ID_DP,
                  QDF_TRACE_LEVEL_FATAL,
                  "%s:Tx desc id 0x%x not valid",
                  __func__,
                  tx_desc_id);
        qdf_assert_always(0);
        return false;
}

#else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
        return true;
}
#endif /* QCA_DP_TX_DESC_ID_CHECK */

#ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
                                                    struct dp_tx_desc_s *desc,
                                                    uint8_t allow_fast_comp)
{
        if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
            qdf_likely(allow_fast_comp)) {
                desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
        }
}
#else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
                                                    struct dp_tx_desc_s *desc,
                                                    uint8_t allow_fast_comp)
{
}
#endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */

/**
 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
 * @soc: handle for the device sending the data
 * @pool_id: descriptor pool id
 * @page_id: page id within the pool
 * @offset: element offset within the page
 *
 * Use page and offset to find the corresponding descriptor object in
 * the given descriptor pool.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
                                                   uint8_t pool_id,
                                                   uint16_t page_id,
                                                   uint16_t offset)
{
        struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];

        return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
                tx_desc_pool->elem_size * offset;
}
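
/*
 * Illustrative completion-path flow (editor's sketch): a cookie from the
 * tx completion ring is validated, then resolved to its descriptor using
 * the masks defined at the top of this file:
 *
 *   if (dp_tx_is_desc_id_valid(soc, tx_desc_id)) {
 *           pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *                     DP_TX_DESC_ID_POOL_OS;
 *           page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *                     DP_TX_DESC_ID_PAGE_OS;
 *           offset  = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *                     DP_TX_DESC_ID_OFFSET_OS;
 *           tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 *   }
 */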

/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: tx extension descriptor on success, NULL if the pool is empty
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
                                                   uint8_t desc_pool_id)
{
        struct dp_tx_ext_desc_elem_s *c_elem;

        qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
        if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
                qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
                return NULL;
        }
        c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
        soc->tx_ext_desc[desc_pool_id].freelist =
                soc->tx_ext_desc[desc_pool_id].freelist->next;
        soc->tx_ext_desc[desc_pool_id].num_free--;
        qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
        return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @elem: ext descriptor to be released
 * @desc_pool_id: target pool id
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
                                       struct dp_tx_ext_desc_elem_s *elem,
                                       uint8_t desc_pool_id)
{
        qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
        elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
        soc->tx_ext_desc[desc_pool_id].freelist = elem;
        soc->tx_ext_desc[desc_pool_id].num_free++;
        qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors
 * and attach them to the free list
 * @soc: Handle to DP SoC structure
 * @elem: head of the tx extension descriptor list to be freed
 * @desc_pool_id: target pool id
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
                struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
                uint8_t num_free)
{
        struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
        uint8_t freed = num_free;

        /* caller should always guarantee a list of at least num_free nodes */
        qdf_assert_always(elem);

        /* walk to the num_free'th node; it becomes the tail of the splice */
        head = elem;
        c_elem = head;
        tail = head;
        while (c_elem && freed) {
                tail = c_elem;
                c_elem = c_elem->next;
                freed--;
        }

        /* caller should always guarantee a list of at least num_free nodes */
        qdf_assert_always(tail);

        qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
        tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
        soc->tx_ext_desc[desc_pool_id].freelist = head;
        soc->tx_ext_desc[desc_pool_id].num_free += num_free;
        qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id from which to allocate the tso descriptor
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
                struct dp_soc *soc, uint8_t pool_id)
{
        struct qdf_tso_seg_elem_t *tso_seg = NULL;

        qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
        if (soc->tx_tso_desc[pool_id].freelist) {
                soc->tx_tso_desc[pool_id].num_free--;
                tso_seg = soc->tx_tso_desc[pool_id].freelist;
                soc->tx_tso_desc[pool_id].freelist =
                        soc->tx_tso_desc[pool_id].freelist->next;
        }
        qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

        return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to which the tso descriptor is returned
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in the soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
                                       uint8_t pool_id,
                                       struct qdf_tso_seg_elem_t *tso_seg)
{
        qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
        tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
        soc->tx_tso_desc[pool_id].freelist = tso_seg;
        soc->tx_tso_desc[pool_id].num_free++;
        qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
                                                    uint8_t pool_id)
{
        struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

        qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
        if (soc->tx_tso_num_seg[pool_id].freelist) {
                soc->tx_tso_num_seg[pool_id].num_free--;
                tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
                soc->tx_tso_num_seg[pool_id].freelist =
                        soc->tx_tso_num_seg[pool_id].freelist->next;
        }
        qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

        return tso_num_seg;
}

static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
                         uint8_t pool_id,
                         struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
        qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
        tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
        soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
        soc->tx_tso_num_seg[pool_id].num_free++;
        qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
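
/*
 * Pairing sketch (editor's illustration): a TSO send typically takes one
 * qdf_tso_num_seg_elem_t per TSO nbuf plus one qdf_tso_seg_elem_t per
 * generated segment, all from the same pool id, and returns them on
 * completion:
 *
 *   struct qdf_tso_num_seg_elem_t *num_seg =
 *                   dp_tso_num_seg_alloc(soc, pool_id);
 *   struct qdf_tso_seg_elem_t *seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *   ...
 *   dp_tx_tso_desc_free(soc, pool_id, seg);
 *   dp_tso_num_seg_free(soc, pool_id, num_seg);
 */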
#endif /* FEATURE_TSO */

/**
 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: tx descriptor on success, NULL on error
 */
static inline struct dp_tx_me_buf_t *
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
        struct dp_tx_me_buf_t *buf = NULL;

        qdf_spin_lock_bh(&pdev->tx_mutex);
        if (pdev->me_buf.freelist) {
                buf = pdev->me_buf.freelist;
                pdev->me_buf.freelist = pdev->me_buf.freelist->next;
                pdev->me_buf.buf_in_use++;
        } else {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "Error allocating memory in pool");
                qdf_spin_unlock_bh(&pdev->tx_mutex);
                return NULL;
        }
        qdf_spin_unlock_bh(&pdev->tx_mutex);

        return buf;
}

/**
 * dp_tx_me_free_buf() - Unmap the buffer holding the dest
 * address, free the me descriptor and add it to the free-pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: Allocated ME BUF
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
        /*
         * If the buf containing the mac address was mapped,
         * it must be unmapped before freeing the me_buf.
         * The "paddr_macbuf" member in the me_buf structure
         * holds the mapped physical address and it must be
         * set to 0 after unmapping.
         */
        if (buf->paddr_macbuf) {
                qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
                                            buf->paddr_macbuf,
                                            QDF_DMA_TO_DEVICE,
                                            QDF_MAC_ADDR_SIZE);
                buf->paddr_macbuf = 0;
        }
        qdf_spin_lock_bh(&pdev->tx_mutex);
        buf->next = pdev->me_buf.freelist;
        pdev->me_buf.freelist = buf;
        pdev->me_buf.buf_in_use--;
        qdf_spin_unlock_bh(&pdev->tx_mutex);
}

#endif /* DP_TX_DESC_H */