/* dp_rx_mon_status.c */
  1. /*
  2. * Copyright (c) 2017 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "dp_types.h"
  19. #include "dp_rx.h"
  20. #include "dp_peer.h"
  21. #include "hal_rx.h"
  22. #include "hal_api.h"
  23. #include "qdf_trace.h"
  24. #include "qdf_nbuf.h"
  25. #include "hal_api_mon.h"
  26. #include "ieee80211.h"
  27. #include "dp_rx_mon.h"
  28. /**
  29. * dp_rx_mon_status_process_tlv() - Process status TLV in status
  30. * buffer on Rx status Queue posted by status SRNG processing.
  31. * @soc: core txrx main context
  32. * @mac_id: mac_id which is one of 3 mac_ids _ring
  33. *
  34. * Return: none
  35. */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id) {
	struct dp_pdev *pdev = soc->pdev_list[mac_id];
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status;

#ifdef DP_INTR_POLL_BASED
	if (!pdev)
		return;
#endif

	/* PPDU info is accumulated across status buffers in pdev context */
	ppdu_info = &pdev->ppdu_info;

	/* Parse only while a PPDU is being collected; any other state means
	 * there is nothing to do for the queued status buffers yet.
	 */
	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	/* Drain every status buffer posted by the SRNG reap path */
	while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);
		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;

		/* Walk the TLVs inside this buffer until the HAL reports the
		 * PPDU is complete (or the buffer end is reached).
		 */
		do {
			tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
					ppdu_info);
			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

			/* Never walk past the end of the status buffer */
			if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE)
				break;

		} while (tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE);

		qdf_nbuf_free(status_nbuf);

		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			/* Temporary: immediately re-arm collection for the
			 * next PPDU instead of waiting for a consumer to
			 * reset the state.
			 */
			pdev->mon_ppdu_status =
				DP_PPDU_STATUS_START;
			break;
		}
	}
	return;
}
  73. /*
  74. * dp_rx_mon_status_srng_process() - Process monitor status ring
  75. * post the status ring buffer to Rx status Queue for later
  76. * processing when status ring is filled with status TLV.
  77. * Allocate a new buffer to status ring if the filled buffer
  78. * is posted.
  79. *
  80. * @soc: core txrx main context
  81. * @mac_id: mac_id which is one of 3 mac_ids
  82. * @quota: No. of ring entry that can be serviced in one shot.
  83. * Return: uint32_t: No. of ring entry that is processed.
  84. */
  85. static inline uint32_t
  86. dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
  87. uint32_t quota)
  88. {
  89. struct dp_pdev *pdev = soc->pdev_list[mac_id];
  90. void *hal_soc;
  91. void *mon_status_srng;
  92. void *rxdma_mon_status_ring_entry;
  93. QDF_STATUS status;
  94. uint32_t work_done = 0;
  95. #ifdef DP_INTR_POLL_BASED
  96. if (!pdev)
  97. return work_done;
  98. #endif
  99. mon_status_srng = pdev->rxdma_mon_status_ring.hal_srng;
  100. qdf_assert(mon_status_srng);
  101. hal_soc = soc->hal_soc;
  102. qdf_assert(hal_soc);
  103. if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
  104. goto done;
  105. /* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
  106. * BUFFER_ADDR_INFO STRUCT
  107. */
  108. while (qdf_likely((rxdma_mon_status_ring_entry =
  109. hal_srng_src_peek(hal_soc, mon_status_srng))
  110. && quota--)) {
  111. uint32_t rx_buf_cookie;
  112. qdf_nbuf_t status_nbuf;
  113. struct dp_rx_desc *rx_desc;
  114. uint8_t *status_buf;
  115. qdf_dma_addr_t paddr;
  116. uint64_t buf_addr;
  117. buf_addr =
  118. (HAL_RX_BUFFER_ADDR_31_0_GET(
  119. rxdma_mon_status_ring_entry) |
  120. ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
  121. rxdma_mon_status_ring_entry)) << 32));
  122. if (qdf_likely(buf_addr)) {
  123. rx_buf_cookie =
  124. HAL_RX_BUF_COOKIE_GET(
  125. rxdma_mon_status_ring_entry);
  126. rx_desc = dp_rx_cookie_2_va_mon_status(soc,
  127. rx_buf_cookie);
  128. qdf_assert(rx_desc);
  129. status_nbuf = rx_desc->nbuf;
  130. qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
  131. QDF_DMA_FROM_DEVICE);
  132. status_buf = qdf_nbuf_data(status_nbuf);
  133. status = hal_get_rx_status_done(status_buf);
  134. if (status != QDF_STATUS_SUCCESS) {
  135. QDF_TRACE(QDF_MODULE_ID_DP,
  136. QDF_TRACE_LEVEL_WARN,
  137. "[%s][%d] status not done",
  138. __func__, __LINE__);
  139. break;
  140. }
  141. qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE);
  142. qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
  143. QDF_DMA_FROM_DEVICE);
  144. /* Put the status_nbuf to queue */
  145. qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);
  146. } else {
  147. union dp_rx_desc_list_elem_t *desc_list = NULL;
  148. union dp_rx_desc_list_elem_t *tail = NULL;
  149. struct rx_desc_pool *rx_desc_pool;
  150. uint32_t num_alloc_desc;
  151. rx_desc_pool = &soc->rx_desc_status[mac_id];
  152. num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
  153. rx_desc_pool,
  154. 1,
  155. &desc_list,
  156. &tail);
  157. rx_desc = &desc_list->rx_desc;
  158. }
  159. /* Allocate a new skb */
  160. status_nbuf = qdf_nbuf_alloc(pdev->osif_pdev, RX_BUFFER_SIZE,
  161. RX_BUFFER_RESERVATION, RX_BUFFER_ALIGNMENT, FALSE);
  162. status_buf = qdf_nbuf_data(status_nbuf);
  163. hal_clear_rx_status_done(status_buf);
  164. qdf_nbuf_map_single(soc->osdev, status_nbuf,
  165. QDF_DMA_BIDIRECTIONAL);
  166. paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);
  167. rx_desc->nbuf = status_nbuf;
  168. hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
  169. paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM);
  170. rxdma_mon_status_ring_entry =
  171. hal_srng_src_get_next(hal_soc, mon_status_srng);
  172. work_done++;
  173. }
  174. done:
  175. hal_srng_access_end(hal_soc, mon_status_srng);
  176. return work_done;
  177. }
  178. /*
  179. * dp_rx_mon_status_process() - Process monitor status ring and
  180. * TLV in status ring.
  181. *
  182. * @soc: core txrx main context
  183. * @mac_id: mac_id which is one of 3 mac_ids
  184. * @quota: No. of ring entry that can be serviced in one shot.
  185. * Return: uint32_t: No. of ring entry that is processed.
  186. */
  187. static inline uint32_t
  188. dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
  189. uint32_t work_done;
  190. work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota);
  191. dp_rx_mon_status_process_tlv(soc, mac_id);
  192. return work_done;
  193. }
  194. /**
  195. * dp_mon_process() - Main monitor mode processing roution.
  196. * This call monitor status ring process then monitor
  197. * destination ring process.
  198. * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
  199. * @soc: core txrx main context
  200. * @mac_id: mac_id which is one of 3 mac_ids
  201. * @quota: No. of status ring entry that can be serviced in one shot.
  202. * Return: uint32_t: No. of ring entry that is processed.
  203. */
  204. uint32_t
  205. dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
  206. uint32_t work_done;
  207. work_done = dp_rx_mon_status_process(soc, mac_id, quota);
  208. dp_rx_mon_dest_process(soc, mac_id, quota);
  209. return work_done;
  210. }
/**
 * dp_rx_pdev_mon_status_detach() - detach dp rx for status ring
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX status ring from
 * main device context. It will free DP Rx resources for
 * status ring
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
  222. QDF_STATUS
  223. dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev)
  224. {
  225. uint8_t pdev_id = pdev->pdev_id;
  226. struct dp_soc *soc = pdev->soc;
  227. struct rx_desc_pool *rx_desc_pool;
  228. rx_desc_pool = &soc->rx_desc_status[pdev_id];
  229. dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
  230. return QDF_STATUS_SUCCESS;
  231. }
/*
 * dp_rx_mon_status_buffers_replenish() - replenish monitor status ring with
 * rx nbufs called during dp rx
 * monitor status ring initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffer to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *             process or NULL during dp rx initialization or
 *             out of buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 * Return: return success or failure
 */
  249. static inline
  250. QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
  251. uint32_t mac_id,
  252. struct dp_srng *dp_rxdma_srng,
  253. struct rx_desc_pool *rx_desc_pool,
  254. uint32_t num_req_buffers,
  255. union dp_rx_desc_list_elem_t **desc_list,
  256. union dp_rx_desc_list_elem_t **tail,
  257. uint8_t owner)
  258. {
  259. uint32_t num_alloc_desc;
  260. uint16_t num_desc_to_free = 0;
  261. uint32_t num_entries_avail;
  262. uint32_t count;
  263. int sync_hw_ptr = 1;
  264. qdf_dma_addr_t paddr;
  265. qdf_nbuf_t rx_netbuf;
  266. void *rxdma_ring_entry;
  267. union dp_rx_desc_list_elem_t *next;
  268. void *rxdma_srng;
  269. uint8_t *status_buf;
  270. rxdma_srng = dp_rxdma_srng->hal_srng;
  271. qdf_assert(rxdma_srng);
  272. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  273. "[%s][%d] requested %d buffers for replenish\n",
  274. __func__, __LINE__, num_req_buffers);
  275. /*
  276. * if desc_list is NULL, allocate the descs from freelist
  277. */
  278. if (!(*desc_list)) {
  279. num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
  280. rx_desc_pool,
  281. num_req_buffers,
  282. desc_list,
  283. tail);
  284. if (!num_alloc_desc) {
  285. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  286. "[%s][%d] no free rx_descs in freelist\n",
  287. __func__, __LINE__);
  288. return QDF_STATUS_E_NOMEM;
  289. }
  290. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  291. "[%s][%d] %d rx desc allocated\n", __func__, __LINE__,
  292. num_alloc_desc);
  293. num_req_buffers = num_alloc_desc;
  294. }
  295. hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
  296. num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
  297. rxdma_srng, sync_hw_ptr);
  298. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  299. "[%s][%d] no of availble entries in rxdma ring: %d\n",
  300. __func__, __LINE__, num_entries_avail);
  301. if (num_entries_avail < num_req_buffers) {
  302. num_desc_to_free = num_req_buffers - num_entries_avail;
  303. num_req_buffers = num_entries_avail;
  304. }
  305. for (count = 0; count < num_req_buffers; count++) {
  306. rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
  307. rxdma_srng);
  308. rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
  309. RX_BUFFER_SIZE,
  310. RX_BUFFER_RESERVATION,
  311. RX_BUFFER_ALIGNMENT,
  312. FALSE);
  313. status_buf = qdf_nbuf_data(rx_netbuf);
  314. hal_clear_rx_status_done(status_buf);
  315. memset(status_buf, 0, RX_BUFFER_SIZE);
  316. qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
  317. QDF_DMA_BIDIRECTIONAL);
  318. paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
  319. next = (*desc_list)->next;
  320. (*desc_list)->rx_desc.nbuf = rx_netbuf;
  321. hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
  322. (*desc_list)->rx_desc.cookie, owner);
  323. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  324. "[%s][%d] rx_desc=%p, cookie=%d, nbuf=%p, \
  325. status_buf=%p paddr=%p\n",
  326. __func__, __LINE__, &(*desc_list)->rx_desc,
  327. (*desc_list)->rx_desc.cookie, rx_netbuf,
  328. status_buf, (void *)paddr);
  329. *desc_list = next;
  330. }
  331. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  332. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  333. "successfully replenished %d buffers\n", num_req_buffers);
  334. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  335. "%d rx desc added back to free list\n", num_desc_to_free);
  336. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  337. "[%s][%d] desc_list=%p, tail=%p rx_desc=%p, cookie=%d\n",
  338. __func__, __LINE__, desc_list, tail, &(*desc_list)->rx_desc,
  339. (*desc_list)->rx_desc.cookie);
  340. /*
  341. * add any available free desc back to the free list
  342. */
  343. if (*desc_list) {
  344. dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
  345. mac_id, rx_desc_pool);
  346. }
  347. return QDF_STATUS_SUCCESS;
  348. }
  349. /**
  350. * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
  351. * @pdev: core txrx pdev context
  352. *
  353. * This function will attach a DP RX monitor status ring into pDEV
  354. * and replenish monitor status ring with buffer.
  355. *
  356. * Return: QDF_STATUS_SUCCESS: success
  357. * QDF_STATUS_E_RESOURCES: Error return
  358. */
  359. QDF_STATUS
  360. dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev) {
  361. uint8_t pdev_id = pdev->pdev_id;
  362. struct dp_soc *soc = pdev->soc;
  363. union dp_rx_desc_list_elem_t *desc_list = NULL;
  364. union dp_rx_desc_list_elem_t *tail = NULL;
  365. struct dp_srng *rxdma_srng;
  366. uint32_t rxdma_entries;
  367. struct rx_desc_pool *rx_desc_pool;
  368. rxdma_srng = &pdev->rxdma_mon_status_ring;
  369. rxdma_entries = rxdma_srng->alloc_size/hal_srng_get_entrysize(
  370. soc->hal_soc, RXDMA_MONITOR_STATUS);
  371. rx_desc_pool = &soc->rx_desc_status[pdev_id];
  372. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  373. "%s: Mon RX Status Pool[%d] allocation size=%d\n",
  374. __func__, pdev_id, rxdma_entries);
  375. dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries+1, rx_desc_pool);
  376. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  377. "%s: Mon RX Status Buffers Replenish pdev_id=%d\n",
  378. __func__, pdev_id);
  379. dp_rx_mon_status_buffers_replenish(soc, pdev_id, rxdma_srng,
  380. rx_desc_pool, rxdma_entries, &desc_list, &tail,
  381. HAL_RX_BUF_RBM_SW3_BM);
  382. qdf_nbuf_queue_init(&pdev->rx_status_q);
  383. pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
  384. return QDF_STATUS_SUCCESS;
  385. }