/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <htt.h>
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_rh_htt.h"
#include "dp_rh_rx.h"
#include "qdf_mem.h"
#include "cdp_txrx_cmn_struct.h"
#include "dp_tx_desc.h"
#include "dp_rh.h"
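
/*
 * Total nbuf allocation size for an HTT message: the HTT payload plus
 * headroom for the HTC header and its alignment padding.
 */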
#define HTT_MSG_BUF_SIZE(msg_bytes) \
	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
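
/*
 * Re-initialize a T2H indication buffer so it can be reposted to the rx
 * ring: push the data pointer back over the HTC header and alignment
 * padding, reset the nbuf bookkeeping fields, and hand the buffer back
 * to the device for DMA.
 */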
#define HTT_T2H_MSG_BUF_REINIT(_buf, dev) \
	do { \
		qdf_nbuf_push_head(_buf, (HTC_HEADER_LEN) + \
				   HTC_HDR_ALIGNMENT_PADDING); \
		qdf_nbuf_init_fast((_buf)); \
		qdf_mem_dma_sync_single_for_device(dev, \
						   (QDF_NBUF_CB_PADDR(_buf)), \
						   (skb_end_pointer(_buf) - \
						    (_buf)->data), \
						   PCI_DMA_FROMDEVICE); \
	} while (0)

/**
 * dp_htt_flow_pool_map_handler_rh() - HTT_T2H_MSG_TYPE_FLOW_POOL_MAP handler
 * @soc: Handle to DP Soc structure
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Return: QDF_STATUS_SUCCESS - success, others - failure
 */
static QDF_STATUS
dp_htt_flow_pool_map_handler_rh(struct dp_soc *soc, uint8_t flow_id,
				uint8_t flow_type, uint8_t flow_pool_id,
				uint32_t flow_pool_size)
{
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	QDF_STATUS status;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return QDF_STATUS_E_INVAL;
	}

	vdev = dp_vdev_get_ref_by_id(soc, flow_id, DP_MOD_ID_HTT);
	if (vdev) {
		pdev = vdev->pdev;
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
	} else {
		pdev = soc->pdev_list[0];
	}

	status = dp_tx_flow_pool_map_handler(pdev, flow_id, flow_type,
					     flow_pool_id, flow_pool_size);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to create tx flow pool %d", flow_pool_id);
		goto err_out;
	}

	return QDF_STATUS_SUCCESS;

err_out:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}

/**
 * dp_htt_flow_pool_unmap_handler_rh() - HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP handler
 * @soc: Handle to DP Soc structure
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Return: none
 */
static void
dp_htt_flow_pool_unmap_handler_rh(struct dp_soc *soc, uint8_t flow_id,
				  uint8_t flow_type, uint8_t flow_pool_id)
{
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return;
	}

	vdev = dp_vdev_get_ref_by_id(soc, flow_id, DP_MOD_ID_HTT);
	if (vdev) {
		pdev = vdev->pdev;
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
	} else {
		pdev = soc->pdev_list[0];
	}

	dp_tx_flow_pool_unmap_handler(pdev, flow_id, flow_type,
				      flow_pool_id);
}

/**
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc: SOC handle
 * @status: Completion status
 * @netbuf: HTT buffer
 *
 * Return: none
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
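
/**
 * dp_htt_h2t_rx_ring_rfs_cfg() - Send the receive flow steering (RFS)
 * configuration message to the FW
 * @soc: Handle to HTT SOC structure
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */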
QDF_STATUS dp_htt_h2t_rx_ring_rfs_cfg(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	QDF_STATUS status;
	uint8_t *htt_logger_bufp;

	/*
	 * TODO: check whether ini support is needed in Evros.
	 * Receive flow steering configuration: disable
	 * gEnableFlowSteering (=0) in the ini if the FW doesn't support it.
	 */
	/* reserve room for the HTC header */
	msg = qdf_nbuf_alloc(soc->osdev,
			     HTT_MSG_BUF_SIZE(HTT_RFS_CFG_REQ_BYTES),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     true);
	if (!msg) {
		dp_err("htt_msg alloc failed for RFS config");
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	qdf_nbuf_put_tail(msg, HTT_RFS_CFG_REQ_BYTES);

	/* fill in the message contents */
	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0 */
	*msg_word = 0;
	htt_logger_bufp = (uint8_t *)msg_word;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RFS_CONFIG);
	HTT_RX_RFS_CONFIG_SET(*msg_word, 1);

	/*
	 * TODO: the value should be obtained from the ini maxMSDUsPerRxInd;
	 * currently that ini is legacy OL and available only from CDS.
	 * Make the ini common to HL and Evros DP.
	 */
	*msg_word |= ((32 & 0xff) << 16);

	dp_htt_info("RFS sent to FW: 0x%08x", *msg_word);

	/* allocate an HTC packet container and hand the message to HTC */
	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RFS_CONFIG,
				     htt_logger_bufp);
	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
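
/**
 * dp_htt_rx_addba_handler_rh() - RX ADDBA HTT message handler
 * @soc: Handle to DP Soc structure
 * @peer_id: peer id
 * @tid: TID
 * @win_sz: BA window size advertised by FW
 *
 * Currently a no-op on this target.
 *
 * Return: none
 */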
static void
dp_htt_rx_addba_handler_rh(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t tid, uint16_t win_sz)
{
}
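
/**
 * dp_htt_rx_delba_ind_handler_rh() - RX DELBA indication handler
 * @soc_handle: Handle to DP Soc structure
 * @peer_id: peer id
 * @tid: TID
 * @win_sz: BA window size
 *
 * Currently a no-op on this target.
 *
 * Return: QDF_STATUS_SUCCESS
 */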
static QDF_STATUS
dp_htt_rx_delba_ind_handler_rh(void *soc_handle, uint16_t peer_id,
			       uint8_t tid, uint16_t win_sz)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_htt_t2h_msg_handler_fast() - Fastpath specific message handler
 * @context: HTT context
 * @cmpl_msdus: netbuf completions
 * @num_cmpls: number of completions to be handled
 *
 * Return: None
 */
static void
dp_htt_t2h_msg_handler_fast(void *context, qdf_nbuf_t *cmpl_msdus,
			    uint32_t num_cmpls)
{
	struct htt_soc *soc = (struct htt_soc *)context;
	qdf_nbuf_t htt_t2h_msg;
	uint32_t *msg_word;
	uint32_t i;
	enum htt_t2h_msg_type msg_type;
	uint32_t msg_len;

	for (i = 0; i < num_cmpls; i++) {
		htt_t2h_msg = cmpl_msdus[i];
		msg_len = qdf_nbuf_len(htt_t2h_msg);

		/*
		 * Move the data pointer to the HTT header, past the
		 * HTC header and the HTC header alignment padding.
		 */
		qdf_nbuf_pull_head(htt_t2h_msg, HTC_HEADER_LEN +
				   HTC_HDR_ALIGNMENT_PADDING);

		msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
		msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);

		switch (msg_type) {
		case HTT_T2H_MSG_TYPE_RX_DATA_IND:
		{
			uint16_t vdev_id, msdu_cnt;
			uint16_t peer_id, frag_ind;

			peer_id = HTT_RX_DATA_IND_PEER_ID_GET(*msg_word);
			frag_ind = HTT_RX_DATA_IND_FRAG_GET(*(msg_word + 1));
			vdev_id = HTT_RX_DATA_IND_VDEV_ID_GET(*msg_word);

			if (qdf_unlikely(frag_ind)) {
				dp_rx_frag_indication_handler(soc->dp_soc,
							      htt_t2h_msg,
							      vdev_id, peer_id);
				break;
			}

			msdu_cnt =
				HTT_RX_DATA_IND_MSDU_CNT_GET(*(msg_word + 1));

			dp_rx_data_indication_handler(soc->dp_soc, htt_t2h_msg,
						      vdev_id, peer_id,
						      msdu_cnt);
			break;
		}
		case HTT_T2H_MSG_TYPE_SOFT_UMAC_TX_COMPL_IND:
		{
			uint32_t num_msdus;

			num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
			if ((num_msdus * HTT_TX_MSDU_INFO_SIZE +
			     HTT_SOFT_UMAC_TX_COMPL_IND_SIZE) > msg_len) {
				dp_htt_err("Invalid msdu count in tx compl indication %d",
					   num_msdus);
				break;
			}

			dp_tx_compl_handler_rh(soc->dp_soc, htt_t2h_msg);
			break;
		}
		case HTT_T2H_MSG_TYPE_RX_PN_IND:
		{
			/* TODO check and add PN IND handling */
			break;
		}
		case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint16_t win_sz;

			/* Update REO Queue Desc with new values */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);

			/*
			 * Window size needs to be incremented by 1
			 * since FW needs to represent a value of 256
			 * using just 8 bits.
			 */
			dp_htt_rx_addba_handler_rh(soc->dp_soc, peer_id,
						   tid, win_sz + 1);
			break;
		}
		case HTT_T2H_MSG_TYPE_RX_DELBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			QDF_STATUS status;

			peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_DELBA_TID_GET(*msg_word);
			win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);

			status = dp_htt_rx_delba_ind_handler_rh(soc->dp_soc,
								peer_id, tid,
								win_sz);
			dp_htt_info("DELBA PeerID %d BAW %d TID %d stat %d",
				    peer_id, win_sz, tid, status);
			break;
		}
		case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			qdf_nbuf_t nbuf_copy;
			HTC_PACKET htc_pkt = {0};

			nbuf_copy = qdf_nbuf_copy(htt_t2h_msg);
			if (qdf_unlikely(!nbuf_copy)) {
				dp_htt_err("NBUF copy failed for PPDU stats msg");
				break;
			}

			htc_pkt.Status = QDF_STATUS_SUCCESS;
			htc_pkt.pPktContext = (void *)nbuf_copy;
			dp_htt_t2h_msg_handler(context, &htc_pkt);
			break;
		}
		case HTT_T2H_MSG_TYPE_FLOW_POOL_MAP:
		{
			uint8_t num_flows;
			struct htt_flow_pool_map_payload_t *pool_map;

			num_flows = HTT_FLOW_POOL_MAP_NUM_FLOWS_GET(*msg_word);

			/*
			 * Each flow entry spans PAYLOAD_SZ / HEADER_SZ words;
			 * add one word for the message header.
			 */
			if (((HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
			      HTT_FLOW_POOL_MAP_HEADER_SZ) * num_flows + 1) *
			     sizeof(*msg_word) > msg_len) {
				dp_htt_err("Invalid flow count in flow pool map message");
				WARN_ON(1);
				break;
			}

			msg_word++;
			while (num_flows) {
				pool_map = (struct htt_flow_pool_map_payload_t *)msg_word;
				dp_htt_flow_pool_map_handler_rh(
					soc->dp_soc, pool_map->flow_id,
					pool_map->flow_type,
					pool_map->flow_pool_id,
					pool_map->flow_pool_size);

				msg_word += (HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
					     HTT_FLOW_POOL_MAP_HEADER_SZ);
				num_flows--;
			}
			break;
		}
		case HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP:
		{
			struct htt_flow_pool_unmap_t *pool_unmap;

			if (msg_len < sizeof(struct htt_flow_pool_unmap_t)) {
				dp_htt_err("Invalid length in flow pool unmap message %d",
					   msg_len);
				WARN_ON(1);
				break;
			}

			pool_unmap = (struct htt_flow_pool_unmap_t *)msg_word;
			dp_htt_flow_pool_unmap_handler_rh(
				soc->dp_soc, pool_unmap->flow_id,
				pool_unmap->flow_type,
				pool_unmap->flow_pool_id);
			break;
		}
		default:
		{
			HTC_PACKET htc_pkt = {0};

			htc_pkt.Status = QDF_STATUS_SUCCESS;
			htc_pkt.pPktContext = (void *)htt_t2h_msg;
			/*
			 * Increment the user count to protect the buffer
			 * from being freed by the generic handler; the
			 * count is reset to 1 during MSG_BUF_REINIT.
			 */
			qdf_nbuf_inc_users(htt_t2h_msg);
			dp_htt_t2h_msg_handler(context, &htc_pkt);
			break;
		}
		}

		/* Re-initialize the indication buffer */
		HTT_T2H_MSG_BUF_REINIT(htt_t2h_msg, soc->osdev);
		qdf_nbuf_set_pktlen(htt_t2h_msg, 0);
	}
}
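
/**
 * dp_htt_htc_attach() - Connect one HTT service to HTC
 * @soc: Handle to HTT SOC structure
 * @service_id: HTC service id to connect (e.g. HTT_DATA_MSG_SVC)
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */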
static QDF_STATUS
dp_htt_htc_attach(struct htt_soc *soc, uint16_t service_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc->dp_soc);
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	/* fastpath handler will be used instead */
	connect.EpCallbacks.EpRecv = NULL;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to the requested service */
	connect.service_id = service_id;

	status = htc_connect_service(soc->htc_soc, &connect, &response);
	if (status != QDF_STATUS_SUCCESS) {
		dp_htt_err("HTC connect svc failed for id:%u", service_id);
		return status;
	}

	if (service_id == HTT_DATA_MSG_SVC)
		soc->htc_endpoint = response.Endpoint;

	/* Save the EP_ID of the TX pipe to be used during TX enqueue */
	if (service_id == HTT_DATA2_MSG_SVC)
		rh_soc->tx_endpoint = response.Endpoint;

	return QDF_STATUS_SUCCESS;
}
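
/**
 * dp_htt_htc_soc_attach_all() - Connect all HTT data services to HTC and
 * register the fastpath T2H callbacks
 * @soc: Handle to HTT SOC structure
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */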
static QDF_STATUS
dp_htt_htc_soc_attach_all(struct htt_soc *soc)
{
	struct dp_soc *dp_soc = soc->dp_soc;
	int svc_list[3] = {HTT_DATA_MSG_SVC, HTT_DATA2_MSG_SVC,
			   HTT_DATA3_MSG_SVC};
	QDF_STATUS status;
	int i;

	for (i = 0; i < QDF_ARRAY_SIZE(svc_list); i++) {
		status = dp_htt_htc_attach(soc, svc_list[i]);
		if (QDF_IS_STATUS_ERROR(status))
			return status;
	}

	dp_hif_update_pipe_callback(dp_soc, (void *)soc,
				    dp_htt_hif_t2h_hp_callback,
				    DP_HTT_T2H_HP_PIPE);

	/* Register fastpath cb handlers for RX CEs */
	if (hif_ce_fastpath_cb_register(dp_soc->hif_handle,
					dp_htt_t2h_msg_handler_fast, soc)) {
		dp_htt_err("failed to register fastpath callback");
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_htt_soc_initialize_rh() - SOC level HTT initialization
 * @htt_soc: Opaque htt SOC handle
 * @ctrl_psoc: Opaque ctrl SOC handle
 * @htc_soc: SOC level HTC handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @osdev: QDF device
 *
 * Return: HTT handle on success; NULL on failure
 */
void *
dp_htt_soc_initialize_rh(struct htt_soc *htt_soc,
			 struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			 HTC_HANDLE htc_soc,
			 hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;

	soc->osdev = osdev;
	soc->ctrl_psoc = ctrl_psoc;
	soc->htc_soc = htc_soc;
	soc->hal_soc = hal_soc_hdl;

	if (dp_htt_htc_soc_attach_all(soc))
		goto fail2;

	return soc;

fail2:
	return NULL;
}