/*
 * Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file htt.c
 * @brief Provide functions to create+init and destroy a HTT instance.
 * @details
 * This file contains functions for creating a HTT instance; initializing
 * the HTT instance, e.g. by allocating a pool of HTT tx descriptors and
 * connecting the HTT service with HTC; and deleting a HTT instance.
 */
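
/*
 * Illustrative only (not part of the original file): a minimal sketch of the
 * create / init / destroy sequence described above, using the functions
 * defined later in this file. The desc_pool_size value and the error
 * handling shown here are assumptions for the example, not driver policy.
 *
 *	htt_pdev_handle htt_pdev;
 *
 *	htt_pdev = htt_pdev_alloc(txrx_pdev, ctrl_pdev, htc_pdev, osdev);
 *	if (!htt_pdev)
 *		return -ENOMEM;
 *	if (htt_attach(htt_pdev, desc_pool_size))	// tx/rx descriptors
 *		goto fail;
 *	if (htt_attach_target(htt_pdev) != QDF_STATUS_SUCCESS)	// cfg msgs to FW
 *		goto fail;
 *	// ... datapath runs ...
 *	htt_detach_target(htt_pdev);
 *	htt_detach(htt_pdev);
 *	htt_pdev_free(htt_pdev);
 */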
#include <qdf_mem.h>         /* qdf_mem_malloc */
#include <qdf_types.h>       /* qdf_device_t, qdf_print */
#include <htt.h>             /* htt_tx_msdu_desc_t */
#include <ol_cfg.h>
#include <ol_txrx_htt_api.h> /* ol_tx_download_done_ll, etc. */
#include <ol_htt_api.h>
#include <htt_internal.h>
#include <ol_htt_tx_api.h>
#include <cds_api.h>
#include "hif.h"
#include <cdp_txrx_handle.h>
#define HTT_HTC_PKT_POOL_INIT_SIZE 100  /* enough for a large A-MPDU */

QDF_STATUS (*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
QDF_STATUS (*htt_h2t_rx_ring_rfs_cfg_msg)(struct htt_pdev_t *pdev);

#ifdef IPA_OFFLOAD
static QDF_STATUS htt_ipa_config(htt_pdev_handle pdev, QDF_STATUS status)
{
        if ((QDF_STATUS_SUCCESS == status) &&
            ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
                status = htt_h2t_ipa_uc_rsc_cfg_msg(pdev);
        return status;
}

#define HTT_IPA_CONFIG htt_ipa_config
#else
#define HTT_IPA_CONFIG(pdev, status) status     /* no-op */
#endif /* IPA_OFFLOAD */
struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
{
        struct htt_htc_pkt_union *pkt = NULL;

        HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
        if (pdev->htt_htc_pkt_freelist) {
                pkt = pdev->htt_htc_pkt_freelist;
                pdev->htt_htc_pkt_freelist = pdev->htt_htc_pkt_freelist->u.next;
        }
        HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);

        if (pkt == NULL)
                pkt = qdf_mem_malloc(sizeof(*pkt));

        if (!pkt) {
                qdf_print("%s: HTC packet allocation failed\n", __func__);
                return NULL;
        }
        htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
        return &pkt->u.pkt;     /* not actually a dereference */
}

void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
{
        struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;

        if (!u_pkt) {
                qdf_print("%s: HTC packet is NULL\n", __func__);
                return;
        }

        HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
        htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
        u_pkt->u.next = pdev->htt_htc_pkt_freelist;
        pdev->htt_htc_pkt_freelist = u_pkt;
        HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}

void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev)
{
        struct htt_htc_pkt_union *pkt, *next;

        HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
        pkt = pdev->htt_htc_pkt_freelist;
        pdev->htt_htc_pkt_freelist = NULL;
        HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);

        while (pkt) {
                next = pkt->u.next;
                qdf_mem_free(pkt);
                pkt = next;
        }
}
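
/*
 * Illustrative only (not part of the original file): how the freelist above
 * is typically exercised elsewhere in this file -- htt_attach() pre-populates
 * it, message senders recycle HTC packets through it, and htt_detach() drains
 * it via htt_htc_pkt_pool_free(). The "build and submit" step is a simplified
 * assumption for the sketch.
 *
 *	struct htt_htc_pkt *pkt = htt_htc_pkt_alloc(pdev);  // reuse or malloc
 *	if (pkt) {
 *		// ... build and submit an HTC packet ...
 *		htt_htc_pkt_free(pdev, pkt);   // return it to the freelist
 *	}
 *	htt_htc_pkt_pool_free(pdev);           // at teardown, free everything
 */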
#ifdef ATH_11AC_TXCOMPACT
void
htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev, int level)
{
        struct htt_htc_pkt_union *pkt, *next, *prev = NULL;
        int i = 0;
        qdf_nbuf_t netbuf;

        HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
        pkt = pdev->htt_htc_pkt_misclist;
        while (pkt) {
                next = pkt->u.next;
                /* trim the overgrown list */
                if (++i > level) {
                        netbuf =
                                (qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
                        qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
                        qdf_nbuf_free(netbuf);
                        qdf_mem_free(pkt);
                        pkt = NULL;
                        if (prev)
                                prev->u.next = NULL;
                }
                prev = pkt;
                pkt = next;
        }
        HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}

void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
{
        struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
        int misclist_trim_level = htc_get_tx_queue_depth(pdev->htc_pdev,
                                                         pkt->htc_pkt.Endpoint)
                                  + HTT_HTC_PKT_MISCLIST_SIZE;

        HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
        if (pdev->htt_htc_pkt_misclist) {
                u_pkt->u.next = pdev->htt_htc_pkt_misclist;
                pdev->htt_htc_pkt_misclist = u_pkt;
        } else {
                pdev->htt_htc_pkt_misclist = u_pkt;
        }
        HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);

        /*
         * Only (CE pipe size + tx queue depth) packets can possibly be in use;
         * free the older packets in the misclist.
         */
        htt_htc_misc_pkt_list_trim(pdev, misclist_trim_level);
}
void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
{
        struct htt_htc_pkt_union *pkt, *next;
        qdf_nbuf_t netbuf;

        HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
        pkt = pdev->htt_htc_pkt_misclist;
        pdev->htt_htc_pkt_misclist = NULL;
        HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);

        while (pkt) {
                next = pkt->u.next;
                if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
                    HTC_PACKET_MAGIC_COOKIE) {
                        QDF_ASSERT(0);
                        pkt = next;
                        continue;
                }

                netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
                qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
                qdf_nbuf_free(netbuf);
                qdf_mem_free(pkt);
                pkt = next;
        }
}
#endif

/* AR6004 doesn't need the HTT layer. */
#ifdef AR6004_HW
#define NO_HTT_NEEDED true
#else
#define NO_HTT_NEEDED false
#endif
#if defined(QCA_TX_HTT2_SUPPORT) && defined(CONFIG_HL_SUPPORT)
/**
 * htt_htc_tx_htt2_service_start() - Start TX HTT2 service
 *
 * @pdev: pointer to htt device.
 * @connect_req: pointer to service connection request information
 * @connect_resp: pointer to service connection response information
 *
 * Return: None
 */
static void
htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
                              struct htc_service_connect_req *connect_req,
                              struct htc_service_connect_resp *connect_resp)
{
        QDF_STATUS status;

        qdf_mem_set(connect_req, 0, sizeof(struct htc_service_connect_req));
        qdf_mem_set(connect_resp, 0, sizeof(struct htc_service_connect_resp));

        /* The same as the HTT service, but with no RX. */
        connect_req->EpCallbacks.pContext = pdev;
        connect_req->EpCallbacks.EpTxComplete = htt_h2t_send_complete;
        connect_req->EpCallbacks.EpSendFull = htt_h2t_full;
        connect_req->MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;
        /* Should NOT support credit flow control. */
        connect_req->ConnectionFlags |=
                HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
        /* Enable HTC schedule mechanism for TX HTT2 service. */
        connect_req->ConnectionFlags |= HTC_CONNECT_FLAGS_ENABLE_HTC_SCHEDULE;

        connect_req->service_id = HTT_DATA2_MSG_SVC;

        status = htc_connect_service(pdev->htc_pdev, connect_req, connect_resp);

        if (status != QDF_STATUS_SUCCESS) {
                pdev->htc_tx_htt2_endpoint = ENDPOINT_UNUSED;
                pdev->htc_tx_htt2_max_size = 0;
        } else {
                pdev->htc_tx_htt2_endpoint = connect_resp->Endpoint;
                pdev->htc_tx_htt2_max_size = HTC_TX_HTT2_MAX_SIZE;
        }

        qdf_print("TX HTT %s, ep %d size %d\n",
                  (status == QDF_STATUS_SUCCESS ? "ON" : "OFF"),
                  pdev->htc_tx_htt2_endpoint,
                  pdev->htc_tx_htt2_max_size);
}
#else
static inline void
htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
                              struct htc_service_connect_req *connect_req,
                              struct htc_service_connect_resp *connect_resp)
{
}
#endif
/**
 * htt_htc_credit_flow_disable() - disable flow control for
 *                                 HTT data message service
 *
 * @pdev: pointer to htt device.
 * @connect_req: pointer to service connection request information
 *
 * Whether the HTC credit mechanism is disabled depends on
 * default_tx_comp_req: if default_tx_comp_req is set, disabling the
 * HTC credit mechanism would lower throughput, since txrx packet
 * downloads would then be limited by OTA completions.
 *
 * Return: None
 */
static
void htt_htc_credit_flow_disable(struct htt_pdev_t *pdev,
                                 struct htc_service_connect_req *connect_req)
{
        if (pdev->osdev->bus_type == QDF_BUS_TYPE_SDIO) {
                /*
                 * TODO: Conditional disabling will be removed once firmware
                 * with reduced tx completion is pushed into release builds.
                 */
                if (!pdev->cfg.default_tx_comp_req)
                        connect_req->ConnectionFlags |=
                                HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
        } else {
                connect_req->ConnectionFlags |=
                        HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
        }
}
#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
/**
 * htt_dump_bundle_stats() - dump wlan stats
 * @pdev: handle to the HTT instance
 *
 * Return: None
 */
void htt_dump_bundle_stats(htt_pdev_handle pdev)
{
        htc_dump_bundle_stats(pdev->htc_pdev);
}

/**
 * htt_clear_bundle_stats() - clear wlan stats
 * @pdev: handle to the HTT instance
 *
 * Return: None
 */
void htt_clear_bundle_stats(htt_pdev_handle pdev)
{
        htc_clear_bundle_stats(pdev->htc_pdev);
}
#endif

#if defined(QCA_WIFI_3_0_ADRASTEA)
/**
 * htt_htc_attach_all() - Connect to HTC service for HTT
 * @pdev: pdev ptr
 *
 * Return: 0 for success or error code.
 */
static int
htt_htc_attach_all(struct htt_pdev_t *pdev)
{
        if (htt_htc_attach(pdev, HTT_DATA_MSG_SVC))
                return -EIO;
        if (htt_htc_attach(pdev, HTT_DATA2_MSG_SVC))
                return -EIO;
        if (htt_htc_attach(pdev, HTT_DATA3_MSG_SVC))
                return -EIO;

        return 0;
}
#else
/**
 * htt_htc_attach_all() - Connect to HTC service for HTT
 * @pdev: pdev ptr
 *
 * Return: 0 for success or error code.
 */
static int
htt_htc_attach_all(struct htt_pdev_t *pdev)
{
        return htt_htc_attach(pdev, HTT_DATA_MSG_SVC);
}
#endif
/**
 * htt_pdev_alloc() - allocate HTT pdev
 * @txrx_pdev: txrx pdev
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os device
 *
 * Return: HTT pdev handle
 */
htt_pdev_handle
htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
               struct cdp_cfg *ctrl_pdev,
               HTC_HANDLE htc_pdev, qdf_device_t osdev)
{
        struct htt_pdev_t *pdev;
        struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

        if (!osc)
                goto fail1;

        pdev = qdf_mem_malloc(sizeof(*pdev));
        if (!pdev)
                goto fail1;

        pdev->osdev = osdev;
        pdev->ctrl_pdev = ctrl_pdev;
        pdev->txrx_pdev = txrx_pdev;
        pdev->htc_pdev = htc_pdev;

        pdev->htt_htc_pkt_freelist = NULL;
#ifdef ATH_11AC_TXCOMPACT
        pdev->htt_htc_pkt_misclist = NULL;
#endif

        /* for efficiency, store a local copy of the is_high_latency flag */
        pdev->cfg.is_high_latency = ol_cfg_is_high_latency(pdev->ctrl_pdev);
        pdev->cfg.default_tx_comp_req =
                        !ol_cfg_tx_free_at_download(pdev->ctrl_pdev);

        pdev->cfg.is_full_reorder_offload =
                        ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev);
        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
                  "is_full_reorder_offloaded? %d",
                  (int)pdev->cfg.is_full_reorder_offload);

        pdev->cfg.ce_classify_enabled =
                ol_cfg_is_ce_classify_enabled(ctrl_pdev);
        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
                  "ce_classify_enabled? %d",
                  pdev->cfg.ce_classify_enabled);

        if (pdev->cfg.is_high_latency) {
                qdf_atomic_init(&pdev->htt_tx_credit.target_delta);
                qdf_atomic_init(&pdev->htt_tx_credit.bus_delta);
                qdf_atomic_add(HTT_MAX_BUS_CREDIT,
                               &pdev->htt_tx_credit.bus_delta);
        }

        pdev->targetdef = htc_get_targetdef(htc_pdev);
#if defined(HELIUMPLUS)
        HTT_SET_WIFI_IP(pdev, 2, 0);
#endif /* defined(HELIUMPLUS) */

        if (NO_HTT_NEEDED)
                goto success;

        /*
         * Connect to HTC service.
         * This has to be done before calling htt_rx_attach,
         * since htt_rx_attach involves sending an rx ring configure
         * message to the target.
         */
        HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex);
        HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev);
        HTT_TX_MUTEX_INIT(&pdev->credit_mutex);

        if (htt_htc_attach_all(pdev))
                goto htt_htc_attach_fail;

        if (hif_ce_fastpath_cb_register(osc, htt_t2h_msg_handler_fast, pdev))
                qdf_print("failed to register fastpath callback\n");

success:
        return pdev;

htt_htc_attach_fail:
        HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex);
        HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
        HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
        qdf_mem_free(pdev);

fail1:
        return NULL;
}
/**
 * htt_attach() - Allocate and setup HTT TX/RX descriptors
 * @pdev: pdev ptr
 * @desc_pool_size: size of tx descriptors
 *
 * Return: 0 for success or error code.
 */
int
htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
{
        int i;
        int ret = 0;

        ret = htt_tx_attach(pdev, desc_pool_size);
        if (ret)
                goto fail1;

        ret = htt_rx_attach(pdev);
        if (ret)
                goto fail2;

        /* pre-allocate some HTC_PACKET objects */
        for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
                struct htt_htc_pkt_union *pkt;

                pkt = qdf_mem_malloc(sizeof(*pkt));
                if (!pkt)
                        break;
                htt_htc_pkt_free(pdev, &pkt->u.pkt);
        }

        if (pdev->cfg.is_high_latency) {
                /*
                 * HL - download the whole frame.
                 * Specify a download length greater than the max MSDU size,
                 * so the downloads will be limited by the actual frame sizes.
                 */
                pdev->download_len = 5000;

                if (ol_cfg_tx_free_at_download(pdev->ctrl_pdev))
                        pdev->tx_send_complete_part2 =
                                                ol_tx_download_done_hl_free;
                else
                        pdev->tx_send_complete_part2 =
                                                ol_tx_download_done_hl_retain;

                /*
                 * CHECK THIS LATER: does the HL HTT version of
                 * htt_rx_mpdu_desc_list_next
                 * (which is not currently implemented) present the
                 * adf_nbuf_data(rx_ind_msg)
                 * as the abstract rx descriptor?
                 * If not, the rx_fw_desc_offset initialization
                 * here will have to be adjusted accordingly.
                 * NOTE: for HL, because the FW rx desc is in the rx
                 * indication message rather than in the rx descriptor,
                 * the offset should be a negative value.
                 */
                pdev->rx_fw_desc_offset =
                        HTT_ENDIAN_BYTE_IDX_SWAP(
                                        HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET
                                        - HTT_RX_IND_HL_BYTES);

                htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_hl;
                htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_hl;

                /* initialize the txrx credit count */
                ol_tx_target_credit_update(
                                pdev->txrx_pdev, ol_cfg_target_tx_credit(
                                        pdev->ctrl_pdev));
        } else {
                enum wlan_frm_fmt frm_type;

                /*
                 * LL - download just the initial portion of the frame.
                 * Download enough to cover the encapsulation headers checked
                 * by the target's tx classification descriptor engine.
                 *
                 * For LL, the FW rx desc is directly referenced at its
                 * location inside the rx indication message.
                 */

                /* account for the 802.3 or 802.11 header */
                frm_type = ol_cfg_frame_type(pdev->ctrl_pdev);
                if (frm_type == wlan_frm_fmt_native_wifi) {
                        pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI;
                } else if (frm_type == wlan_frm_fmt_802_3) {
                        pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET;
                } else {
                        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                                  "Unexpected frame type spec: %d", frm_type);
                        HTT_ASSERT0(0);
                }

                /*
                 * Account for the optional L2 / ethernet header fields:
                 * 802.1Q, LLC/SNAP
                 */
                pdev->download_len +=
                        HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP;

                /*
                 * Account for the portion of the L3 (IP) payload that the
                 * target needs for its tx classification.
                 */
                pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev);

                /*
                 * Account for the HTT tx descriptor, including the
                 * HTC header + alignment padding.
                 */
                pdev->download_len += sizeof(struct htt_host_tx_desc_t);

                /*
                 * The TXCOMPACT htt_tx_sched function applies
                 * pdev->download_len to all requeued tx frames. Thus,
                 * pdev->download_len has to be the largest download length of
                 * any tx frame that will be downloaded.
                 * This maximum download length is for management tx frames,
                 * which have an 802.11 header.
                 */
#ifdef ATH_11AC_TXCOMPACT
                pdev->download_len = sizeof(struct htt_host_tx_desc_t)
                        + HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
                        + HTT_TX_HDR_SIZE_802_1Q
                        + HTT_TX_HDR_SIZE_LLC_SNAP
                        + ol_cfg_tx_download_size(pdev->ctrl_pdev);
#endif
                pdev->tx_send_complete_part2 = ol_tx_download_done_ll;

                /*
                 * For LL, the FW rx desc is alongside the HW rx desc fields in
                 * the htt_host_rx_desc_base struct.
                 */
                pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;

                htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
                htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_ll;
        }

        return 0;

fail2:
        htt_tx_detach(pdev);

fail1:
        return ret;
}
QDF_STATUS htt_attach_target(htt_pdev_handle pdev)
{
        QDF_STATUS status;

        status = htt_h2t_ver_req_msg(pdev);
        if (status != QDF_STATUS_SUCCESS) {
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "%s:%d: could not send h2t_ver_req msg",
                          __func__, __LINE__);
                return status;
        }
#if defined(HELIUMPLUS)
        /*
         * Send the frag_desc info to target.
         */
        status = htt_h2t_frag_desc_bank_cfg_msg(pdev);
        if (status != QDF_STATUS_SUCCESS) {
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "%s:%d: could not send h2t_frag_desc_bank_cfg msg",
                          __func__, __LINE__);
                return status;
        }
#endif /* defined(HELIUMPLUS) */

        /*
         * If applicable, send the rx ring config message to the target.
         * The host could wait for the HTT version number confirmation message
         * from the target before sending any further HTT messages, but it's
         * reasonable to assume that the host and target HTT version numbers
         * match, and proceed immediately with the remaining configuration
         * handshaking.
         */
        status = htt_h2t_rx_ring_rfs_cfg_msg(pdev);
        if (status != QDF_STATUS_SUCCESS) {
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "%s:%d: could not send h2t_rx_ring_rfs_cfg msg",
                          __func__, __LINE__);
                return status;
        }

        status = htt_h2t_rx_ring_cfg_msg(pdev);
        if (status != QDF_STATUS_SUCCESS) {
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "%s:%d: could not send h2t_rx_ring_cfg msg",
                          __func__, __LINE__);
                return status;
        }

        status = HTT_IPA_CONFIG(pdev, status);
        if (status != QDF_STATUS_SUCCESS) {
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "%s:%d: could not send h2t_ipa_uc_rsc_cfg msg",
                          __func__, __LINE__);
                return status;
        }

        return status;
}

void htt_detach(htt_pdev_handle pdev)
{
        htt_rx_detach(pdev);
        htt_tx_detach(pdev);
        htt_htc_pkt_pool_free(pdev);
#ifdef ATH_11AC_TXCOMPACT
        htt_htc_misc_pkt_pool_free(pdev);
#endif
        HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex);
        HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
        HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
}

/**
 * htt_pdev_free() - Free HTT pdev
 * @pdev: htt pdev
 *
 * Return: none
 */
void htt_pdev_free(htt_pdev_handle pdev)
{
        qdf_mem_free(pdev);
}

void htt_detach_target(htt_pdev_handle pdev)
{
}
static inline
int htt_update_endpoint(struct htt_pdev_t *pdev,
                        uint16_t service_id, HTC_ENDPOINT_ID ep)
{
        struct hif_opaque_softc *hif_ctx;
        uint8_t ul = 0xff, dl = 0xff;
        int ul_polled, dl_polled;
        int tx_service = 0;
        int rc = 0;

        hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
        if (qdf_unlikely(NULL == hif_ctx)) {
                QDF_ASSERT(NULL != hif_ctx);
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "%s:%d: assuming non-tx service.",
                          __func__, __LINE__);
        } else {
                ul = dl = 0xff;
                if (QDF_STATUS_SUCCESS !=
                    hif_map_service_to_pipe(hif_ctx, service_id,
                                            &ul, &dl,
                                            &ul_polled, &dl_polled))
                        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
                                  "%s:%d: assuming non-tx srv.",
                                  __func__, __LINE__);
                else
                        tx_service = (ul != 0xff);
        }
        if (tx_service) {
                /* currently we have only one OUT htt tx service */
                QDF_BUG(service_id == HTT_DATA_MSG_SVC);

                pdev->htc_tx_endpoint = ep;
                hif_save_htc_htt_config_endpoint(hif_ctx, ep);
                rc = 1;
        }
        return rc;
}

int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id)
{
        struct htc_service_connect_req connect;
        struct htc_service_connect_resp response;
        QDF_STATUS status;

        qdf_mem_set(&connect, sizeof(connect), 0);
        qdf_mem_set(&response, sizeof(response), 0);

        connect.pMetaData = NULL;
        connect.MetaDataLength = 0;
        connect.EpCallbacks.pContext = pdev;
        connect.EpCallbacks.EpTxComplete = htt_h2t_send_complete;
        connect.EpCallbacks.EpTxCompleteMultiple = NULL;
        connect.EpCallbacks.EpRecv = htt_t2h_msg_handler;
        connect.EpCallbacks.ep_resume_tx_queue = htt_tx_resume_handler;

        /* rx buffers currently are provided by HIF, not by EpRecvRefill */
        connect.EpCallbacks.EpRecvRefill = NULL;
        connect.EpCallbacks.RecvRefillWaterMark = 1;
        /* N/A, fill is done by HIF */

        connect.EpCallbacks.EpSendFull = htt_h2t_full;
        /*
         * Specify how deep to let a queue get before htc_send_pkt will
         * call the EpSendFull function due to excessive send queue depth.
         */
        connect.MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;

        /* disable flow control for HTT data message service */
        htt_htc_credit_flow_disable(pdev, &connect);

        /* connect to control service */
        connect.service_id = service_id;

        status = htc_connect_service(pdev->htc_pdev, &connect, &response);

        if (status != QDF_STATUS_SUCCESS) {
                if (cds_is_fw_down())
                        return -EIO;

                if (status == QDF_STATUS_E_NOMEM ||
                    cds_is_self_recovery_enabled())
                        return qdf_status_to_os_return(status);

                QDF_BUG(0);
        }

        htt_update_endpoint(pdev, service_id, response.Endpoint);

        /* Start the TX HTT2 service if the target supports it. */
        htt_htc_tx_htt2_service_start(pdev, &connect, &response);

        return 0;               /* success */
}
void htt_log_rx_ring_info(htt_pdev_handle pdev)
{
        if (!pdev) {
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "%s: htt pdev is NULL", __func__);
                return;
        }
        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG,
                  "%s: Data Stall Detected with reason 4 (=FW_RX_REFILL_FAILED). "
                  "src htt rx ring: space for %d elements, filled with %d buffers, buffers in the ring %d, refill debt %d",
                  __func__, pdev->rx_ring.size, pdev->rx_ring.fill_level,
                  pdev->rx_ring.fill_cnt,
                  qdf_atomic_read(&pdev->rx_ring.refill_debt));
}
#if HTT_DEBUG_LEVEL > 5
void htt_display(htt_pdev_handle pdev, int indent)
{
        qdf_print("%*s%s:\n", indent, " ", "HTT");
        qdf_print("%*stx desc pool: %d elems of %d bytes, %d allocated\n",
                  indent + 4, " ",
                  pdev->tx_descs.pool_elems,
                  pdev->tx_descs.size, pdev->tx_descs.alloc_cnt);
        qdf_print("%*srx ring: space for %d elems, filled with %d buffers\n",
                  indent + 4, " ",
                  pdev->rx_ring.size, pdev->rx_ring.fill_level);
        qdf_print("%*sat %pK (%llx paddr)\n", indent + 8, " ",
                  pdev->rx_ring.buf.paddrs_ring,
                  (unsigned long long)pdev->rx_ring.base_paddr);
        qdf_print("%*snetbuf ring @ %pK\n", indent + 8, " ",
                  pdev->rx_ring.buf.netbufs_ring);
        qdf_print("%*sFW_IDX shadow register: vaddr = %pK, paddr = %llx\n",
                  indent + 8, " ",
                  pdev->rx_ring.alloc_idx.vaddr,
                  (unsigned long long)pdev->rx_ring.alloc_idx.paddr);
        qdf_print("%*sSW enqueue idx= %d, SW dequeue idx: desc= %d, buf= %d\n",
                  indent + 8, " ", *pdev->rx_ring.alloc_idx.vaddr,
                  pdev->rx_ring.sw_rd_idx.msdu_desc,
                  pdev->rx_ring.sw_rd_idx.msdu_payld);
}
#endif
#ifdef IPA_OFFLOAD
/**
 * htt_ipa_uc_attach() - Allocate UC data path resources
 * @pdev: handle to the HTT instance
 *
 * Return: 0 on success,
 *         non-zero on failure
 */
int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
{
        int error;

        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO, "%s: enter",
                  __func__);

        /* TX resource attach */
        error = htt_tx_ipa_uc_attach(
                pdev,
                ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev),
                ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev),
                ol_cfg_ipa_uc_tx_partition_base(pdev->ctrl_pdev));
        if (error) {
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "HTT IPA UC TX attach fail code %d", error);
                HTT_ASSERT0(0);
                return error;
        }

        /* RX resource attach */
        error = htt_rx_ipa_uc_attach(
                pdev, qdf_get_pwr2(pdev->rx_ring.fill_level));
        if (error) {
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "HTT IPA UC RX attach fail code %d", error);
                htt_tx_ipa_uc_detach(pdev);
                HTT_ASSERT0(0);
                return error;
        }

        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO, "%s: exit",
                  __func__);
        return 0;               /* success */
}
/**
 * htt_ipa_uc_detach() - Remove UC data path resources
 * @pdev: handle to the HTT instance
 *
 * Return: None
 */
void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
{
        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO, "%s: enter",
                  __func__);

        /* TX IPA micro controller detach */
        htt_tx_ipa_uc_detach(pdev);

        /* RX IPA micro controller detach */
        htt_rx_ipa_uc_detach(pdev);

        QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO, "%s: exit",
                  __func__);
}
/**
 * htt_ipa_uc_get_resource() - Get uc resource from htt and lower layer
 * @pdev: handle to the HTT instance
 * @ce_sr_base_paddr: copy engine source ring base physical address
 * @ce_sr_ring_size: copy engine source ring size
 * @ce_reg_paddr: copy engine register physical address
 * @tx_comp_ring_base_paddr: tx comp ring base physical address
 * @tx_comp_ring_size: tx comp ring size
 * @tx_num_alloc_buffer: number of allocated tx buffer
 * @rx_rdy_ring_base_paddr: rx ready ring base physical address
 * @rx_rdy_ring_size: rx ready ring size
 * @rx_proc_done_idx_paddr: rx process done index physical address
 * @rx_proc_done_idx_vaddr: rx process done index virtual address
 * @rx2_rdy_ring_base_paddr: rx done ring base physical address
 * @rx2_rdy_ring_size: rx done ring size
 * @rx2_proc_done_idx_paddr: rx done index physical address
 * @rx2_proc_done_idx_vaddr: rx done index virtual address
 *
 * Return: 0 success
 */
int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
                        qdf_dma_addr_t *ce_sr_base_paddr,
                        uint32_t *ce_sr_ring_size,
                        qdf_dma_addr_t *ce_reg_paddr,
                        qdf_dma_addr_t *tx_comp_ring_base_paddr,
                        uint32_t *tx_comp_ring_size,
                        uint32_t *tx_num_alloc_buffer,
                        qdf_dma_addr_t *rx_rdy_ring_base_paddr,
                        uint32_t *rx_rdy_ring_size,
                        qdf_dma_addr_t *rx_proc_done_idx_paddr,
                        void **rx_proc_done_idx_vaddr,
                        qdf_dma_addr_t *rx2_rdy_ring_base_paddr,
                        uint32_t *rx2_rdy_ring_size,
                        qdf_dma_addr_t *rx2_proc_done_idx_paddr,
                        void **rx2_proc_done_idx_vaddr)
{
        /* Release allocated resource to client */
        *tx_comp_ring_base_paddr =
                pdev->ipa_uc_tx_rsc.tx_comp_base.paddr;
        *tx_comp_ring_size =
                (uint32_t) ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev);
        *tx_num_alloc_buffer = (uint32_t) pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
        *rx_rdy_ring_base_paddr =
                pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr;
        *rx_rdy_ring_size = (uint32_t) pdev->ipa_uc_rx_rsc.rx_ind_ring_size;
        *rx_proc_done_idx_paddr =
                pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr;
        *rx_proc_done_idx_vaddr =
                (void *)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr;
        *rx2_rdy_ring_base_paddr =
                pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr;
        *rx2_rdy_ring_size = (uint32_t) pdev->ipa_uc_rx_rsc.rx2_ind_ring_size;
        *rx2_proc_done_idx_paddr =
                pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr;
        *rx2_proc_done_idx_vaddr =
                (void *)pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr;

        /* Get copy engine, bus resource */
        htc_ipa_get_ce_resource(pdev->htc_pdev,
                                ce_sr_base_paddr,
                                ce_sr_ring_size, ce_reg_paddr);

        return 0;
}

/**
 * htt_ipa_uc_set_doorbell_paddr() - Propagate IPA doorbell address
 * @pdev: handle to the HTT instance
 * @ipa_uc_tx_doorbell_paddr: TX doorbell base physical address
 * @ipa_uc_rx_doorbell_paddr: RX doorbell base physical address
 *
 * Return: 0 success
 */
int
htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
                              qdf_dma_addr_t ipa_uc_tx_doorbell_paddr,
                              qdf_dma_addr_t ipa_uc_rx_doorbell_paddr)
{
        pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr = ipa_uc_tx_doorbell_paddr;
        pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr = ipa_uc_rx_doorbell_paddr;

        return 0;
}
#endif /* IPA_OFFLOAD */
/**
 * htt_mark_first_wakeup_packet() - set flag to indicate that the fw supports
 *	marking the first packet after WoW wakeup
 * @pdev: pointer to htt pdev
 * @value: 1 for enabled / 0 for disabled
 *
 * Return: None
 */
void htt_mark_first_wakeup_packet(htt_pdev_handle pdev,
                                  uint8_t value)
{
        if (!pdev) {
                QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
                          "%s: htt pdev is NULL", __func__);
                return;
        }

        pdev->cfg.is_first_wakeup_packet = value;
}