  1. /*
  2. * Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. /**
  19. * @file htt.c
  20. * @brief Provide functions to create+init and destroy a HTT instance.
  21. * @details
  22. * This file contains functions for creating a HTT instance; initializing
  23. * the HTT instance, e.g. by allocating a pool of HTT tx descriptors and
  24. * connecting the HTT service with HTC; and deleting a HTT instance.
  25. */
  26. #include <qdf_mem.h> /* qdf_mem_malloc */
  27. #include <qdf_types.h> /* qdf_device_t, qdf_print */
  28. #include <htt.h> /* htt_tx_msdu_desc_t */
  29. #include <ol_cfg.h>
  30. #include <ol_txrx_htt_api.h> /* ol_tx_dowload_done_ll, etc. */
  31. #include <ol_htt_api.h>
  32. #include <htt_internal.h>
  33. #include <ol_htt_tx_api.h>
  34. #include <cds_api.h>
  35. #include "hif.h"
  36. #include <cdp_txrx_handle.h>
  37. #include <ol_txrx_peer_find.h>
/* Number of HTC_PACKET containers pre-allocated by htt_attach();
 * enough for a large A-MPDU
 */
#define HTT_HTC_PKT_POOL_INIT_SIZE 100

/* Function pointers bound by htt_attach() to either the high-latency (_hl)
 * or low-latency (_ll) variant of the rx ring / RFS config message senders.
 */
QDF_STATUS(*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
QDF_STATUS(*htt_h2t_rx_ring_rfs_cfg_msg)(struct htt_pdev_t *pdev);
#ifdef IPA_OFFLOAD
/**
 * htt_ipa_config() - send the IPA uC resource config message if applicable
 * @pdev: handle to the HTT instance
 * @status: status of the preceding configuration step
 *
 * Only issues htt_h2t_ipa_uc_rsc_cfg_msg() when the previous step succeeded
 * and IPA uC offload is enabled in cfg; otherwise @status passes through.
 *
 * Return: QDF_STATUS_SUCCESS, or the error from the rsc cfg message
 */
static QDF_STATUS htt_ipa_config(htt_pdev_handle pdev, QDF_STATUS status)
{
	if ((QDF_STATUS_SUCCESS == status) &&
	    ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
		status = htt_h2t_ipa_uc_rsc_cfg_msg(pdev);
	return status;
}

#define HTT_IPA_CONFIG htt_ipa_config
#else
/* IPA offload compiled out: pass the incoming status through unchanged */
#define HTT_IPA_CONFIG(pdev, status) status /* no-op */
#endif /* IPA_OFFLOAD */
  53. struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
  54. {
  55. struct htt_htc_pkt_union *pkt = NULL;
  56. HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
  57. if (pdev->htt_htc_pkt_freelist) {
  58. pkt = pdev->htt_htc_pkt_freelist;
  59. pdev->htt_htc_pkt_freelist = pdev->htt_htc_pkt_freelist->u.next;
  60. }
  61. HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
  62. if (!pkt)
  63. pkt = qdf_mem_malloc(sizeof(*pkt));
  64. if (!pkt)
  65. return NULL;
  66. htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
  67. return &pkt->u.pkt; /* not actually a dereference */
  68. }
  69. void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
  70. {
  71. struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
  72. if (!u_pkt) {
  73. qdf_print("HTC packet is NULL");
  74. return;
  75. }
  76. HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
  77. htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
  78. u_pkt->u.next = pdev->htt_htc_pkt_freelist;
  79. pdev->htt_htc_pkt_freelist = u_pkt;
  80. HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
  81. }
  82. void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev)
  83. {
  84. struct htt_htc_pkt_union *pkt, *next;
  85. HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
  86. pkt = pdev->htt_htc_pkt_freelist;
  87. pdev->htt_htc_pkt_freelist = NULL;
  88. HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
  89. while (pkt) {
  90. next = pkt->u.next;
  91. qdf_mem_free(pkt);
  92. pkt = next;
  93. }
  94. }
  95. #ifdef ATH_11AC_TXCOMPACT
/*
 * htt_htc_misc_pkt_list_trim() - free misclist entries beyond @level
 * @pdev: handle to the HTT instance
 * @level: number of entries to retain at the head of the list
 *
 * Walks the misclist; once more than @level entries have been visited,
 * every remaining entry has its network buffer unmapped and freed, the
 * entry itself freed, and the list is terminated at the last retained
 * element.
 */
void
htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev, int level)
{
	struct htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
	pkt = pdev->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* pkt was freed; NULL it so the "prev = pkt" below
			 * does not leave prev pointing at freed memory
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}
  122. void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
  123. {
  124. struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
  125. int misclist_trim_level = htc_get_tx_queue_depth(pdev->htc_pdev,
  126. pkt->htc_pkt.Endpoint)
  127. + HTT_HTC_PKT_MISCLIST_SIZE;
  128. HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
  129. if (pdev->htt_htc_pkt_misclist) {
  130. u_pkt->u.next = pdev->htt_htc_pkt_misclist;
  131. pdev->htt_htc_pkt_misclist = u_pkt;
  132. } else {
  133. pdev->htt_htc_pkt_misclist = u_pkt;
  134. }
  135. HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
  136. /* only ce pipe size + tx_queue_depth could possibly be in use
  137. * free older packets in the msiclist
  138. */
  139. htt_htc_misc_pkt_list_trim(pdev, misclist_trim_level);
  140. }
  141. void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
  142. {
  143. struct htt_htc_pkt_union *pkt, *next;
  144. qdf_nbuf_t netbuf;
  145. HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
  146. pkt = pdev->htt_htc_pkt_misclist;
  147. pdev->htt_htc_pkt_misclist = NULL;
  148. HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
  149. while (pkt) {
  150. next = pkt->u.next;
  151. if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
  152. HTC_PACKET_MAGIC_COOKIE) {
  153. QDF_ASSERT(0);
  154. pkt = next;
  155. continue;
  156. }
  157. netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
  158. qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
  159. qdf_nbuf_free(netbuf);
  160. qdf_mem_free(pkt);
  161. pkt = next;
  162. }
  163. }
  164. #endif
/* AR6004 don't need HTT layer. */
/* NO_HTT_NEEDED makes htt_pdev_alloc() skip the HTC service connect */
#ifdef AR6004_HW
#define NO_HTT_NEEDED true
#else
#define NO_HTT_NEEDED false
#endif
#if defined(QCA_TX_HTT2_SUPPORT) && defined(CONFIG_HL_SUPPORT)
/**
 * htt_htc_tx_htt2_service_start() - Start TX HTT2 service
 *
 * @pdev: pointer to htt device.
 * @connect_req: pointer to service connection request information
 * @connect_resp: pointer to service connection response information
 *
 * Connects HTT_DATA2_MSG_SVC: tx only (no rx callback), credit flow
 * control disabled, HTC scheduling enabled. On connect failure the
 * endpoint is marked unused and the max size zeroed.
 *
 * Return: None
 */
static void
htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
			      struct htc_service_connect_req *connect_req,
			      struct htc_service_connect_resp *connect_resp)
{
	QDF_STATUS status;

	qdf_mem_zero(connect_req, sizeof(struct htc_service_connect_req));
	qdf_mem_zero(connect_resp, sizeof(struct htc_service_connect_resp));

	/* The same as HTT service but no RX. */
	connect_req->EpCallbacks.pContext = pdev;
	connect_req->EpCallbacks.EpTxComplete = htt_h2t_send_complete;
	connect_req->EpCallbacks.EpSendFull = htt_h2t_full;
	connect_req->MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;
	/* Should NOT support credit flow control. */
	connect_req->ConnectionFlags |=
				HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
	/* Enable HTC schedule mechanism for TX HTT2 service. */
	connect_req->ConnectionFlags |= HTC_CONNECT_FLAGS_ENABLE_HTC_SCHEDULE;

	connect_req->service_id = HTT_DATA2_MSG_SVC;

	status = htc_connect_service(pdev->htc_pdev, connect_req, connect_resp);

	if (status != QDF_STATUS_SUCCESS) {
		pdev->htc_tx_htt2_endpoint = ENDPOINT_UNUSED;
		pdev->htc_tx_htt2_max_size = 0;
	} else {
		pdev->htc_tx_htt2_endpoint = connect_resp->Endpoint;
		pdev->htc_tx_htt2_max_size = HTC_TX_HTT2_MAX_SIZE;
	}

	qdf_print("TX HTT %s, ep %d size %d\n",
		  (status == QDF_STATUS_SUCCESS ? "ON" : "OFF"),
		  pdev->htc_tx_htt2_endpoint,
		  pdev->htc_tx_htt2_max_size);
}
#else
/* TX HTT2 support not compiled in: starting the service is a no-op */
static inline void
htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
			      struct htc_service_connect_req *connect_req,
			      struct htc_service_connect_resp *connect_resp)
{
}
#endif
  222. /**
  223. * htt_htc_credit_flow_disable() - disable flow control for
  224. * HTT data message service
  225. *
  226. * @pdev: pointer to htt device.
  227. * @connect_req: pointer to service connection request information
  228. *
  229. * HTC Credit mechanism is disabled based on
  230. * default_tx_comp_req as throughput will be lower
  231. * if we disable htc credit mechanism with default_tx_comp_req
  232. * set since txrx download packet will be limited by ota
  233. * completion.
  234. *
  235. * Return: None
  236. */
  237. static
  238. void htt_htc_credit_flow_disable(struct htt_pdev_t *pdev,
  239. struct htc_service_connect_req *connect_req)
  240. {
  241. if (pdev->osdev->bus_type == QDF_BUS_TYPE_SDIO) {
  242. /*
  243. * TODO:Conditional disabling will be removed once firmware
  244. * with reduced tx completion is pushed into release builds.
  245. */
  246. if (!pdev->cfg.default_tx_comp_req)
  247. connect_req->ConnectionFlags |=
  248. HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
  249. } else {
  250. connect_req->ConnectionFlags |=
  251. HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
  252. }
  253. }
#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
/**
 * htt_dump_bundle_stats() - dump wlan stats
 * @pdev: handle to the HTT instance
 *
 * Thin wrapper: the bundle counters live in the HTC layer.
 *
 * Return: None
 */
void htt_dump_bundle_stats(htt_pdev_handle pdev)
{
	htc_dump_bundle_stats(pdev->htc_pdev);
}

/**
 * htt_clear_bundle_stats() - clear wlan stats
 * @pdev: handle to the HTT instance
 *
 * Return: None
 */
void htt_clear_bundle_stats(htt_pdev_handle pdev)
{
	htc_clear_bundle_stats(pdev->htc_pdev);
}
#endif
#if defined(QCA_WIFI_3_0_ADRASTEA)
/**
 * htt_htc_attach_all() - Connect to HTC service for HTT
 * @pdev: pdev ptr
 *
 * Return: 0 for success or error code.
 */
#if defined(QCN7605_SUPPORT) && defined(IPA_OFFLOAD)
/* In case of QCN7605 with IPA offload only 2 CE
 * are used for RFS
 */
static int
htt_htc_attach_all(struct htt_pdev_t *pdev)
{
	if (htt_htc_attach(pdev, HTT_DATA_MSG_SVC))
		goto flush_endpoint;

	if (htt_htc_attach(pdev, HTT_DATA2_MSG_SVC))
		goto flush_endpoint;

	return 0;

flush_endpoint:
	/* partial failure: drop anything already queued toward HTC */
	htc_flush_endpoint(pdev->htc_pdev, ENDPOINT_0, HTC_TX_PACKET_TAG_ALL);
	return -EIO;
}
#else
static int
htt_htc_attach_all(struct htt_pdev_t *pdev)
{
	if (htt_htc_attach(pdev, HTT_DATA_MSG_SVC))
		goto flush_endpoint;

	if (htt_htc_attach(pdev, HTT_DATA2_MSG_SVC))
		goto flush_endpoint;

	if (htt_htc_attach(pdev, HTT_DATA3_MSG_SVC))
		goto flush_endpoint;

	return 0;

flush_endpoint:
	/* partial failure: drop anything already queued toward HTC */
	htc_flush_endpoint(pdev->htc_pdev, ENDPOINT_0, HTC_TX_PACKET_TAG_ALL);
	return -EIO;
}
#endif
#else
/**
 * htt_htc_attach_all() - Connect to HTC service for HTT
 * @pdev: pdev ptr
 *
 * Return: 0 for success or error code.
 */
static int
htt_htc_attach_all(struct htt_pdev_t *pdev)
{
	return htt_htc_attach(pdev, HTT_DATA_MSG_SVC);
}
#endif
/**
 * htt_pdev_alloc() - allocate HTT pdev
 * @txrx_pdev: txrx pdev
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os device
 *
 * Allocates the HTT instance, caches configuration flags read from
 * @ctrl_pdev, initializes the tx mutexes, and connects the HTT
 * service(s) to HTC. Tx/rx descriptor resources are NOT allocated
 * here; that happens later in htt_attach().
 *
 * Return: HTT pdev handle, or NULL on failure
 */
htt_pdev_handle
htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
	       struct cdp_cfg *ctrl_pdev,
	       HTC_HANDLE htc_pdev, qdf_device_t osdev)
{
	struct htt_pdev_t *pdev;
	struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

	if (!osc)
		goto fail1;

	pdev = qdf_mem_malloc(sizeof(*pdev));
	if (!pdev)
		goto fail1;

	pdev->osdev = osdev;
	pdev->ctrl_pdev = ctrl_pdev;
	pdev->txrx_pdev = txrx_pdev;
	pdev->htc_pdev = htc_pdev;

	pdev->htt_htc_pkt_freelist = NULL;
#ifdef ATH_11AC_TXCOMPACT
	pdev->htt_htc_pkt_misclist = NULL;
#endif

	/* for efficiency, store a local copy of the is_high_latency flag */
	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(pdev->ctrl_pdev);

	/*
	 * Credit reporting through HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
	 * enabled or not.
	 */
	pdev->cfg.credit_update_enabled =
		ol_cfg_is_credit_update_enabled(pdev->ctrl_pdev);

	/* tx completions are requested when PTP rx opt or pktlog is on */
	pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() ||
		cds_is_packet_log_enabled();

	pdev->cfg.default_tx_comp_req =
			!ol_cfg_tx_free_at_download(pdev->ctrl_pdev);

	pdev->cfg.is_full_reorder_offload =
			ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev);
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
		  "full_reorder_offloaded %d",
		  (int)pdev->cfg.is_full_reorder_offload);

	pdev->cfg.ce_classify_enabled =
		ol_cfg_is_ce_classify_enabled(ctrl_pdev);
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
		  "ce_classify %d",
		  pdev->cfg.ce_classify_enabled);

	if (pdev->cfg.is_high_latency) {
		/* HL keeps target/bus credit deltas; seed the bus delta */
		qdf_atomic_init(&pdev->htt_tx_credit.target_delta);
		qdf_atomic_init(&pdev->htt_tx_credit.bus_delta);
		qdf_atomic_add(HTT_MAX_BUS_CREDIT,
			       &pdev->htt_tx_credit.bus_delta);
	}

	pdev->targetdef = htc_get_targetdef(htc_pdev);
#if defined(HELIUMPLUS)
	HTT_SET_WIFI_IP(pdev, 2, 0);
#endif /* defined(HELIUMPLUS) */

	if (NO_HTT_NEEDED)
		goto success;
	/*
	 * Connect to HTC service.
	 * This has to be done before calling htt_rx_attach,
	 * since htt_rx_attach involves sending a rx ring configure
	 * message to the target.
	 */
	HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev);
	HTT_TX_MUTEX_INIT(&pdev->credit_mutex);
	if (htt_htc_attach_all(pdev))
		goto htt_htc_attach_fail;
	if (hif_ce_fastpath_cb_register(osc, htt_t2h_msg_handler_fast, pdev))
		qdf_print("failed to register fastpath callback\n");

success:
	return pdev;

htt_htc_attach_fail:
	/* unwind everything initialized above before freeing the pdev */
	HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex);
	HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
	qdf_mem_free(pdev);

fail1:
	return NULL;
}
/**
 * htt_attach() - Allocate and setup HTT TX/RX descriptors
 * @pdev: pdev ptr
 * @desc_pool_size: size of tx descriptors
 *
 * Attaches tx then rx descriptor resources, pre-allocates a pool of
 * HTC_PACKET containers, and configures the download length plus the
 * HL- or LL-specific handlers and rx fw desc offset.
 *
 * Return: 0 for success or error code.
 */
int
htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
{
	int i;
	int ret = 0;

	pdev->is_ipa_uc_enabled = false;
	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
		pdev->is_ipa_uc_enabled = true;

	pdev->new_htt_format_enabled = false;
	if (ol_cfg_is_htt_new_format_enabled(pdev->ctrl_pdev))
		pdev->new_htt_format_enabled = true;

	htc_enable_hdr_length_check(pdev->htc_pdev,
				    pdev->new_htt_format_enabled);

	ret = htt_tx_attach(pdev, desc_pool_size);
	if (ret)
		goto fail1;

	ret = htt_rx_attach(pdev);
	if (ret)
		goto fail2;

	/* pre-allocate some HTC_PACKET objects */
	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
		struct htt_htc_pkt_union *pkt;

		pkt = qdf_mem_malloc(sizeof(*pkt));
		if (!pkt)
			break;  /* best-effort pool fill; not an error */
		htt_htc_pkt_free(pdev, &pkt->u.pkt);
	}

	if (pdev->cfg.is_high_latency) {
		/*
		 * HL - download the whole frame.
		 * Specify a download length greater than the max MSDU size,
		 * so the downloads will be limited by the actual frame sizes.
		 */
		pdev->download_len = 5000;

		if (ol_cfg_tx_free_at_download(pdev->ctrl_pdev) &&
		    !pdev->cfg.request_tx_comp)
			pdev->tx_send_complete_part2 =
						ol_tx_download_done_hl_free;
		else
			pdev->tx_send_complete_part2 =
						ol_tx_download_done_hl_retain;

		/*
		 * CHECK THIS LATER: does the HL HTT version of
		 * htt_rx_mpdu_desc_list_next
		 * (which is not currently implemented) present the
		 * adf_nbuf_data(rx_ind_msg)
		 * as the abstract rx descriptor?
		 * If not, the rx_fw_desc_offset initialization
		 * here will have to be adjusted accordingly.
		 * NOTE: for HL, because fw rx desc is in ind msg,
		 * not in rx desc, so the
		 * offset should be negtive value
		 */
		pdev->rx_fw_desc_offset =
			HTT_ENDIAN_BYTE_IDX_SWAP(
					HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET
					- HTT_RX_IND_HL_BYTES);

		htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_hl;
		htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_hl;

		/* initialize the txrx credit count */
		ol_tx_target_credit_update(
				pdev->txrx_pdev, ol_cfg_target_tx_credit(
					pdev->ctrl_pdev));
		DPTRACE(qdf_dp_trace_credit_record(QDF_HTT_ATTACH,
			QDF_CREDIT_INC,
			ol_cfg_target_tx_credit(pdev->ctrl_pdev),
			qdf_atomic_read(&pdev->txrx_pdev->target_tx_credit),
			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[0].credit),
			qdf_atomic_read(&pdev->txrx_pdev->txq_grps[1].credit)));
	} else {
		enum wlan_frm_fmt frm_type;

		/*
		 * LL - download just the initial portion of the frame.
		 * Download enough to cover the encapsulation headers checked
		 * by the target's tx classification descriptor engine.
		 *
		 * For LL, the FW rx desc directly referenced at its location
		 * inside the rx indication message.
		 */

		/* account for the 802.3 or 802.11 header */
		frm_type = ol_cfg_frame_type(pdev->ctrl_pdev);
		if (frm_type == wlan_frm_fmt_native_wifi) {
			pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI;
		} else if (frm_type == wlan_frm_fmt_802_3) {
			pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET;
		} else {
			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
				  "Unexpected frame type spec: %d", frm_type);
			HTT_ASSERT0(0);
		}

		/*
		 * Account for the optional L2 / ethernet header fields:
		 * 802.1Q, LLC/SNAP
		 */
		pdev->download_len +=
			HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP;

		/*
		 * Account for the portion of the L3 (IP) payload that the
		 * target needs for its tx classification.
		 */
		pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev);

		/*
		 * Account for the HTT tx descriptor, including the
		 * HTC header + alignment padding.
		 */
		pdev->download_len += sizeof(struct htt_host_tx_desc_t);

		/*
		 * The TXCOMPACT htt_tx_sched function uses pdev->download_len
		 * to apply for all requeued tx frames. Thus,
		 * pdev->download_len has to be the largest download length of
		 * any tx frame that will be downloaded.
		 * This maximum download length is for management tx frames,
		 * which have an 802.11 header.
		 */
#ifdef ATH_11AC_TXCOMPACT
		pdev->download_len = sizeof(struct htt_host_tx_desc_t)
			+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
			+ HTT_TX_HDR_SIZE_802_1Q
			+ HTT_TX_HDR_SIZE_LLC_SNAP
			+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
#endif
		pdev->tx_send_complete_part2 = ol_tx_download_done_ll;

		/*
		 * For LL, the FW rx desc is alongside the HW rx desc fields in
		 * the htt_host_rx_desc_base struct/.
		 */
		pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;

		htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
		htt_h2t_rx_ring_rfs_cfg_msg = htt_h2t_rx_ring_rfs_cfg_msg_ll;
	}

	return 0;

fail2:
	htt_tx_detach(pdev);

fail1:
	return ret;
}
/**
 * htt_attach_target() - run the HTT configuration handshake with the target
 * @pdev: handle to the HTT instance
 *
 * Sends, in order: the HTT version request, the fragment descriptor bank
 * config (HELIUMPLUS only), the rx ring RFS config, the rx ring config,
 * and (when IPA offload is compiled in) the IPA uC resource config.
 * Stops at the first message that fails to send.
 *
 * Return: QDF_STATUS_SUCCESS if every message was sent, else the status
 * of the failing step
 */
QDF_STATUS htt_attach_target(htt_pdev_handle pdev)
{
	QDF_STATUS status;

	status = htt_h2t_ver_req_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_ver_req msg",
			  __func__, __LINE__);
		return status;
	}
#if defined(HELIUMPLUS)
	/*
	 * Send the frag_desc info to target.
	 */
	status = htt_h2t_frag_desc_bank_cfg_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_frag_desc_bank_cfg msg",
			  __func__, __LINE__);
		return status;
	}
#endif /* defined(HELIUMPLUS) */

	/*
	 * If applicable, send the rx ring config message to the target.
	 * The host could wait for the HTT version number confirmation message
	 * from the target before sending any further HTT messages, but it's
	 * reasonable to assume that the host and target HTT version numbers
	 * match, and proceed immediately with the remaining configuration
	 * handshaking.
	 */

	status = htt_h2t_rx_ring_rfs_cfg_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_rx_ring_rfs_cfg msg",
			  __func__, __LINE__);
		return status;
	}

	status = htt_h2t_rx_ring_cfg_msg(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_rx_ring_cfg msg",
			  __func__, __LINE__);
		return status;
	}

	/* no-op unless IPA_OFFLOAD is defined (see HTT_IPA_CONFIG macro) */
	status = HTT_IPA_CONFIG(pdev, status);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: could not send h2t_ipa_uc_rsc_cfg msg",
			  __func__, __LINE__);
		return status;
	}

	return status;
}
/**
 * htt_detach() - tear down the resources allocated by htt_attach()
 * @pdev: handle to the HTT instance
 *
 * Frees rx then tx descriptor resources, releases the HTC packet pools,
 * and destroys the mutexes initialized in htt_pdev_alloc(). The pdev
 * itself is freed separately by htt_pdev_free().
 */
void htt_detach(htt_pdev_handle pdev)
{
	htt_rx_detach(pdev);
	htt_tx_detach(pdev);
	htt_htc_pkt_pool_free(pdev);
#ifdef ATH_11AC_TXCOMPACT
	htt_htc_misc_pkt_pool_free(pdev);
#endif
	HTT_TX_MUTEX_DESTROY(&pdev->credit_mutex);
	HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
	HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
}
/**
 * htt_pdev_free() - Free HTT pdev
 * @pdev: htt pdev
 *
 * Return: none
 */
void htt_pdev_free(htt_pdev_handle pdev)
{
	qdf_mem_free(pdev);
}

/* Intentionally empty: no target-side teardown is performed here. */
void htt_detach_target(htt_pdev_handle pdev)
{
}
/**
 * htt_update_endpoint() - record the HTC tx endpoint for a tx service
 * @pdev: handle to the HTT instance
 * @service_id: HTC service that was just connected
 * @ep: endpoint HTC assigned to the service
 *
 * Asks HIF which pipes back the service; when an upload pipe exists the
 * service is a tx service, so the endpoint is cached in the pdev and
 * reported to HIF. If the HIF context is unavailable or the mapping
 * fails, the service is conservatively treated as non-tx.
 *
 * Return: 1 if the endpoint was recorded as the tx endpoint, else 0
 */
static inline
int htt_update_endpoint(struct htt_pdev_t *pdev,
			uint16_t service_id, HTC_ENDPOINT_ID ep)
{
	struct hif_opaque_softc *hif_ctx;
	uint8_t ul = 0xff, dl = 0xff;
	int ul_polled, dl_polled;
	int tx_service = 0;
	int rc = 0;

	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
	if (qdf_unlikely(!hif_ctx)) {
		QDF_ASSERT(hif_ctx);
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s:%d: assuming non-tx service.",
			  __func__, __LINE__);
	} else {
		ul = dl = 0xff;
		if (QDF_STATUS_SUCCESS !=
		    hif_map_service_to_pipe(hif_ctx, service_id,
					    &ul, &dl,
					    &ul_polled, &dl_polled))
			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
				  "%s:%d: assuming non-tx srv.",
				  __func__, __LINE__);
		else
			/* 0xff means "no upload pipe": not a tx service */
			tx_service = (ul != 0xff);
	}
	if (tx_service) {
		/* currently we have only one OUT htt tx service */
		QDF_BUG(service_id == HTT_DATA_MSG_SVC);

		pdev->htc_tx_endpoint = ep;
		hif_save_htc_htt_config_endpoint(hif_ctx, ep);
		rc = 1;
	}
	return rc;
}
/**
 * htt_htc_attach() - connect one HTT message service to HTC
 * @pdev: handle to the HTT instance
 * @service_id: HTT service to connect (e.g. HTT_DATA_MSG_SVC)
 *
 * Fills in the endpoint callbacks, optionally disables HTC credit flow
 * control, connects the service, records the resulting tx endpoint, and
 * starts the TX HTT2 service when support is compiled in.
 *
 * Return: 0 on success, negative value on connect failure
 */
int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = pdev;
	connect.EpCallbacks.EpTxComplete = htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = htt_t2h_msg_handler;
	connect.EpCallbacks.ep_resume_tx_queue = htt_tx_resume_handler;
	connect.EpCallbacks.ep_padding_credit_update =
					htt_tx_padding_credit_update_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.RecvRefillWaterMark = 1;
	/* N/A, fill is done by HIF */

	connect.EpCallbacks.EpSendFull = htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	htt_htc_credit_flow_disable(pdev, &connect);

	/* connect to control service */
	connect.service_id = service_id;

	status = htc_connect_service(pdev->htc_pdev, &connect, &response);

	if (status != QDF_STATUS_SUCCESS) {
		/* fw down / recovery paths return an error; any other
		 * connect failure is unexpected and trips QDF_BUG
		 */
		if (cds_is_fw_down())
			return -EIO;

		if (status == QDF_STATUS_E_NOMEM ||
		    cds_is_self_recovery_enabled())
			return qdf_status_to_os_return(status);

		QDF_BUG(0);
	}

	htt_update_endpoint(pdev, service_id, response.Endpoint);

	/* Start TX HTT2 service if the target support it. */
	htt_htc_tx_htt2_service_start(pdev, &connect, &response);

	return 0;               /* success */
}
/**
 * htt_log_rx_ring_info() - log host rx ring state for data-stall debugging
 * @pdev: handle to the HTT instance (logged and ignored if NULL)
 *
 * Emits the rx ring size, fill level, fill count and refill debt; used
 * when a FW_RX_REFILL_FAILED data stall is detected.
 */
void htt_log_rx_ring_info(htt_pdev_handle pdev)
{
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s: htt pdev is NULL", __func__);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Data Stall Detected with reason 4 (=FW_RX_REFILL_FAILED)."
		  "src htt rx ring: space for %d elements, filled with %d buffers, buffers in the ring %d, refill debt %d",
		  __func__, pdev->rx_ring.size, pdev->rx_ring.fill_level,
		  pdev->rx_ring.fill_cnt,
		  qdf_atomic_read(&pdev->rx_ring.refill_debt));
}
#if HTT_DEBUG_LEVEL > 5
/**
 * htt_display() - print a summary of the HTT instance state
 * @pdev: handle to the HTT instance
 * @indent: number of leading spaces for the outermost lines
 *
 * Debug-only (HTT_DEBUG_LEVEL > 5): dumps tx descriptor pool counters
 * and rx ring pointers/indices via qdf_print.
 */
void htt_display(htt_pdev_handle pdev, int indent)
{
	qdf_print("%*s%s:\n", indent, " ", "HTT");
	qdf_print("%*stx desc pool: %d elems of %d bytes, %d allocated\n",
		  indent + 4, " ",
		  pdev->tx_descs.pool_elems,
		  pdev->tx_descs.size, pdev->tx_descs.alloc_cnt);
	qdf_print("%*srx ring: space for %d elems, filled with %d buffers\n",
		  indent + 4, " ",
		  pdev->rx_ring.size, pdev->rx_ring.fill_level);
	qdf_print("%*sat %pK (%llx paddr)\n", indent + 8, " ",
		  pdev->rx_ring.buf.paddrs_ring,
		  (unsigned long long)pdev->rx_ring.base_paddr);
	qdf_print("%*snetbuf ring @ %pK\n", indent + 8, " ",
		  pdev->rx_ring.buf.netbufs_ring);
	qdf_print("%*sFW_IDX shadow register: vaddr = %pK, paddr = %llx\n",
		  indent + 8, " ",
		  pdev->rx_ring.alloc_idx.vaddr,
		  (unsigned long long)pdev->rx_ring.alloc_idx.paddr);
	qdf_print("%*sSW enqueue idx= %d, SW dequeue idx: desc= %d, buf= %d\n",
		  indent + 8, " ", *pdev->rx_ring.alloc_idx.vaddr,
		  pdev->rx_ring.sw_rd_idx.msdu_desc,
		  pdev->rx_ring.sw_rd_idx.msdu_payld);
}
#endif
  755. #ifdef IPA_OFFLOAD
/**
 * htt_ipa_uc_attach() - Allocate UC data path resources
 * @pdev: handle to the HTT instance
 *
 * Allocates the IPA micro-controller tx resources (sizes taken from cfg)
 * and rx resources (ring sized to the next power of two of the rx fill
 * level). On rx attach failure the tx resources are rolled back.
 *
 * Return: 0 success
 * none 0 fail
 */
int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
{
	int error;

	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter",
		  __func__);

	/* TX resource attach */
	error = htt_tx_ipa_uc_attach(
		pdev,
		ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev),
		ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev),
		ol_cfg_ipa_uc_tx_partition_base(pdev->ctrl_pdev));
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "HTT IPA UC TX attach fail code %d", error);
		HTT_ASSERT0(0);
		return error;
	}

	/* RX resource attach */
	error = htt_rx_ipa_uc_attach(
		pdev, qdf_get_pwr2(pdev->rx_ring.fill_level));
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "HTT IPA UC RX attach fail code %d", error);
		/* roll back the tx attach done above */
		htt_tx_ipa_uc_detach(pdev);
		HTT_ASSERT0(0);
		return error;
	}

	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: exit",
		  __func__);
	return 0;               /* success */
}
/**
 * htt_ipa_uc_detach() - Remove UC data path resources
 * @pdev: handle to the HTT instance
 *
 * Return: None
 */
void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter",
		  __func__);

	/* TX IPA micro controller detach */
	htt_tx_ipa_uc_detach(pdev);

	/* RX IPA micro controller detach */
	htt_rx_ipa_uc_detach(pdev);

	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: exit",
		  __func__);
}
/**
 * htt_ipa_uc_get_resource() - hand the allocated uC resources to the client
 * @pdev: handle to the HTT instance
 * @ce_sr: copy engine source ring (filled by htc_ipa_get_ce_resource())
 * @tx_comp_ring: tx completion ring
 * @rx_rdy_ring: rx indication ring
 * @rx2_rdy_ring: second rx indication ring
 * @rx_proc_done_idx: rx IPA processing-done index
 * @rx2_proc_done_idx: second rx IPA processing-done index
 * @ce_sr_ring_size: copy engine source ring size (filled by HTC)
 * @ce_reg_paddr: copy engine register physical address (filled by HTC)
 * @tx_num_alloc_buffer: number of allocated tx buffers
 *
 * Return: 0 (always)
 */
int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
			qdf_shared_mem_t **ce_sr,
			qdf_shared_mem_t **tx_comp_ring,
			qdf_shared_mem_t **rx_rdy_ring,
			qdf_shared_mem_t **rx2_rdy_ring,
			qdf_shared_mem_t **rx_proc_done_idx,
			qdf_shared_mem_t **rx2_proc_done_idx,
			uint32_t *ce_sr_ring_size,
			qdf_dma_addr_t *ce_reg_paddr,
			uint32_t *tx_num_alloc_buffer)
{
	/* Release allocated resource to client */
	*tx_comp_ring = pdev->ipa_uc_tx_rsc.tx_comp_ring;
	*rx_rdy_ring = pdev->ipa_uc_rx_rsc.rx_ind_ring;
	*rx2_rdy_ring = pdev->ipa_uc_rx_rsc.rx2_ind_ring;
	*rx_proc_done_idx = pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx;
	*rx2_proc_done_idx = pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx;
	*tx_num_alloc_buffer = (uint32_t)pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	/* Get copy engine, bus resource */
	htc_ipa_get_ce_resource(pdev->htc_pdev, ce_sr,
				ce_sr_ring_size, ce_reg_paddr);

	return 0;
}
/**
 * htt_ipa_uc_set_doorbell_paddr() - Propagate IPA doorbell address
 * @pdev: handle to the HTT instance
 * @ipa_uc_tx_doorbell_paddr: TX doorbell base physical address
 * @ipa_uc_rx_doorbell_paddr: RX doorbell base physical address
 *
 * Only records the addresses in the pdev's uC resource structs; nothing
 * is sent to the target here.
 *
 * Return: 0 success
 */
int
htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
			      qdf_dma_addr_t ipa_uc_tx_doorbell_paddr,
			      qdf_dma_addr_t ipa_uc_rx_doorbell_paddr)
{
	pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr = ipa_uc_tx_doorbell_paddr;
	pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr = ipa_uc_rx_doorbell_paddr;
	return 0;
}
  852. #endif /* IPA_OFFLOAD */
/**
 * htt_mark_first_wakeup_packet() - set flag to indicate that
 * fw is compatible for marking first packet after wow wakeup
 * @pdev: pointer to htt pdev (logged and ignored if NULL)
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
void htt_mark_first_wakeup_packet(htt_pdev_handle pdev,
				  uint8_t value)
{
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "%s: htt pdev is NULL", __func__);
		return;
	}

	pdev->cfg.is_first_wakeup_packet = value;
}