  1. // SPDX-License-Identifier: ISC
  2. /*
  3. * Copyright (c) 2005-2011 Atheros Communications Inc.
  4. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  5. */
  6. #include "core.h"
  7. #include "hif.h"
  8. #include "debug.h"
  9. /********/
  10. /* Send */
  11. /********/
/* TX-complete callback for the EP 0 control endpoint: control frames
 * need no further bookkeeping, simply free the skb.
 */
static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}
  17. static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
  18. {
  19. struct sk_buff *skb;
  20. struct ath10k_skb_cb *skb_cb;
  21. skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
  22. if (!skb)
  23. return NULL;
  24. skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
  25. WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
  26. skb_cb = ATH10K_SKB_CB(skb);
  27. memset(skb_cb, 0, sizeof(*skb_cb));
  28. ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
  29. return skb;
  30. }
  31. static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
  32. struct sk_buff *skb)
  33. {
  34. struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
  35. if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
  36. dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
  37. skb_pull(skb, sizeof(struct ath10k_htc_hdr));
  38. }
/* Hand a transmitted skb back to the endpoint owner.
 *
 * Unmaps the buffer (non-HL buses) and strips the HTC header, then
 * either invokes the endpoint's ep_tx_complete callback or frees the
 * skb when no handler is registered or the frame was sent as part of a
 * TX bundle.
 */
void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
				     struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;
	struct ath10k_htc_hdr *hdr;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
		   ep->eid, skb);

	/* A corner case where the copy completion is reaching to host but still
	 * copy engine is processing it due to which host unmaps corresponding
	 * memory and causes SMMU fault, hence as workaround adding delay
	 * the unmapping memory to avoid SMMU faults.
	 */
	if (ar->hw_params.delay_unmap_buffer &&
	    ep->ul_pipe_id == 3)
		mdelay(2);

	/* grab the header pointer before restore_tx_skb pulls it off */
	hdr = (struct ath10k_htc_hdr *)skb->data;
	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	/* bundle copies are freed here; the saved originals complete via
	 * the tx_complete work path instead
	 */
	if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
  68. static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
  69. struct sk_buff *skb)
  70. {
  71. struct ath10k_htc_hdr *hdr;
  72. hdr = (struct ath10k_htc_hdr *)skb->data;
  73. memset(hdr, 0, sizeof(struct ath10k_htc_hdr));
  74. hdr->eid = ep->eid;
  75. hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
  76. hdr->flags = 0;
  77. if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
  78. hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
  79. spin_lock_bh(&ep->htc->tx_lock);
  80. hdr->seq_no = ep->seq_no++;
  81. spin_unlock_bh(&ep->htc->tx_lock);
  82. }
  83. static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
  84. unsigned int len,
  85. bool consume)
  86. {
  87. struct ath10k_htc *htc = ep->htc;
  88. struct ath10k *ar = htc->ar;
  89. enum ath10k_htc_ep_id eid = ep->eid;
  90. int credits, ret = 0;
  91. if (!ep->tx_credit_flow_enabled)
  92. return 0;
  93. credits = DIV_ROUND_UP(len, ep->tx_credit_size);
  94. spin_lock_bh(&htc->tx_lock);
  95. if (ep->tx_credits < credits) {
  96. ath10k_dbg(ar, ATH10K_DBG_HTC,
  97. "htc insufficient credits ep %d required %d available %d consume %d\n",
  98. eid, credits, ep->tx_credits, consume);
  99. ret = -EAGAIN;
  100. goto unlock;
  101. }
  102. if (consume) {
  103. ep->tx_credits -= credits;
  104. ath10k_dbg(ar, ATH10K_DBG_HTC,
  105. "htc ep %d consumed %d credits total %d\n",
  106. eid, credits, ep->tx_credits);
  107. }
  108. unlock:
  109. spin_unlock_bh(&htc->tx_lock);
  110. return ret;
  111. }
  112. static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
  113. {
  114. struct ath10k_htc *htc = ep->htc;
  115. struct ath10k *ar = htc->ar;
  116. enum ath10k_htc_ep_id eid = ep->eid;
  117. int credits;
  118. if (!ep->tx_credit_flow_enabled)
  119. return;
  120. credits = DIV_ROUND_UP(len, ep->tx_credit_size);
  121. spin_lock_bh(&htc->tx_lock);
  122. ep->tx_credits += credits;
  123. ath10k_dbg(ar, ATH10K_DBG_HTC,
  124. "htc ep %d reverted %d credits back total %d\n",
  125. eid, credits, ep->tx_credits);
  126. spin_unlock_bh(&htc->tx_lock);
  127. if (ep->ep_ops.ep_tx_credits)
  128. ep->ep_ops.ep_tx_credits(htc->ar);
  129. }
/* Queue one frame for transmission on the given endpoint.
 *
 * Pushes an HTC header onto the skb, consumes TX credits, DMA-maps the
 * buffer (non-HL buses) and hands it to the HIF layer.  On any failure
 * the skb is restored to its original state and the credits are given
 * back, so the caller retains ownership of the skb when an error code
 * is returned.
 */
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int ret;
	unsigned int skb_len;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	/* remember the full length (incl. header) for credit accounting */
	skb_len = skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);
	if (ret)
		goto err_pull;

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
		skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(dev, skb_cb->paddr);
		if (ret) {
			ret = -EIO;
			goto err_credits;
		}
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

	/* unwind in reverse order of setup */
err_unmap:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	ath10k_htc_release_credit(ep, skb_len);
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}
  181. void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
  182. {
  183. struct ath10k_htc *htc = &ar->htc;
  184. struct ath10k_skb_cb *skb_cb;
  185. struct ath10k_htc_ep *ep;
  186. if (WARN_ON_ONCE(!skb))
  187. return;
  188. skb_cb = ATH10K_SKB_CB(skb);
  189. ep = &htc->endpoint[skb_cb->eid];
  190. ath10k_htc_notify_tx_completion(ep, skb);
  191. /* the skb now belongs to the completion handler */
  192. }
  193. EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
  194. /***********/
  195. /* Receive */
  196. /***********/
/* Apply a credit report received from the target: replenish the TX
 * credits of each listed endpoint and notify endpoints that registered
 * an ep_tx_credits callback.
 *
 * Parsing stops at the first report with an out-of-range endpoint id.
 * tx_lock is dropped around the ep_tx_credits callback because the
 * callback may itself try to transmit and re-take the lock.
 */
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

		if (ep->ep_ops.ep_tx_credits) {
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}
  225. static int
  226. ath10k_htc_process_lookahead(struct ath10k_htc *htc,
  227. const struct ath10k_htc_lookahead_report *report,
  228. int len,
  229. enum ath10k_htc_ep_id eid,
  230. void *next_lookaheads,
  231. int *next_lookaheads_len)
  232. {
  233. struct ath10k *ar = htc->ar;
  234. /* Invalid lookahead flags are actually transmitted by
  235. * the target in the HTC control message.
  236. * Since this will happen at every boot we silently ignore
  237. * the lookahead in this case
  238. */
  239. if (report->pre_valid != ((~report->post_valid) & 0xFF))
  240. return 0;
  241. if (next_lookaheads && next_lookaheads_len) {
  242. ath10k_dbg(ar, ATH10K_DBG_HTC,
  243. "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
  244. report->pre_valid, report->post_valid);
  245. /* look ahead bytes are valid, copy them over */
  246. memcpy((u8 *)next_lookaheads, report->lookahead, 4);
  247. *next_lookaheads_len = 1;
  248. }
  249. return 0;
  250. }
  251. static int
  252. ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
  253. const struct ath10k_htc_lookahead_bundle *report,
  254. int len,
  255. enum ath10k_htc_ep_id eid,
  256. void *next_lookaheads,
  257. int *next_lookaheads_len)
  258. {
  259. struct ath10k *ar = htc->ar;
  260. int bundle_cnt = len / sizeof(*report);
  261. if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
  262. ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
  263. bundle_cnt);
  264. return -EINVAL;
  265. }
  266. if (next_lookaheads && next_lookaheads_len) {
  267. int i;
  268. for (i = 0; i < bundle_cnt; i++) {
  269. memcpy(((u8 *)next_lookaheads) + 4 * i,
  270. report->lookahead, 4);
  271. report++;
  272. }
  273. *next_lookaheads_len = bundle_cnt;
  274. }
  275. return 0;
  276. }
/* Parse the trailer the target appends to RX frames.
 *
 * The trailer is a sequence of length-prefixed records (credit reports,
 * lookahead reports, lookahead bundles).  Each recognized record updates
 * HTC state; unknown record ids are logged and skipped.  Returns 0 on
 * success or -EINVAL on a malformed trailer, in which case the whole
 * trailer is dumped at debug level.
 */
int ath10k_htc_process_trailer(struct ath10k_htc *htc,
			       u8 *buffer,
			       int length,
			       enum ath10k_htc_ep_id src_eid,
			       void *next_lookaheads,
			       int *next_lookaheads_len)
{
	struct ath10k_htc_lookahead_bundle *bundle;
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	/* keep the original span for the error dump at the end */
	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report too long\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD:
			len = sizeof(struct ath10k_htc_lookahead_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Lookahead report too long\n");
				status = -EINVAL;
				break;
			}
			status = ath10k_htc_process_lookahead(htc,
							      record->lookahead_report,
							      record->hdr.len,
							      src_eid,
							      next_lookaheads,
							      next_lookaheads_len);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
			bundle = record->lookahead_bundle;
			status = ath10k_htc_process_lookahead_bundle(htc,
								     bundle,
								     record->hdr.len,
								     src_eid,
								     next_lookaheads,
								     next_lookaheads_len);
			break;
		default:
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}
EXPORT_SYMBOL(ath10k_htc_process_trailer);
/* Main HTC RX path: validate and strip the HTC header, process any
 * trailer records and deliver the payload to the endpoint's RX
 * callback.
 *
 * On any validation failure the skb is dropped (freed) here; on the
 * success path ownership passes to ep_rx_complete.
 */
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	/* keep hdr pointing at the header even after the pull */
	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));
	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];
	if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
		ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
		goto out;
	}

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;

	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		/* trailer must hold at least one record header and fit
		 * inside the payload
		 */
		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		/* the trailer occupies the last trailer_len payload bytes */
		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;

		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid,
						    NULL, NULL);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
/* RX handler for the EP 0 control endpoint.
 *
 * READY and CONNECT_SERVICE_RESP messages are copied into the control
 * response buffer and signalled via ctl_resp so that the waiters in
 * wait_target()/connect_service() can proceed.  The skb is always
 * consumed here.
 */
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

	switch (__le16_to_cpu(msg->hdr.message_id)) {
	case ATH10K_HTC_MSG_READY_ID:
	case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
		/* handle HTC control message */
		if (completion_done(&htc->ctl_resp)) {
			/* this is a fatal error, target should not be
			 * sending unsolicited messages on the ep 0
			 */
			ath10k_warn(ar, "HTC rx ctrl still processing\n");
			complete(&htc->ctl_resp);
			goto out;
		}

		/* cap the copy to the response buffer size */
		htc->control_resp_len =
			min_t(int, skb->len,
			      ATH10K_HTC_MAX_CTRL_MSG_LEN);

		memcpy(htc->control_resp_buffer, skb->data,
		       htc->control_resp_len);

		complete(&htc->ctl_resp);
		break;
	case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
		htc->htc_ops.target_send_suspend_complete(ar);
		break;
	default:
		ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
		break;
	}

out:
	kfree_skb(skb);
}
  469. /***************/
  470. /* Init/Deinit */
  471. /***************/
  472. static const char *htc_service_name(enum ath10k_htc_svc_id id)
  473. {
  474. switch (id) {
  475. case ATH10K_HTC_SVC_ID_RESERVED:
  476. return "Reserved";
  477. case ATH10K_HTC_SVC_ID_RSVD_CTRL:
  478. return "Control";
  479. case ATH10K_HTC_SVC_ID_WMI_CONTROL:
  480. return "WMI";
  481. case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
  482. return "DATA BE";
  483. case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
  484. return "DATA BK";
  485. case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
  486. return "DATA VI";
  487. case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
  488. return "DATA VO";
  489. case ATH10K_HTC_SVC_ID_NMI_CONTROL:
  490. return "NMI Control";
  491. case ATH10K_HTC_SVC_ID_NMI_DATA:
  492. return "NMI Data";
  493. case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
  494. return "HTT Data";
  495. case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
  496. return "HTT Data";
  497. case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
  498. return "HTT Data";
  499. case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
  500. return "RAW";
  501. case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
  502. return "PKTLOG";
  503. }
  504. return "Unknown";
  505. }
  506. static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
  507. {
  508. struct ath10k_htc_ep *ep;
  509. int i;
  510. for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
  511. ep = &htc->endpoint[i];
  512. ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
  513. ep->max_ep_message_len = 0;
  514. ep->max_tx_queue_depth = 0;
  515. ep->eid = i;
  516. ep->htc = htc;
  517. ep->tx_credit_flow_enabled = true;
  518. }
  519. }
  520. static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
  521. u16 service_id)
  522. {
  523. u8 allocation = 0;
  524. /* The WMI control service is the only service with flow control.
  525. * Let it have all transmit credits.
  526. */
  527. if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
  528. allocation = htc->total_transmit_credits;
  529. return allocation;
  530. }
/* Transmit one assembled bundle skb over HIF.
 *
 * Credits for the whole bundle are consumed up front.  On success the
 * saved per-frame skbs in @tx_save_head are moved to the endpoint's
 * completion queue and the completion work is scheduled; on failure
 * they are pushed back (header stripped) onto the request queue for a
 * later retry and the bundle skb is freed.
 */
static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
				  struct sk_buff *bundle_skb,
				  struct sk_buff_head *tx_save_head)
{
	struct ath10k_hif_sg_item sg_item;
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int ret, cn = 0;
	unsigned int skb_len;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
	skb_len = bundle_skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);

	if (!ret) {
		sg_item.transfer_id = ep->eid;
		sg_item.transfer_context = bundle_skb;
		sg_item.vaddr = bundle_skb->data;
		sg_item.len = bundle_skb->len;

		ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
		if (ret)
			/* HIF refused the bundle; give the credits back */
			ath10k_htc_release_credit(ep, skb_len);
	}

	if (ret)
		dev_kfree_skb_any(bundle_skb);

	/* drain the saved originals tail-first so that re-queueing at the
	 * head restores the original order on failure
	 */
	for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
		if (ret) {
			skb_pull(skb, sizeof(struct ath10k_htc_hdr));
			skb_queue_head(&ep->tx_req_head, skb);
		} else {
			skb_queue_tail(&ep->tx_complete_head, skb);
		}
	}

	if (!ret)
		queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "bundle tx status %d eid %d req count %d count %d len %d\n",
		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
	return ret;
}
  570. static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
  571. {
  572. struct ath10k_htc *htc = ep->htc;
  573. struct ath10k *ar = htc->ar;
  574. int ret;
  575. ret = ath10k_htc_send(htc, ep->eid, skb);
  576. if (ret)
  577. skb_queue_head(&ep->tx_req_head, skb);
  578. ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
  579. ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
  580. }
/* Drain the endpoint's request queue into one or more TX bundles.
 *
 * Frames are copied back-to-back into a bundle buffer, each padded to
 * a whole credit; the originals are saved on tx_save_head so they can
 * be completed (or re-queued on failure) once the bundle has been
 * sent.  A bundle is flushed whenever the next frame would not fit,
 * and a fresh bundle buffer is then started.  Returns 0 on success or
 * a negative errno.
 */
static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
{
	struct ath10k_htc *htc = ep->htc;
	struct sk_buff *bundle_skb, *skb;
	struct sk_buff_head tx_save_head;
	struct ath10k_htc_hdr *hdr;
	u8 *bundle_buf;
	int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (ep->tx_credit_flow_enabled &&
	    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
		return 0;

	/* bundles_left tracks the remaining capacity of the bundle buf */
	bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
	bundle_skb = dev_alloc_skb(bundles_left);

	if (!bundle_skb)
		return -ENOMEM;

	bundle_buf = bundle_skb->data;
	skb_queue_head_init(&tx_save_head);

	while (true) {
		skb = skb_dequeue(&ep->tx_req_head);
		if (!skb)
			break;

		credit_pad = 0;
		trans_len = skb->len + sizeof(*hdr);
		credit_remainder = trans_len % ep->tx_credit_size;

		/* pad every frame to a whole multiple of the credit size */
		if (credit_remainder != 0) {
			credit_pad = ep->tx_credit_size - credit_remainder;
			trans_len += credit_pad;
		}

		/* availability check only (consume=false); the credits are
		 * actually taken when the bundle is sent
		 */
		ret = ath10k_htc_consume_credit(ep,
						bundle_buf + trans_len - bundle_skb->data,
						false);
		if (ret) {
			skb_queue_head(&ep->tx_req_head, skb);
			break;
		}

		if (bundles_left < trans_len) {
			/* the current bundle is full: flush it, then decide
			 * how to continue with the frame in hand
			 */
			bundle_skb->len = bundle_buf - bundle_skb->data;
			ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);

			if (ret) {
				skb_queue_head(&ep->tx_req_head, skb);
				return ret;
			}

			if (skb_queue_len(&ep->tx_req_head) == 0) {
				/* last frame; no point starting a new bundle */
				ath10k_htc_send_one_skb(ep, skb);
				return ret;
			}

			if (ep->tx_credit_flow_enabled &&
			    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
				skb_queue_head(&ep->tx_req_head, skb);
				return 0;
			}

			bundles_left =
				ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
			bundle_skb = dev_alloc_skb(bundles_left);

			if (!bundle_skb) {
				skb_queue_head(&ep->tx_req_head, skb);
				return -ENOMEM;
			}
			bundle_buf = bundle_skb->data;
			skb_queue_head_init(&tx_save_head);
		}

		/* copy frame (with HTC header) into the bundle and keep the
		 * original for completion handling
		 */
		skb_push(skb, sizeof(struct ath10k_htc_hdr));
		ath10k_htc_prepare_tx_skb(ep, skb);

		memcpy(bundle_buf, skb->data, skb->len);
		hdr = (struct ath10k_htc_hdr *)bundle_buf;
		hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
		hdr->pad_len = __cpu_to_le16(credit_pad);
		bundle_buf += trans_len;
		bundles_left -= trans_len;
		skb_queue_tail(&tx_save_head, skb);
	}

	/* flush whatever accumulated in the final (possibly partial) bundle */
	if (bundle_buf != bundle_skb->data) {
		bundle_skb->len = bundle_buf - bundle_skb->data;
		ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
	} else {
		dev_kfree_skb_any(bundle_skb);
	}

	return ret;
}
  662. static void ath10k_htc_bundle_tx_work(struct work_struct *work)
  663. {
  664. struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
  665. struct ath10k_htc_ep *ep;
  666. struct sk_buff *skb;
  667. int i;
  668. for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
  669. ep = &ar->htc.endpoint[i];
  670. if (!ep->bundle_tx)
  671. continue;
  672. ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
  673. ep->eid, skb_queue_len(&ep->tx_req_head));
  674. if (skb_queue_len(&ep->tx_req_head) >=
  675. ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
  676. ath10k_htc_send_bundle_skbs(ep);
  677. } else {
  678. skb = skb_dequeue(&ep->tx_req_head);
  679. if (!skb)
  680. continue;
  681. ath10k_htc_send_one_skb(ep, skb);
  682. }
  683. }
  684. }
  685. static void ath10k_htc_tx_complete_work(struct work_struct *work)
  686. {
  687. struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
  688. struct ath10k_htc_ep *ep;
  689. enum ath10k_htc_ep_id eid;
  690. struct sk_buff *skb;
  691. int i;
  692. for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
  693. ep = &ar->htc.endpoint[i];
  694. eid = ep->eid;
  695. if (ep->bundle_tx && eid == ar->htt.eid) {
  696. ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
  697. ep->eid, skb_queue_len(&ep->tx_complete_head));
  698. while (true) {
  699. skb = skb_dequeue(&ep->tx_complete_head);
  700. if (!skb)
  701. break;
  702. ath10k_htc_notify_tx_completion(ep, skb);
  703. }
  704. }
  705. }
  706. }
  707. int ath10k_htc_send_hl(struct ath10k_htc *htc,
  708. enum ath10k_htc_ep_id eid,
  709. struct sk_buff *skb)
  710. {
  711. struct ath10k_htc_ep *ep = &htc->endpoint[eid];
  712. struct ath10k *ar = htc->ar;
  713. if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
  714. ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
  715. return -ENOMEM;
  716. }
  717. ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
  718. eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
  719. if (ep->bundle_tx) {
  720. skb_queue_tail(&ep->tx_req_head, skb);
  721. queue_work(ar->workqueue, &ar->bundle_tx_work);
  722. return 0;
  723. } else {
  724. return ath10k_htc_send(htc, eid, skb);
  725. }
  726. }
  727. void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
  728. {
  729. if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
  730. !ep->bundle_tx) {
  731. ep->bundle_tx = true;
  732. skb_queue_head_init(&ep->tx_req_head);
  733. skb_queue_head_init(&ep->tx_complete_head);
  734. }
  735. }
  736. void ath10k_htc_stop_hl(struct ath10k *ar)
  737. {
  738. struct ath10k_htc_ep *ep;
  739. int i;
  740. cancel_work_sync(&ar->bundle_tx_work);
  741. cancel_work_sync(&ar->tx_complete_work);
  742. for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
  743. ep = &ar->htc.endpoint[i];
  744. if (!ep->bundle_tx)
  745. continue;
  746. ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
  747. ep->eid, skb_queue_len(&ep->tx_req_head));
  748. skb_queue_purge(&ep->tx_req_head);
  749. }
  750. }
/* Wait for the target's HTC READY message and record the negotiated
 * credit configuration.
 *
 * Validates the READY message, stores total credits and credit size,
 * parses the optional extended-ready fields (alt credit size, max RX
 * bundle size) and initializes the bundle TX / completion workers.
 * Returns 0 on success, -ETIMEDOUT or -ECOMM on failure.
 */
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	unsigned long time_left;
	struct ath10k_htc_msg *msg;
	u16 message_id;

	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (!time_left) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive interrupt for the control response message
		 * even if the buffer was completed. It is suspected
		 * iomap writes unmasking PCI CE irqs aren't propagated
		 * properly in KVM PCI-passthrough sometimes.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling..\n");

		/* poll every copy engine once, then wait again */
		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		time_left =
		wait_for_completion_timeout(&htc->ctl_resp,
					    ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (!time_left)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id = __le16_to_cpu(msg->hdr.message_id);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	if (ar->hw_params.use_fw_tx_credits)
		htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
	else
		htc->total_transmit_credits = 1;

	htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d actual credits:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size,
		   msg->ready.credit_count);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	/* The only way to determine if the ready message is an extended
	 * message is from the size.
	 */
	if (htc->control_resp_len >=
	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
		htc->alt_data_credit_size =
			__le16_to_cpu(msg->ready_ext.reserved) &
			ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
		htc->max_msgs_per_htc_bundle =
			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "Extended ready message RX bundle size %d alt size %d\n",
			   htc->max_msgs_per_htc_bundle,
			   htc->alt_data_credit_size);
	}

	INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
	INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);

	return 0;
}
  826. void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
  827. enum ath10k_htc_ep_id eid,
  828. bool enable)
  829. {
  830. struct ath10k *ar = htc->ar;
  831. struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];
  832. ep->tx_credit_flow_enabled = enable;
  833. }
/* Connect an HTC service to the target and set up the host-side endpoint.
 *
 * For real services this sends a CONNECT_SERVICE control message, waits
 * for the target's response, then records the assigned endpoint, message
 * size limits, credit configuration and HIF pipe mapping.  The pseudo
 * control service is assigned endpoint 0 locally without any exchange.
 *
 * Returns 0 on success, -ENOMEM/-ETIMEDOUT/-EPROTO, or the underlying
 * send/pipe-mapping error.
 */
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	unsigned long time_left;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		/* resp_msg still points at the zeroed dummy on this path */
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	reinit_completion(&htc->ctl_resp);

	/* On success the skb is owned by HTC; free it only if send fails. */
	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (!time_left) {
		ath10k_err(ar, "Service connect timeout\n");
		return -ETIMEDOUT;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	/* Validate the (locally or target-assigned) endpoint before use. */
	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	/* Endpoint must not already be bound to another service. */
	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;

	/* HTT data service may use the alternate credit size advertised in
	 * the extended ready message, when present.
	 */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
	    htc->alt_data_credit_size != 0)
		ep->tx_credit_size = htc->alt_data_credit_size;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id);
	if (status) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
			   ep->service_id);
		return status;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}
  968. struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
  969. {
  970. struct sk_buff *skb;
  971. skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
  972. if (!skb)
  973. return NULL;
  974. skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
  975. /* FW/HTC requires 4-byte aligned streams */
  976. if (!IS_ALIGNED((unsigned long)skb->data, 4))
  977. ath10k_warn(ar, "Unaligned HTC tx skb\n");
  978. return skb;
  979. }
/* RX completion handler for the HTT pktlog service: hand the packet-log
 * payload to the tracing subsystem, then release the buffer.
 */
static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
  985. static int ath10k_htc_pktlog_connect(struct ath10k *ar)
  986. {
  987. struct ath10k_htc_svc_conn_resp conn_resp;
  988. struct ath10k_htc_svc_conn_req conn_req;
  989. int status;
  990. memset(&conn_req, 0, sizeof(conn_req));
  991. memset(&conn_resp, 0, sizeof(conn_resp));
  992. conn_req.ep_ops.ep_tx_complete = NULL;
  993. conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
  994. conn_req.ep_ops.ep_tx_credits = NULL;
  995. /* connect to control service */
  996. conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
  997. status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
  998. if (status) {
  999. ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
  1000. status);
  1001. return status;
  1002. }
  1003. return 0;
  1004. }
  1005. static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
  1006. {
  1007. u8 ul_pipe_id;
  1008. u8 dl_pipe_id;
  1009. int status;
  1010. status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
  1011. &ul_pipe_id,
  1012. &dl_pipe_id);
  1013. if (status) {
  1014. ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
  1015. ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
  1016. return false;
  1017. }
  1018. return true;
  1019. }
/* Send the HTC SETUP COMPLETE (extended) message to the target, then
 * connect the pktlog service when the HIF supports it.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	if (ar->hif.bus == ATH10K_BUS_SDIO) {
		/* Extra setup params used by SDIO */
		msg->setup_complete_ext.flags =
			__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
		msg->setup_complete_ext.max_msgs_per_bundled_recv =
			htc->max_msgs_per_htc_bundle;
	}
	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	/* On success the skb is owned by HTC; free it only if send fails. */
	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	if (ath10k_htc_pktlog_svc_supported(ar)) {
		status = ath10k_htc_pktlog_connect(ar);
		if (status) {
			ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
			return status;
		}
	}

	return 0;
}
  1056. /* registered target arrival callback from the HIF layer */
  1057. int ath10k_htc_init(struct ath10k *ar)
  1058. {
  1059. int status;
  1060. struct ath10k_htc *htc = &ar->htc;
  1061. struct ath10k_htc_svc_conn_req conn_req;
  1062. struct ath10k_htc_svc_conn_resp conn_resp;
  1063. spin_lock_init(&htc->tx_lock);
  1064. ath10k_htc_reset_endpoint_states(htc);
  1065. htc->ar = ar;
  1066. /* setup our pseudo HTC control endpoint connection */
  1067. memset(&conn_req, 0, sizeof(conn_req));
  1068. memset(&conn_resp, 0, sizeof(conn_resp));
  1069. conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
  1070. conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
  1071. conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
  1072. conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
  1073. /* connect fake service */
  1074. status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
  1075. if (status) {
  1076. ath10k_err(ar, "could not connect to htc service (%d)\n",
  1077. status);
  1078. return status;
  1079. }
  1080. init_completion(&htc->ctl_resp);
  1081. return 0;
  1082. }