// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},
};

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	/* only ce4 needs shadow workaround */
	if (ce_id == 4)
		return true;

	return false;
}

void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.ce_count; i++)
		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}

static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

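/* Keep the rx pipe replenished: allocate, DMA-map and enqueue buffers
 * until rx_buf_needed drops to zero or an allocation/mapping fails.
 */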
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}

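/* Reap one completed rx descriptor from the CE status ring and hand back
 * the matching skb together with the number of bytes the target wrote.
 */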
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

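/* Drain completed rx buffers, deliver them to the pipe's recv_cb outside
 * the ring locks, then try to re-post fresh buffers to the pipe.
 */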
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}

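/* Reap one transmitted skb from the CE source ring; returns an ERR_PTR
 * when no completed entry is available.
 */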
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];
	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}

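/* Process tx completions: unmap each reaped skb and either free it (no
 * send_cb, or credit flow enabled) or pass it to the pipe's send_cb.
 */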
static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);

		if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
			dev_kfree_skb_any(skb);
			continue;
		}

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "tx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->send_cb(ab, skb);
	}
}

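/* Fill in the MSI address/data for a CE ring so the target can raise a
 * per-CE MSI; returns quietly if no "CE" MSI vectors were assigned.
 */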
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
	ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	if (ab->hw_params.supports_shadow_regs &&
	    ath11k_ce_need_shadow_fix(ce_id))
		ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
					    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
					    ce_ring->hal_ring_id);

	return 0;
}

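/* Allocate a CE ring plus its descriptor memory from the DMA-coherent
 * pool; nentries must be a power of two since the ring index mask
 * arithmetic relies on it (callers round up before calling this).
 */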
static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (ce_ring == NULL)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}

static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = attr->send_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}

void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];

	if (attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);

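/* Enqueue one skb for transmission on the given CE pipe. For pipes with
 * interrupts disabled, completed entries are polled and reclaimed first
 * once ring usage crosses ATH11K_CE_USAGE_THRESHOLD.
 */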
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	if (ath11k_ce_need_shadow_fix(pipe_id))
		ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

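/* Unmap and free any rx buffers still sitting in the destination ring. */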
static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ab->hw_params.host_ce_config[i].src_nentries)
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_SRC, i);

		if (ab->hw_params.host_ce_config[i].dest_nentries) {
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST, i);

			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST_STATUS, i);
		}
	}
}

void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now:
	 * non-CE srngs are configured first, then all CE srngs.
	 */
	ath11k_hal_srng_shadow_config(ab);
	ath11k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	ath11k_ce_stop_shadow_timers(ab);

	for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}

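/* (Re)initialize the HAL SRNGs backing each allocated CE ring and reset
 * the software read/write indices.
 */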
int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}

void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	struct ath11k_ce_ring *ce_ring;
	int desc_sz;
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			ce_ring = pipe->src_ring;
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			ce_ring = pipe->dest_ring;
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			ce_ring = pipe->status_ring;
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		attr = &ab->hw_params.host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partial successful allocation */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* For a big-endian host, Copy Engine byte swapping is enabled. When the
 * Copy Engine does the byte swap, the host needs to swap again to get/put
 * the buffer content in the correct byte order.
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!mem)
			return;

		for (i = 0; i < (len / 4); i++) {
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
		}
	}
}

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
	if (ce_id >= ab->hw_params.ce_count)
		return -EINVAL;

	return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);