k3-udma-glue.c

// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
        struct device *dev;
        struct device chan_dev;
        struct udma_dev *udmax;
        const struct udma_tisci_rm *tisci_rm;
        struct k3_ringacc *ringacc;
        u32 src_thread;
        u32 dst_thread;

        u32 hdesc_size;
        bool epib;
        u32 psdata_size;
        u32 swdata_size;
        u32 atype_asel;
        struct psil_endpoint_config *ep_config;
};

struct k3_udma_glue_tx_channel {
        struct k3_udma_glue_common common;

        struct udma_tchan *udma_tchanx;
        int udma_tchan_id;

        struct k3_ring *ringtx;
        struct k3_ring *ringtxcq;

        bool psil_paired;

        int virq;

        atomic_t free_pkts;
        bool tx_pause_on_err;
        bool tx_filt_einfo;
        bool tx_filt_pswords;
        bool tx_supr_tdpkt;

        int udma_tflow_id;
};

struct k3_udma_glue_rx_flow {
        struct udma_rflow *udma_rflow;
        int udma_rflow_id;
        struct k3_ring *ringrx;
        struct k3_ring *ringrxfdq;

        int virq;
};

struct k3_udma_glue_rx_channel {
        struct k3_udma_glue_common common;

        struct udma_rchan *udma_rchanx;
        int udma_rchan_id;
        bool remote;

        bool psil_paired;

        u32 swdata_size;
        int flow_id_base;

        struct k3_udma_glue_rx_flow *flows;
        u32 flow_num;
        u32 flows_ready;
};

static void k3_udma_chan_dev_release(struct device *dev)
{
        /* The struct containing the device is devm managed */
}

static struct class k3_udma_glue_devclass = {
        .name           = "k3_udma_glue_chan",
        .dev_release    = k3_udma_chan_dev_release,
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
                                 struct k3_udma_glue_common *common)
{
        common->udmax = of_xudma_dev_get(udmax_np, NULL);
        if (IS_ERR(common->udmax))
                return PTR_ERR(common->udmax);

        common->ringacc = xudma_get_ringacc(common->udmax);
        common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

        return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
                const char *name, struct k3_udma_glue_common *common,
                bool tx_chn)
{
        struct of_phandle_args dma_spec;
        u32 thread_id;
        int ret = 0;
        int index;

        if (unlikely(!name))
                return -EINVAL;

        index = of_property_match_string(chn_np, "dma-names", name);
        if (index < 0)
                return index;

        if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
                                       &dma_spec))
                return -ENOENT;

        ret = of_k3_udma_glue_parse(dma_spec.np, common);
        if (ret)
                goto out_put_spec;

        thread_id = dma_spec.args[0];
        if (dma_spec.args_count == 2) {
                if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
                        dev_err(common->dev, "Invalid channel atype: %u\n",
                                dma_spec.args[1]);
                        ret = -EINVAL;
                        goto out_put_spec;
                }
                if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
                        dev_err(common->dev, "Invalid channel asel: %u\n",
                                dma_spec.args[1]);
                        ret = -EINVAL;
                        goto out_put_spec;
                }

                common->atype_asel = dma_spec.args[1];
        }

        if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
                ret = -EINVAL;
                goto out_put_spec;
        }

        if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
                ret = -EINVAL;
                goto out_put_spec;
        }

        /* get psil endpoint config */
        common->ep_config = psil_get_ep_config(thread_id);
        if (IS_ERR(common->ep_config)) {
                dev_err(common->dev,
                        "No configuration for psi-l thread 0x%04x\n",
                        thread_id);
                ret = PTR_ERR(common->ep_config);
                goto out_put_spec;
        }

        common->epib = common->ep_config->needs_epib;
        common->psdata_size = common->ep_config->psd_size;

        if (tx_chn)
                common->dst_thread = thread_id;
        else
                common->src_thread = thread_id;

out_put_spec:
        of_node_put(dma_spec.np);
        return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        struct device *dev = tx_chn->common.dev;

        dev_dbg(dev, "dump_tx_chn:\n"
                "udma_tchan_id: %d\n"
                "src_thread: %08x\n"
                "dst_thread: %08x\n",
                tx_chn->udma_tchan_id,
                tx_chn->common.src_thread,
                tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
                                        char *mark)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "=== dump ===> %s\n", mark);
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
                xudma_tchanrt_read(chn->udma_tchanx,
                                   UDMA_CHAN_RT_PEER_RT_EN_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
        struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

        memset(&req, 0, sizeof(req));

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = tx_chn->udma_tchan_id;
        if (tx_chn->tx_pause_on_err)
                req.tx_pause_on_err = 1;
        if (tx_chn->tx_filt_einfo)
                req.tx_filt_einfo = 1;
        if (tx_chn->tx_filt_pswords)
                req.tx_filt_pswords = 1;
        req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        if (tx_chn->tx_supr_tdpkt)
                req.tx_supr_tdpkt = 1;
        req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
        req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
        req.tx_atype = tx_chn->common.atype_asel;

        return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
                const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
        struct k3_udma_glue_tx_channel *tx_chn;
        int ret;

        tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
        if (!tx_chn)
                return ERR_PTR(-ENOMEM);

        tx_chn->common.dev = dev;
        tx_chn->common.swdata_size = cfg->swdata_size;
        tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
        tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
        tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
        tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

        /* parse of udmap channel */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &tx_chn->common, true);
        if (ret)
                goto err;

        tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
                                                tx_chn->common.psdata_size,
                                                tx_chn->common.swdata_size);

        if (xudma_is_pktdma(tx_chn->common.udmax))
                tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
        else
                tx_chn->udma_tchan_id = -1;

        /* request and cfg UDMAP TX channel */
        tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
                                              tx_chn->udma_tchan_id);
        if (IS_ERR(tx_chn->udma_tchanx)) {
                ret = PTR_ERR(tx_chn->udma_tchanx);
                dev_err(dev, "UDMAX tchanx get err %d\n", ret);
                goto err;
        }
        tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

        tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
        tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
        dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
                     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
        ret = device_register(&tx_chn->common.chan_dev);
        if (ret) {
                dev_err(dev, "Channel Device registration failed %d\n", ret);
                put_device(&tx_chn->common.chan_dev);
                tx_chn->common.chan_dev.parent = NULL;
                goto err;
        }

        if (xudma_is_pktdma(tx_chn->common.udmax)) {
                /* prepare the channel device as coherent */
                tx_chn->common.chan_dev.dma_coherent = true;
                dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
                                             DMA_BIT_MASK(48));
        }

        atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

        if (xudma_is_pktdma(tx_chn->common.udmax))
                tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
        else
                tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

        /* request and cfg rings */
        ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
                                            tx_chn->udma_tflow_id, -1,
                                            &tx_chn->ringtx,
                                            &tx_chn->ringtxcq);
        if (ret) {
                dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
                goto err;
        }

        /* Set the dma_dev for the rings to be configured */
        cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
        cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

        /* Set the ASEL value for DMA rings of PKTDMA */
        if (xudma_is_pktdma(tx_chn->common.udmax)) {
                cfg->tx_cfg.asel = tx_chn->common.atype_asel;
                cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
        }

        ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringtx %d\n", ret);
                goto err;
        }

        ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
                goto err;
        }

        /* request and cfg psi-l */
        tx_chn->common.src_thread =
                        xudma_dev_get_psil_base(tx_chn->common.udmax) +
                        tx_chn->udma_tchan_id;

        ret = k3_udma_glue_cfg_tx_chn(tx_chn);
        if (ret) {
                dev_err(dev, "Failed to cfg tchan %d\n", ret);
                goto err;
        }

        k3_udma_glue_dump_tx_chn(tx_chn);

        return tx_chn;

err:
        k3_udma_glue_release_tx_chn(tx_chn);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
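
/*
 * Illustrative usage sketch (not part of this driver): a client such as a
 * networking driver would typically fill a struct k3_udma_glue_tx_channel_cfg
 * and request the channel by its "dma-names" entry. The ring sizes, the
 * swdata size, the "tx0" name and the example_setup_tx_chn() helper below are
 * assumptions for the example only.
 */
static __maybe_unused struct k3_udma_glue_tx_channel *
example_setup_tx_chn(struct device *dev)
{
        struct k3_udma_glue_tx_channel_cfg cfg = { };

        cfg.swdata_size = 16;                   /* per-descriptor SW data area */
        cfg.tx_cfg.size = 128;                  /* TX submit ring elements */
        cfg.txcq_cfg.size = 128;                /* TX completion ring elements */
        cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
        cfg.txcq_cfg.mode = K3_RINGACC_RING_MODE_RING;

        return k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
}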

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        if (tx_chn->psil_paired) {
                xudma_navss_psil_unpair(tx_chn->common.udmax,
                                        tx_chn->common.src_thread,
                                        tx_chn->common.dst_thread);
                tx_chn->psil_paired = false;
        }

        if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
                xudma_tchan_put(tx_chn->common.udmax,
                                tx_chn->udma_tchanx);

        if (tx_chn->ringtxcq)
                k3_ringacc_ring_free(tx_chn->ringtxcq);

        if (tx_chn->ringtx)
                k3_ringacc_ring_free(tx_chn->ringtx);

        if (tx_chn->common.chan_dev.parent) {
                device_unregister(&tx_chn->common.chan_dev);
                tx_chn->common.chan_dev.parent = NULL;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                             struct cppi5_host_desc_t *desc_tx,
                             dma_addr_t desc_dma)
{
        u32 ringtxcq_id;

        if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
                return -ENOMEM;

        ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
        cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

        return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                            dma_addr_t *desc_dma)
{
        int ret;

        ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
        if (!ret)
                atomic_inc(&tx_chn->free_pkts);

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
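
/*
 * Illustrative sketch of the submit/complete pairing (not part of this
 * driver): k3_udma_glue_push_tx_chn() charges the completion-ring budget and
 * queues a descriptor, and k3_udma_glue_pop_tx_chn() hands it back once the
 * packet has been sent. The example_* helpers and the descriptor handling
 * hinted at in the comments are assumptions.
 */
static __maybe_unused int example_tx_submit(struct k3_udma_glue_tx_channel *tx_chn,
                                            struct cppi5_host_desc_t *desc,
                                            dma_addr_t desc_dma)
{
        /* returns -ENOMEM when the TX completion ring budget is exhausted */
        return k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
}

static __maybe_unused void example_tx_complete(struct k3_udma_glue_tx_channel *tx_chn)
{
        dma_addr_t desc_dma;

        /* drain completed descriptors; -ENODATA means the ring is empty */
        while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma)) {
                /* unmap and free the descriptor matching desc_dma here */
        }
}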

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        int ret;

        ret = xudma_navss_psil_pair(tx_chn->common.udmax,
                                    tx_chn->common.src_thread,
                                    tx_chn->common.dst_thread);
        if (ret) {
                dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
                return ret;
        }

        tx_chn->psil_paired = true;

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE);

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN);

        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
        return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

        xudma_tchanrt_write(tx_chn->udma_tchanx,
                            UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

        if (tx_chn->psil_paired) {
                xudma_navss_psil_unpair(tx_chn->common.udmax,
                                        tx_chn->common.src_thread,
                                        tx_chn->common.dst_thread);
                tx_chn->psil_paired = false;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                               bool sync)
{
        int i = 0;
        u32 val;

        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

        val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = xudma_tchanrt_read(tx_chn->udma_tchanx,
                                         UDMA_CHAN_RT_CTL_REG);
                udelay(1);
                if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
                        dev_err(tx_chn->common.dev, "TX tdown timeout\n");
                        break;
                }
                i++;
        }

        val = xudma_tchanrt_read(tx_chn->udma_tchanx,
                                 UDMA_CHAN_RT_PEER_RT_EN_REG);
        if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
                dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                               void *data,
                               void (*cleanup)(void *data, dma_addr_t desc_dma))
{
        struct device *dev = tx_chn->common.dev;
        dma_addr_t desc_dma;
        int occ_tx, i, ret;

        /*
         * TXQ reset needs to be done in a special way as it is input for udma
         * and its state is cached by udma, so:
         * 1) save TXQ occ
         * 2) clean up TXQ and call callback .cleanup() for each desc
         * 3) reset TXQ in a special way
         */
        occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
        dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);

        for (i = 0; i < occ_tx; i++) {
                ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
                if (ret) {
                        if (ret != -ENODATA)
                                dev_err(dev, "TX reset pop %d\n", ret);
                        break;
                }
                cleanup(data, desc_dma);
        }

        /* reset TXCQ as it is not input for udma - expected to be empty */
        k3_ringacc_ring_reset(tx_chn->ringtxcq);
        k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
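
/*
 * Illustrative teardown sketch (not part of this driver): a client usually
 * tears the channel down synchronously, reclaims stale descriptors through
 * the cleanup callback of k3_udma_glue_reset_tx_chn() and only then disables
 * the channel. example_clean_tx_desc() and passing the struct device as the
 * callback data are assumptions for the example.
 */
static __maybe_unused void example_clean_tx_desc(void *data, dma_addr_t desc_dma)
{
        struct device *dev = data;

        dev_dbg(dev, "dropping stale TX descriptor %pad\n", &desc_dma);
        /* dma_unmap_single()/kfree() of the matching descriptor goes here */
}

static __maybe_unused void example_tx_teardown(struct k3_udma_glue_tx_channel *tx_chn,
                                               struct device *dev)
{
        k3_udma_glue_tdown_tx_chn(tx_chn, true);
        k3_udma_glue_reset_tx_chn(tx_chn, dev, example_clean_tx_desc);
        k3_udma_glue_disable_tx_chn(tx_chn);
}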

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
        return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
        return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
        if (xudma_is_pktdma(tx_chn->common.udmax)) {
                tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
                                                          tx_chn->udma_tflow_id);
        } else {
                tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
        }

        return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

struct device *
k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
{
        if (xudma_is_pktdma(tx_chn->common.udmax) &&
            (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
                return &tx_chn->common.chan_dev;

        return xudma_get_device(tx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);

void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
                                       dma_addr_t *addr)
{
        if (!xudma_is_pktdma(tx_chn->common.udmax) ||
            !tx_chn->common.atype_asel)
                return;

        *addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);

void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
                                       dma_addr_t *addr)
{
        if (!xudma_is_pktdma(tx_chn->common.udmax) ||
            !tx_chn->common.atype_asel)
                return;

        *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
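
/*
 * Illustrative sketch (not part of this driver): on PKTDMA the channel ASEL
 * value travels in the upper bits of every address written into a CPPI5
 * descriptor, so clients convert addresses on the way in and out. The buffer
 * variables and the example_tx_attach_buf() helper are assumptions.
 */
static __maybe_unused void example_tx_attach_buf(struct k3_udma_glue_tx_channel *tx_chn,
                                                 struct cppi5_host_desc_t *desc,
                                                 dma_addr_t buf_dma, u32 buf_len)
{
        /* fold the ASEL bits in before the address enters the descriptor */
        k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &buf_dma);
        cppi5_hdesc_attach_buf(desc, buf_dma, buf_len, buf_dma, buf_len);
}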

static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
        int ret;

        memset(&req, 0, sizeof(req));

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = rx_chn->udma_rchan_id;
        req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
        /*
         * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
         * and udmax impl, so just configure it to invalid value.
         * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
         */
        req.rxcq_qnum = 0xFFFF;
        if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
            rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
                /* Default flow + extra ones */
                req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
                                    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
                req.flowid_start = rx_chn->flow_id_base;
                req.flowid_cnt = rx_chn->flow_num;
        }
        req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        req.rx_atype = rx_chn->common.atype_asel;

        ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
        if (ret)
                dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
                        rx_chn->udma_rchan_id, ret);

        return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                                         u32 flow_num)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        if (IS_ERR_OR_NULL(flow->udma_rflow))
                return;

        if (flow->ringrxfdq)
                k3_ringacc_ring_free(flow->ringrxfdq);

        if (flow->ringrx)
                k3_ringacc_ring_free(flow->ringrx);

        xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
        flow->udma_rflow = NULL;
        rx_chn->flows_ready--;
}

static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_idx,
                                    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int rx_ring_id;
        int rx_ringfdq_id;
        int ret = 0;

        flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
                                           flow->udma_rflow_id);
        if (IS_ERR(flow->udma_rflow)) {
                ret = PTR_ERR(flow->udma_rflow);
                dev_err(dev, "UDMAX rflow get err %d\n", ret);
                return ret;
        }

        if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
                ret = -ENODEV;
                goto err_rflow_put;
        }

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                rx_ringfdq_id = flow->udma_rflow_id +
                                xudma_get_rflow_ring_offset(rx_chn->common.udmax);
                rx_ring_id = 0;
        } else {
                rx_ring_id = flow_cfg->ring_rxq_id;
                rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
        }

        /* request and cfg rings */
        ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
                                            rx_ringfdq_id, rx_ring_id,
                                            &flow->ringrxfdq,
                                            &flow->ringrx);
        if (ret) {
                dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
                goto err_rflow_put;
        }

        /* Set the dma_dev for the rings to be configured */
        flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
        flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

        /* Set the ASEL value for DMA rings of PKTDMA */
        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
                flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
        }

        ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrx %d\n", ret);
                goto err_ringrxfdq_free;
        }

        ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
                goto err_ringrxfdq_free;
        }

        if (rx_chn->remote) {
                rx_ring_id = TI_SCI_RESOURCE_NULL;
                rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
        } else {
                rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
                rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
        }

        memset(&req, 0, sizeof(req));

        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        if (rx_chn->common.epib)
                req.rx_einfo_present = 1;
        if (rx_chn->common.psdata_size)
                req.rx_psinfo_present = 1;
        if (flow_cfg->rx_error_handling)
                req.rx_error_handling = 1;
        req.rx_desc_type = 0;
        req.rx_dest_qnum = rx_ring_id;
        req.rx_src_tag_hi_sel = 0;
        req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
        req.rx_dest_tag_hi_sel = 0;
        req.rx_dest_tag_lo_sel = 0;
        req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
        req.rx_fdq1_qnum = rx_ringfdq_id;
        req.rx_fdq2_qnum = rx_ringfdq_id;
        req.rx_fdq3_qnum = rx_ringfdq_id;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
                        ret);
                goto err_ringrxfdq_free;
        }

        rx_chn->flows_ready++;
        dev_dbg(dev, "flow%d config done. ready:%d\n",
                flow->udma_rflow_id, rx_chn->flows_ready);

        return 0;

err_ringrxfdq_free:
        k3_ringacc_ring_free(flow->ringrxfdq);
        k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
        xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
        flow->udma_rflow = NULL;

        return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "dump_rx_chn:\n"
                "udma_rchan_id: %d\n"
                "src_thread: %08x\n"
                "dst_thread: %08x\n"
                "epib: %d\n"
                "hdesc_size: %u\n"
                "psdata_size: %u\n"
                "swdata_size: %u\n"
                "flow_id_base: %d\n"
                "flow_num: %d\n",
                chn->udma_rchan_id,
                chn->common.src_thread,
                chn->common.dst_thread,
                chn->common.epib,
                chn->common.hdesc_size,
                chn->common.psdata_size,
                chn->common.swdata_size,
                chn->flow_id_base,
                chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
                                        char *mark)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "=== dump ===> %s\n", mark);
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
                xudma_rchanrt_read(chn->udma_rchanx,
                                   UDMA_CHAN_RT_PEER_RT_EN_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
                               struct k3_udma_glue_rx_channel_cfg *cfg)
{
        int ret;

        /* default rflow */
        if (cfg->flow_id_use_rxchan_id)
                return 0;

        /* not GP rflows */
        if (rx_chn->flow_id_base != -1 &&
            !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
                return 0;

        /* Allocate range of GP rflows */
        ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
                                         rx_chn->flow_id_base,
                                         rx_chn->flow_num);
        if (ret < 0) {
                dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
                        rx_chn->flow_id_base, rx_chn->flow_num, ret);
                return ret;
        }

        rx_chn->flow_id_base = ret;

        return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
                                 struct k3_udma_glue_rx_channel_cfg *cfg)
{
        struct k3_udma_glue_rx_channel *rx_chn;
        struct psil_endpoint_config *ep_cfg;
        int ret, i;

        if (cfg->flow_id_num <= 0)
                return ERR_PTR(-EINVAL);

        if (cfg->flow_id_num != 1 &&
            (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
                return ERR_PTR(-EINVAL);

        rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
        if (!rx_chn)
                return ERR_PTR(-ENOMEM);

        rx_chn->common.dev = dev;
        rx_chn->common.swdata_size = cfg->swdata_size;
        rx_chn->remote = false;

        /* parse of udmap channel */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &rx_chn->common, false);
        if (ret)
                goto err;

        rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
                                                rx_chn->common.psdata_size,
                                                rx_chn->common.swdata_size);

        ep_cfg = rx_chn->common.ep_config;

        if (xudma_is_pktdma(rx_chn->common.udmax))
                rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
        else
                rx_chn->udma_rchan_id = -1;

        /* request and cfg UDMAP RX channel */
        rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
                                              rx_chn->udma_rchan_id);
        if (IS_ERR(rx_chn->udma_rchanx)) {
                ret = PTR_ERR(rx_chn->udma_rchanx);
                dev_err(dev, "UDMAX rchanx get err %d\n", ret);
                goto err;
        }
        rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

        rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
        rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
        dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
                     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
        ret = device_register(&rx_chn->common.chan_dev);
        if (ret) {
                dev_err(dev, "Channel Device registration failed %d\n", ret);
                put_device(&rx_chn->common.chan_dev);
                rx_chn->common.chan_dev.parent = NULL;
                goto err;
        }

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                /* prepare the channel device as coherent */
                rx_chn->common.chan_dev.dma_coherent = true;
                dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
                                             DMA_BIT_MASK(48));
        }

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                int flow_start = cfg->flow_id_base;
                int flow_end;

                if (flow_start == -1)
                        flow_start = ep_cfg->flow_start;

                flow_end = flow_start + cfg->flow_id_num - 1;
                if (flow_start < ep_cfg->flow_start ||
                    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
                        dev_err(dev, "Invalid flow range requested\n");
                        ret = -EINVAL;
                        goto err;
                }
                rx_chn->flow_id_base = flow_start;
        } else {
                rx_chn->flow_id_base = cfg->flow_id_base;

                /* Use RX channel id as flow id: target dev can't generate flow_id */
                if (cfg->flow_id_use_rxchan_id)
                        rx_chn->flow_id_base = rx_chn->udma_rchan_id;
        }

        rx_chn->flow_num = cfg->flow_id_num;

        rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
                                     sizeof(*rx_chn->flows), GFP_KERNEL);
        if (!rx_chn->flows) {
                ret = -ENOMEM;
                goto err;
        }

        ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
        if (ret)
                goto err;

        for (i = 0; i < rx_chn->flow_num; i++)
                rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

        /* request and cfg psi-l */
        rx_chn->common.dst_thread =
                        xudma_dev_get_psil_base(rx_chn->common.udmax) +
                        rx_chn->udma_rchan_id;

        ret = k3_udma_glue_cfg_rx_chn(rx_chn);
        if (ret) {
                dev_err(dev, "Failed to cfg rchan %d\n", ret);
                goto err;
        }

        /* init default RX flow only if flow_num = 1 */
        if (cfg->def_flow_cfg) {
                ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
                if (ret)
                        goto err;
        }

        k3_udma_glue_dump_rx_chn(rx_chn);

        return rx_chn;

err:
        k3_udma_glue_release_rx_chn(rx_chn);
        return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
                                   struct k3_udma_glue_rx_channel_cfg *cfg)
{
        struct k3_udma_glue_rx_channel *rx_chn;
        int ret, i;

        if (cfg->flow_id_num <= 0 ||
            cfg->flow_id_use_rxchan_id ||
            cfg->def_flow_cfg ||
            cfg->flow_id_base < 0)
                return ERR_PTR(-EINVAL);

        /*
         * A remote RX channel is under control of a remote CPU core, so
         * Linux can only request it and manipulate it through its
         * dedicated RX flows
         */

        rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
        if (!rx_chn)
                return ERR_PTR(-ENOMEM);

        rx_chn->common.dev = dev;
        rx_chn->common.swdata_size = cfg->swdata_size;
        rx_chn->remote = true;
        rx_chn->udma_rchan_id = -1;
        rx_chn->flow_num = cfg->flow_id_num;
        rx_chn->flow_id_base = cfg->flow_id_base;
        rx_chn->psil_paired = false;

        /* parse of udmap channel */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &rx_chn->common, false);
        if (ret)
                goto err;

        rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
                                                rx_chn->common.psdata_size,
                                                rx_chn->common.swdata_size);

        rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
                                     sizeof(*rx_chn->flows), GFP_KERNEL);
        if (!rx_chn->flows) {
                ret = -ENOMEM;
                goto err;
        }

        rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
        rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
        dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
                     rx_chn->common.src_thread);
        ret = device_register(&rx_chn->common.chan_dev);
        if (ret) {
                dev_err(dev, "Channel Device registration failed %d\n", ret);
                put_device(&rx_chn->common.chan_dev);
                rx_chn->common.chan_dev.parent = NULL;
                goto err;
        }

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                /* prepare the channel device as coherent */
                rx_chn->common.chan_dev.dma_coherent = true;
                dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
                                             DMA_BIT_MASK(48));
        }

        ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
        if (ret)
                goto err;

        for (i = 0; i < rx_chn->flow_num; i++)
                rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

        k3_udma_glue_dump_rx_chn(rx_chn);

        return rx_chn;

err:
        k3_udma_glue_release_rx_chn(rx_chn);
        return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
                            struct k3_udma_glue_rx_channel_cfg *cfg)
{
        if (cfg->remote)
                return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
        else
                return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
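
/*
 * Illustrative usage sketch (not part of this driver): requesting a local RX
 * channel with a single default flow. The ring sizes, the "rx0" name, the
 * error-handling choice and the example_setup_rx_chn() helper are assumptions
 * for the example only.
 */
static __maybe_unused struct k3_udma_glue_rx_channel *
example_setup_rx_chn(struct device *dev)
{
        struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
        struct k3_udma_glue_rx_channel_cfg cfg = { };

        flow_cfg.rx_cfg.size = 128;             /* RX completion ring elements */
        flow_cfg.rxfdq_cfg.size = 128;          /* free descriptor queue elements */
        flow_cfg.ring_rxq_id = K3_RINGACC_RING_ID_ANY;
        flow_cfg.ring_rxfdq0_id = K3_RINGACC_RING_ID_ANY;
        flow_cfg.rx_error_handling = true;      /* retry instead of dropping */

        cfg.swdata_size = 16;
        cfg.flow_id_base = -1;                  /* let the glue layer pick the base */
        cfg.flow_id_num = 1;
        cfg.def_flow_cfg = &flow_cfg;

        return k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
}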

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        int i;

        if (IS_ERR_OR_NULL(rx_chn->common.udmax))
                return;

        if (rx_chn->psil_paired) {
                xudma_navss_psil_unpair(rx_chn->common.udmax,
                                        rx_chn->common.src_thread,
                                        rx_chn->common.dst_thread);
                rx_chn->psil_paired = false;
        }

        for (i = 0; i < rx_chn->flow_num; i++)
                k3_udma_glue_release_rx_flow(rx_chn, i);

        if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
                xudma_free_gp_rflow_range(rx_chn->common.udmax,
                                          rx_chn->flow_id_base,
                                          rx_chn->flow_num);

        if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
                xudma_rchan_put(rx_chn->common.udmax,
                                rx_chn->udma_rchanx);

        if (rx_chn->common.chan_dev.parent) {
                device_unregister(&rx_chn->common.chan_dev);
                rx_chn->common.chan_dev.parent = NULL;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
                              u32 flow_idx,
                              struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
        if (flow_idx >= rx_chn->flow_num)
                return -EINVAL;

        return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow;

        if (flow_idx >= rx_chn->flow_num)
                return -EINVAL;

        flow = &rx_chn->flows[flow_idx];

        return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
        return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
                                u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int rx_ring_id;
        int rx_ringfdq_id;
        int ret = 0;

        if (!rx_chn->remote)
                return -EINVAL;

        rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
        rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

        memset(&req, 0, sizeof(req));

        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        req.rx_dest_qnum = rx_ring_id;
        req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
        req.rx_fdq1_qnum = rx_ringfdq_id;
        req.rx_fdq2_qnum = rx_ringfdq_id;
        req.rx_fdq3_qnum = rx_ringfdq_id;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
                        ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
                                 u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int ret = 0;

        if (!rx_chn->remote)
                return -EINVAL;

        memset(&req, 0, sizeof(req));
        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
                        ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
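
/*
 * Illustrative sketch (not part of this driver): a remote RX channel
 * (cfg->remote = true) is owned by another core, so a Linux client only
 * initializes and toggles its flows instead of enabling the channel itself.
 * Flow index 0 and the example_* helper are assumptions.
 */
static __maybe_unused int example_remote_rx_start(struct k3_udma_glue_rx_channel *rx_chn,
                                                  struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
        int ret;

        ret = k3_udma_glue_rx_flow_init(rx_chn, 0, flow_cfg);
        if (ret)
                return ret;

        return k3_udma_glue_rx_flow_enable(rx_chn, 0);
}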

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        int ret;

        if (rx_chn->remote)
                return -EINVAL;

        if (rx_chn->flows_ready < rx_chn->flow_num)
                return -EINVAL;

        ret = xudma_navss_psil_pair(rx_chn->common.udmax,
                                    rx_chn->common.src_thread,
                                    rx_chn->common.dst_thread);
        if (ret) {
                dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
                return ret;
        }

        rx_chn->psil_paired = true;

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN);

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE);

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
        return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

        xudma_rchanrt_write(rx_chn->udma_rchanx,
                            UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

        if (rx_chn->psil_paired) {
                xudma_navss_psil_unpair(rx_chn->common.udmax,
                                        rx_chn->common.src_thread,
                                        rx_chn->common.dst_thread);
                rx_chn->psil_paired = false;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                               bool sync)
{
        int i = 0;
        u32 val;

        if (rx_chn->remote)
                return;

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

        val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = xudma_rchanrt_read(rx_chn->udma_rchanx,
                                         UDMA_CHAN_RT_CTL_REG);
                udelay(1);
                if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
                        dev_err(rx_chn->common.dev, "RX tdown timeout\n");
                        break;
                }
                i++;
        }

        val = xudma_rchanrt_read(rx_chn->udma_rchanx,
                                 UDMA_CHAN_RT_PEER_RT_EN_REG);
        if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
                dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                u32 flow_num, void *data,
                void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
        struct device *dev = rx_chn->common.dev;
        dma_addr_t desc_dma;
        int occ_rx, i, ret;

        /* reset RXCQ as it is not input for udma - expected to be empty */
        occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
        dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);

        /* Skip RX FDQ in case one FDQ is used for the set of flows */
        if (skip_fdq)
                goto do_reset;

        /*
         * RX FDQ reset needs to be done in a special way as it is input for
         * udma and its state is cached by udma, so:
         * 1) save RX FDQ occ
         * 2) clean up RX FDQ and call callback .cleanup() for each desc
         * 3) reset RX FDQ in a special way
         */
        occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
        dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

        for (i = 0; i < occ_rx; i++) {
                ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
                if (ret) {
                        if (ret != -ENODATA)
                                dev_err(dev, "RX reset pop %d\n", ret);
                        break;
                }
                cleanup(data, desc_dma);
        }

        k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

do_reset:
        k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
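
/*
 * Illustrative teardown sketch (not part of this driver): for a single-flow
 * local RX channel the usual order is teardown, per-flow reset (which invokes
 * the cleanup callback for every descriptor still queued on the FDQ), then
 * disable. The example_* helper and its callback arguments are assumptions.
 */
static __maybe_unused void example_rx_teardown(struct k3_udma_glue_rx_channel *rx_chn,
                                               void *priv,
                                               void (*cleanup)(void *data, dma_addr_t desc_dma))
{
        k3_udma_glue_tdown_rx_chn(rx_chn, true);
        k3_udma_glue_reset_rx_chn(rx_chn, 0, priv, cleanup, false);
        k3_udma_glue_disable_rx_chn(rx_chn);
}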

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                             u32 flow_num, struct cppi5_host_desc_t *desc_rx,
                             dma_addr_t desc_dma)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                            u32 flow_num, dma_addr_t *desc_dma)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
                            u32 flow_num)
{
        struct k3_udma_glue_rx_flow *flow;

        flow = &rx_chn->flows[flow_num];

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
                                                        flow->udma_rflow_id);
        } else {
                flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
        }

        return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);

struct device *
k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
{
        if (xudma_is_pktdma(rx_chn->common.udmax) &&
            (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
                return &rx_chn->common.chan_dev;

        return xudma_get_device(rx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
                                       dma_addr_t *addr)
{
        if (!xudma_is_pktdma(rx_chn->common.udmax) ||
            !rx_chn->common.atype_asel)
                return;

        *addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);

void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
                                       dma_addr_t *addr)
{
        if (!xudma_is_pktdma(rx_chn->common.udmax) ||
            !rx_chn->common.atype_asel)
                return;

        *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);

static int __init k3_udma_glue_class_init(void)
{
        return class_register(&k3_udma_glue_devclass);
}

arch_initcall(k3_udma_glue_class_init);