q2spi-gsi.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  4. */
  5. #include <linux/io.h>
  6. #include <linux/module.h>
  7. #include <linux/dmaengine.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/errno.h>
  10. #include "q2spi-msm.h"
  11. #include "q2spi-slave-reg.h"
  12. static void q2spi_rx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param *cb_param)
  13. {
  14. struct q2spi_packet *q2spi_pkt = cb_param->userdata;
  15. struct q2spi_geni *q2spi = q2spi_pkt->q2spi;
  16. struct q2spi_dma_transfer *xfer;
  17. u32 status = 0;
  18. if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY) {
  19. Q2SPI_DEBUG(q2spi, "%s for Doorbell\n", __func__);
  20. xfer = q2spi->db_xfer;
  21. } else {
  22. xfer = q2spi_pkt->xfer;
  23. Q2SPI_DEBUG(q2spi, "%s for Rx Event\n", __func__);
  24. }
  25. if (!xfer || !xfer->rx_buf) {
  26. pr_err("%s rx buf NULL!!!\n", __func__);
  27. return;
  28. }
  29. Q2SPI_DEBUG(q2spi, "%s cb_param:%p cb_param->len:%d cb_param->status:%d\n",
  30. __func__, cb_param, cb_param->length, cb_param->status);
  31. Q2SPI_DEBUG(q2spi, "%s xfer:%p rx_buf:%p rx_dma:%p rx_len:%d m_cmd_param:%d\n",
  32. __func__, xfer, xfer->rx_buf, (void *)xfer->rx_dma, xfer->rx_len,
  33. q2spi_pkt->m_cmd_param);
  34. /* check status is 0 or EOT for success */
  35. status = cb_param->status;
  36. if (cb_param->length <= xfer->rx_len) {
  37. xfer->rx_len = cb_param->length;
  38. q2spi_dump_ipc(q2spi, q2spi->ipc, "rx_xfer_completion_event RX",
  39. (char *)xfer->rx_buf, cb_param->length);
  40. if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY) {
  41. Q2SPI_DEBUG(q2spi, "%s call db_rx_cb\n", __func__);
  42. complete_all(&q2spi->db_rx_cb);
  43. } else {
  44. Q2SPI_DEBUG(q2spi, "%s call rx_cb\n", __func__);
  45. complete_all(&q2spi->rx_cb);
  46. }
  47. Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n",
  48. __func__, q2spi_pkt, q2spi_pkt->state, q2spi_pkt->vtype);
  49. } else {
  50. Q2SPI_ERROR(q2spi, "%s Err length miss-match %d %d\n",
  51. __func__, cb_param->length, xfer->rx_len);
  52. }
  53. }
  54. static void q2spi_tx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param *cb_param)
  55. {
  56. struct q2spi_packet *q2spi_pkt = cb_param->userdata;
  57. struct q2spi_geni *q2spi = q2spi_pkt->q2spi;
  58. struct q2spi_dma_transfer *xfer = q2spi_pkt->xfer;
  59. Q2SPI_DEBUG(q2spi, "%s xfer->tx_len:%d cb_param_length:%d\n", __func__,
  60. xfer->tx_len, cb_param->length);
  61. if (cb_param->length == xfer->tx_len) {
  62. Q2SPI_DEBUG(q2spi, "%s complete_tx_cb\n", __func__);
  63. complete_all(&q2spi->tx_cb);
  64. } else {
  65. Q2SPI_ERROR(q2spi, "%s Err length miss-match\n", __func__);
  66. }
  67. }
  68. static void q2spi_parse_q2spi_status(struct msm_gpi_dma_async_tx_cb_param *cb_param)
  69. {
  70. struct q2spi_packet *q2spi_pkt = cb_param->userdata;
  71. struct q2spi_geni *q2spi = q2spi_pkt->q2spi;
  72. u32 status = 0;
  73. status = cb_param->q2spi_status;
  74. Q2SPI_DEBUG(q2spi, "%s status:%d complete_tx_cb\n", __func__, status);
  75. complete_all(&q2spi->tx_cb);
  76. Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n",
  77. __func__, q2spi_pkt, q2spi_pkt->state, q2spi_pkt->vtype);
  78. }
  79. static void q2spi_parse_cr_header(struct q2spi_geni *q2spi, struct msm_gpi_cb const *cb)
  80. {
  81. Q2SPI_DEBUG(q2spi, "%s line:%d\n", __func__, __LINE__);
  82. q2spi_doorbell(q2spi, &cb->q2spi_cr_header_event);
  83. }
  84. static void q2spi_gsi_tx_callback(void *cb)
  85. {
  86. struct msm_gpi_dma_async_tx_cb_param *cb_param = NULL;
  87. struct q2spi_packet *q2spi_pkt;
  88. struct q2spi_geni *q2spi;
  89. cb_param = (struct msm_gpi_dma_async_tx_cb_param *)cb;
  90. if (!cb_param) {
  91. pr_err("%s Err Invalid CB\n", __func__);
  92. return;
  93. }
  94. q2spi_pkt = cb_param->userdata;
  95. q2spi = q2spi_pkt->q2spi;
  96. if (!q2spi) {
  97. pr_err("%s Err Invalid q2spi\n", __func__);
  98. return;
  99. }
  100. if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
  101. Q2SPI_DEBUG(q2spi, "%s Unexpected CB status\n", __func__);
  102. return;
  103. }
  104. if (cb_param->completion_code == MSM_GPI_TCE_UNEXP_ERR) {
  105. Q2SPI_DEBUG(q2spi, "%s Unexpected GSI CB completion code\n", __func__);
  106. return;
  107. } else if (cb_param->completion_code == MSM_GPI_TCE_EOT) {
  108. Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__);
  109. if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) {
  110. Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__);
  111. q2spi_tx_xfer_completion_event(cb_param);
  112. } else if (cb_param->tce_type == QUP_TCE_TYPE_Q2SPI_STATUS) {
  113. Q2SPI_DEBUG(q2spi, "%s QUP_TCE_TYPE_Q2SPI_STATUS\n", __func__);
  114. q2spi_parse_q2spi_status(cb_param);
  115. }
  116. }
  117. }
  118. static void q2spi_gsi_rx_callback(void *cb)
  119. {
  120. struct msm_gpi_dma_async_tx_cb_param *cb_param = NULL;
  121. struct q2spi_packet *q2spi_pkt;
  122. struct q2spi_geni *q2spi;
  123. cb_param = (struct msm_gpi_dma_async_tx_cb_param *)cb;
  124. if (!cb_param) {
  125. pr_err("%s Err Invalid CB\n", __func__);
  126. return;
  127. }
  128. q2spi_pkt = cb_param->userdata;
  129. if (!q2spi_pkt) {
  130. pr_err("%s Err Invalid packet\n", __func__);
  131. return;
  132. }
  133. q2spi = q2spi_pkt->q2spi;
  134. if (!q2spi) {
  135. pr_err("%s Err Invalid q2spi\n", __func__);
  136. return;
  137. }
  138. if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
  139. Q2SPI_ERROR(q2spi, "%s Err cb_status:%d\n", __func__, cb_param->status);
  140. return;
  141. }
  142. if (cb_param->completion_code == MSM_GPI_TCE_UNEXP_ERR) {
  143. Q2SPI_ERROR(q2spi, "%s Err MSM_GPI_TCE_UNEXP_ERR\n", __func__);
  144. return;
  145. } else if (cb_param->completion_code == MSM_GPI_TCE_EOT) {
  146. Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__);
  147. if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) {
  148. /* CR header */
  149. Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__);
  150. q2spi_rx_xfer_completion_event(cb_param);
  151. }
  152. } else {
  153. Q2SPI_DEBUG(q2spi, "%s: Err cb_param->completion_code = %d\n",
  154. __func__, cb_param->completion_code);
  155. }
  156. Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
  157. }
  158. static void q2spi_geni_deallocate_chan(struct q2spi_gsi *gsi)
  159. {
  160. dma_release_channel(gsi->rx_c);
  161. dma_release_channel(gsi->tx_c);
  162. gsi->tx_c = NULL;
  163. gsi->rx_c = NULL;
  164. }
  165. /**
  166. * q2spi_geni_gsi_release - Releases GSI channel resources
  167. *
  168. * @q2spi: pointer to q2spi_geni driver data
  169. *
  170. * Return: None
  171. */
  172. void q2spi_geni_gsi_release(struct q2spi_geni *q2spi)
  173. {
  174. q2spi_geni_deallocate_chan(q2spi->gsi);
  175. kfree(q2spi->gsi);
  176. }
  177. /**
  178. * q2spi_geni_gsi_setup - Does setup of GSI channel resources
  179. *
  180. * @q2spi: pointer to q2spi_geni driver data
  181. *
  182. * Return: 0 on success, linux error code on failure
  183. */
  184. int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi)
  185. {
  186. struct q2spi_gsi *gsi = NULL;
  187. int ret = 0;
  188. gsi = kzalloc(sizeof(struct q2spi_gsi), GFP_ATOMIC);
  189. if (!gsi) {
  190. Q2SPI_ERROR(q2spi, "%s Err GSI structure memory alloc failed\n", __func__);
  191. return -ENOMEM;
  192. }
  193. q2spi->gsi = gsi;
  194. Q2SPI_DEBUG(q2spi, "%s gsi:%p\n", __func__, gsi);
  195. if (gsi->chan_setup) {
  196. Q2SPI_ERROR(q2spi, "%s Err GSI channel already configured\n", __func__);
  197. return ret;
  198. }
  199. gsi->tx_c = dma_request_slave_channel(q2spi->dev, "tx");
  200. if (IS_ERR_OR_NULL(gsi->tx_c)) {
  201. Q2SPI_ERROR(q2spi, "%s Err Failed to get tx DMA ch %ld\n",
  202. __func__, PTR_ERR(gsi->tx_c));
  203. q2spi_kfree(q2spi, q2spi->gsi, __LINE__);
  204. return -EIO;
  205. }
  206. Q2SPI_DEBUG(q2spi, "%s gsi_tx_c:%p\n", __func__, gsi->tx_c);
  207. gsi->rx_c = dma_request_slave_channel(q2spi->dev, "rx");
  208. if (IS_ERR_OR_NULL(gsi->rx_c)) {
  209. Q2SPI_ERROR(q2spi, "%s Err Failed to get rx DMA ch %ld\n",
  210. __func__, PTR_ERR(gsi->rx_c));
  211. dma_release_channel(gsi->tx_c);
  212. gsi->tx_c = NULL;
  213. q2spi_kfree(q2spi, q2spi->gsi, __LINE__);
  214. return -EIO;
  215. }
  216. Q2SPI_DEBUG(q2spi, "%s gsi_rx_c:%p\n", __func__, gsi->rx_c);
  217. gsi->tx_ev.init.callback = q2spi_gsi_ch_ev_cb;
  218. gsi->tx_ev.init.cb_param = q2spi;
  219. gsi->tx_ev.cmd = MSM_GPI_INIT;
  220. gsi->tx_c->private = &gsi->tx_ev;
  221. ret = dmaengine_slave_config(gsi->tx_c, NULL);
  222. if (ret) {
  223. Q2SPI_ERROR(q2spi, "%s tx dma slave config ret :%d\n", __func__, ret);
  224. goto dmaengine_slave_config_fail;
  225. }
  226. gsi->rx_ev.init.callback = q2spi_gsi_ch_ev_cb;
  227. gsi->rx_ev.init.cb_param = q2spi;
  228. gsi->rx_ev.cmd = MSM_GPI_INIT;
  229. gsi->rx_c->private = &gsi->rx_ev;
  230. ret = dmaengine_slave_config(gsi->rx_c, NULL);
  231. if (ret) {
  232. Q2SPI_ERROR(q2spi, "%s rx dma slave config ret :%d\n", __func__, ret);
  233. goto dmaengine_slave_config_fail;
  234. }
  235. Q2SPI_DEBUG(q2spi, "%s q2spi:%p gsi:%p q2spi_gsi:%p\n", __func__, q2spi, gsi, q2spi->gsi);
  236. q2spi->gsi->chan_setup = true;
  237. return ret;
  238. dmaengine_slave_config_fail:
  239. q2spi_geni_gsi_release(q2spi);
  240. return ret;
  241. }
  242. static int get_q2spi_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, int *clk_idx, int *clk_div)
  243. {
  244. unsigned long sclk_freq;
  245. unsigned long res_freq;
  246. struct geni_se *se = &q2spi->se;
  247. int ret = 0;
  248. Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid);
  249. ret = geni_se_clk_freq_match(&q2spi->se, (speed_hz * q2spi->oversampling),
  250. clk_idx, &sclk_freq, false);
  251. if (ret) {
  252. Q2SPI_ERROR(q2spi, "%s Err Failed(%d) to find src clk for 0x%x\n",
  253. __func__, ret, speed_hz);
  254. return ret;
  255. }
  256. *clk_div = DIV_ROUND_UP(sclk_freq, (q2spi->oversampling * speed_hz));
  257. if (!(*clk_div)) {
  258. Q2SPI_ERROR(q2spi, "%s Err sclk:%lu oversampling:%d speed:%u\n",
  259. __func__, sclk_freq, q2spi->oversampling, speed_hz);
  260. return -EINVAL;
  261. }
  262. res_freq = (sclk_freq / (*clk_div));
  263. Q2SPI_DEBUG(q2spi, "%s req %u resultant %lu sclk %lu, idx %d, div %d\n",
  264. __func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);
  265. ret = clk_set_rate(se->clk, sclk_freq);
  266. if (ret) {
  267. Q2SPI_ERROR(q2spi, "%s Err clk_set_rate failed %d\n", __func__, ret);
  268. return ret;
  269. }
  270. Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
  271. return 0;
  272. }
  273. /* 3.10.2.8 Q2SPI */
  274. static struct msm_gpi_tre *setup_cfg0_tre(struct q2spi_geni *q2spi)
  275. {
  276. struct msm_gpi_tre *c0_tre = &q2spi->gsi->config0_tre;
  277. u8 word_len = 0;
  278. u8 cs_mode = 0;
  279. u8 intr_pol = 0;
  280. u8 pack = 0;
  281. u8 cs_clk_delay = SPI_CS_CLK_DLY;
  282. int div = 0;
  283. int ret = 0;
  284. int idx = 0;
  285. int tdn = S_GP_CNT5_TDN;
  286. int tsn = M_GP_CNT7_TSN;
  287. int tan = M_GP_CNT4_TAN;
  288. int ssn = S_GP_CNT7_SSN;
  289. int cn_delay = M_GP_CNT6_CN_DELAY;
  290. Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid);
  291. ret = get_q2spi_clk_cfg(q2spi->cur_speed_hz, q2spi, &idx, &div);
  292. if (ret) {
  293. Q2SPI_ERROR(q2spi, "%s Err setting clks:%d\n", __func__, ret);
  294. return ERR_PTR(ret);
  295. }
  296. word_len = MIN_WORD_LEN;
  297. pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
  298. cs_mode = CS_LESS_MODE;
  299. intr_pol = INTR_HIGH_POLARITY;
  300. Q2SPI_DEBUG(q2spi, "%s cs_mode 0x%x word %d pack %d idx %d div %d\n",
  301. __func__, cs_mode, word_len, pack, idx, div);
  302. /* config0 */
  303. c0_tre->dword[0] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD0(tsn, pack, tdn, cs_mode,
  304. intr_pol, word_len);
  305. c0_tre->dword[1] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD1(tan, cs_clk_delay, ssn);
  306. c0_tre->dword[2] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD2(cn_delay, idx, div);
  307. c0_tre->dword[3] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 0, 1);
  308. Q2SPI_DEBUG(q2spi, "%s c0_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n",
  309. __func__, c0_tre->dword[0], c0_tre->dword[1],
  310. c0_tre->dword[2], c0_tre->dword[3]);
  311. q2spi->setup_config0 = true;
  312. return c0_tre;
  313. }
  314. /* 3.10.4.9 Q2SPI */
  315. static struct
  316. msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags, struct q2spi_geni *q2spi)
  317. {
  318. struct msm_gpi_tre *go_tre = &q2spi->gsi->go_tre;
  319. int chain = 0;
  320. int eot = 0;
  321. int eob = 0;
  322. int link_rx = 0;
  323. if (IS_ERR_OR_NULL(go_tre))
  324. return go_tre;
  325. go_tre->dword[0] = MSM_GPI_Q2SPI_GO_TRE_DWORD0(flags, cs, cmd);
  326. go_tre->dword[1] = MSM_GPI_Q2SPI_GO_TRE_DWORD1;
  327. go_tre->dword[2] = MSM_GPI_Q2SPI_GO_TRE_DWORD2(rx_len);
  328. if (cmd == Q2SPI_RX_ONLY) {
  329. eot = 0;
  330. eob = 0;
  331. /* GO TRE on RX: processing needed check this */
  332. chain = 0;
  333. link_rx = 1;
  334. } else if (cmd == Q2SPI_TX_ONLY) {
  335. eot = 0;
  336. /* GO TRE on TX: processing needed check this */
  337. eob = 0;
  338. chain = 1;
  339. } else if (cmd == Q2SPI_TX_RX) {
  340. eot = 0;
  341. eob = 0;
  342. chain = 1;
  343. link_rx = 1;
  344. }
  345. go_tre->dword[3] = MSM_GPI_Q2SPI_GO_TRE_DWORD3(link_rx, 0, eot, eob, chain);
  346. Q2SPI_DEBUG(q2spi, "%s rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n",
  347. __func__, rx_len, flags, cs, cmd, eot, eob, chain);
  348. if (cmd == Q2SPI_RX_ONLY)
  349. Q2SPI_DEBUG(q2spi, "%s Q2SPI_RX_ONLY\n", __func__);
  350. else if (cmd == Q2SPI_TX_ONLY)
  351. Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_ONLY\n", __func__);
  352. else if (cmd == Q2SPI_TX_RX)
  353. Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_RX_ONLY\n", __func__);
  354. Q2SPI_DEBUG(q2spi, "%s go_tre dword[0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n",
  355. __func__, go_tre->dword[0], go_tre->dword[1], go_tre->dword[2],
  356. go_tre->dword[3]);
  357. return go_tre;
  358. }
  359. /*3.10.5 DMA TRE */
  360. static struct
  361. msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre, dma_addr_t buf, u32 len,
  362. struct q2spi_geni *q2spi, bool is_tx)
  363. {
  364. if (IS_ERR_OR_NULL(tre))
  365. return tre;
  366. tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(buf);
  367. tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf);
  368. tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
  369. tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, is_tx, 0, 0);
  370. Q2SPI_DEBUG(q2spi, "%s dma_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n",
  371. __func__, tre->dword[0], tre->dword[1],
  372. tre->dword[2], tre->dword[3]);
  373. return tre;
  374. }
  375. int check_gsi_transfer_completion_db_rx(struct q2spi_geni *q2spi)
  376. {
  377. int i = 0, ret = 0;
  378. unsigned long timeout = 0, xfer_timeout = 0;
  379. xfer_timeout = XFER_TIMEOUT_OFFSET;
  380. timeout = wait_for_completion_timeout(&q2spi->db_rx_cb, msecs_to_jiffies(xfer_timeout));
  381. if (!timeout) {
  382. Q2SPI_ERROR(q2spi, "%s Rx[%d] timeout%lu\n", __func__, i, timeout);
  383. ret = -ETIMEDOUT;
  384. goto err_gsi_geni_transfer;
  385. } else {
  386. Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__);
  387. }
  388. err_gsi_geni_transfer:
  389. return ret;
  390. }
  391. int check_gsi_transfer_completion(struct q2spi_geni *q2spi)
  392. {
  393. int i = 0, ret = 0;
  394. unsigned long timeout = 0, xfer_timeout = 0;
  395. xfer_timeout = XFER_TIMEOUT_OFFSET;
  396. Q2SPI_DEBUG(q2spi, "%s tx_eot:%d rx_eot:%d\n", __func__,
  397. q2spi->gsi->num_tx_eot, q2spi->gsi->num_rx_eot);
  398. for (i = 0 ; i < q2spi->gsi->num_tx_eot; i++) {
  399. timeout =
  400. wait_for_completion_timeout(&q2spi->tx_cb, msecs_to_jiffies(xfer_timeout));
  401. if (!timeout) {
  402. Q2SPI_ERROR(q2spi, "%s PID:%d Tx[%d] timeout\n", __func__, current->pid, i);
  403. ret = -ETIMEDOUT;
  404. goto err_gsi_geni_transfer;
  405. } else {
  406. Q2SPI_DEBUG(q2spi, "%s tx completed\n", __func__);
  407. }
  408. }
  409. for (i = 0 ; i < q2spi->gsi->num_rx_eot; i++) {
  410. timeout =
  411. wait_for_completion_timeout(&q2spi->rx_cb, msecs_to_jiffies(xfer_timeout));
  412. if (!timeout) {
  413. Q2SPI_ERROR(q2spi, "%s PID:%d Rx[%d] timeout\n", __func__, current->pid, i);
  414. ret = -ETIMEDOUT;
  415. goto err_gsi_geni_transfer;
  416. } else {
  417. Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__);
  418. }
  419. }
  420. err_gsi_geni_transfer:
  421. if (q2spi->gsi->qup_gsi_err || !timeout) {
  422. ret = -ETIMEDOUT;
  423. Q2SPI_ERROR(q2spi, "%s Err QUP Gsi Error\n", __func__);
  424. q2spi->gsi->qup_gsi_err = false;
  425. q2spi->setup_config0 = false;
  426. dmaengine_terminate_all(q2spi->gsi->tx_c);
  427. }
  428. return ret;
  429. }
/*
 * q2spi_setup_gsi_xfer - build and submit the GSI TRE chain for one packet
 * @q2spi_pkt: packet describing the transfer; for Q2SPI_RX_ONLY the shared
 *             doorbell transfer (q2spi->db_xfer) is used instead of the
 *             packet's own xfer.
 *
 * Assembles CONFIG0 (only when not already cached via setup_config0),
 * GO and DMA TREs into the TX scatterlist (plus an RX DMA TRE in its own
 * scatterlist for RX-capable commands), preps and submits the dmaengine
 * descriptors, then issues them. EOT counters are reset here and consumed
 * later by check_gsi_transfer_completion().
 *
 * Return: 0 on success, negative errno on setup/submit failure.
 */
int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt)
{
	struct msm_gpi_tre *c0_tre = NULL;
	struct msm_gpi_tre *go_tre = NULL;
	struct msm_gpi_tre *tx_tre = NULL;
	struct msm_gpi_tre *rx_tre = NULL;
	struct scatterlist *xfer_tx_sg;
	struct scatterlist *xfer_rx_sg;
	u8 cs = 0;
	u32 tx_rx_len = 0;
	int rx_nent = 0;
	int tx_nent = 0;
	int go_flags = 0;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct q2spi_geni *q2spi = q2spi_pkt->q2spi;
	struct q2spi_dma_transfer *xfer;
	u8 cmd;

	/* Doorbell packets use the dedicated doorbell transfer context. */
	if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY)
		xfer = q2spi->db_xfer;
	else
		xfer = q2spi_pkt->xfer;
	cmd = xfer->cmd;
	Q2SPI_DEBUG(q2spi, "%s PID=%d xfer:%p vtype=%d\n", __func__,
		    current->pid, xfer, q2spi_pkt->vtype);
	Q2SPI_DEBUG(q2spi, "%s cmd:%d q2spi_pkt:%p\n", __func__, cmd, q2spi_pkt);

	/* Fresh transfer: reset EOT bookkeeping and error flag. */
	q2spi->gsi->num_tx_eot = 0;
	q2spi->gsi->num_rx_eot = 0;
	q2spi->gsi->qup_gsi_err = false;
	xfer_tx_sg = q2spi->gsi->tx_sg;
	xfer_rx_sg = q2spi->gsi->rx_sg;
	c0_tre = &q2spi->gsi->config0_tre;
	go_tre = &q2spi->gsi->go_tre;

	/* CONFIG0 TRE always occupies the first TX slot (it is rebuilt
	 * only when setup_config0 was invalidated).
	 */
	tx_nent++;
	if (!q2spi->setup_config0) {
		c0_tre = setup_cfg0_tre(q2spi);
		if (IS_ERR_OR_NULL(c0_tre)) {
			Q2SPI_DEBUG(q2spi, "%s Err setting c0_tre", __func__);
			return -EINVAL;
		}
	}

	if (cmd == Q2SPI_TX_ONLY)
		tx_rx_len = xfer->tx_data_len;
	else
		tx_rx_len = xfer->rx_data_len;

	go_flags |= Q2SPI_CMD;
	go_flags |= (SINGLE_SDR_MODE << Q2SPI_MODE_SHIFT) & Q2SPI_MODE;
	go_tre = setup_go_tre(cmd, cs, tx_rx_len, go_flags, q2spi);
	if (IS_ERR_OR_NULL(go_tre)) {
		Q2SPI_DEBUG(q2spi, "%s Err setting g0_tre", __func__);
		return -EINVAL;
	}

	/* Scatterlist entry counts per command type (TX list always carries
	 * GO + a DMA TRE; RX-capable commands add one RX DMA TRE).
	 */
	if (cmd == Q2SPI_TX_ONLY) {
		tx_nent += 2;
	} else if (cmd == Q2SPI_RX_ONLY) {
		tx_nent++;
		rx_nent++;
	} else if (cmd == Q2SPI_TX_RX) {
		tx_nent += 2;
		rx_nent++;
	}
	Q2SPI_DEBUG(q2spi, "%s tx_nent:%d rx_nent:%d\n", __func__, tx_nent, rx_nent);
	sg_init_table(xfer_tx_sg, tx_nent);
	if (rx_nent)
		sg_init_table(xfer_rx_sg, rx_nent);
	if (c0_tre)
		sg_set_buf(xfer_tx_sg++, c0_tre, sizeof(*c0_tre));
	sg_set_buf(xfer_tx_sg++, go_tre, sizeof(*go_tre));

	/* TX DMA TRE follows GO in the TX channel. */
	tx_tre = &q2spi->gsi->tx_dma_tre;
	tx_tre = setup_dma_tre(tx_tre, xfer->tx_dma, xfer->tx_len, q2spi, 1);
	if (IS_ERR_OR_NULL(tx_tre)) {
		Q2SPI_ERROR(q2spi, "%s Err setting up tx tre\n", __func__);
		return -EINVAL;
	}
	sg_set_buf(xfer_tx_sg++, tx_tre, sizeof(*tx_tre));
	q2spi->gsi->num_tx_eot++;
	q2spi->gsi->tx_desc = dmaengine_prep_slave_sg(q2spi->gsi->tx_c, q2spi->gsi->tx_sg, tx_nent,
						      DMA_MEM_TO_DEV, flags);
	if (IS_ERR_OR_NULL(q2spi->gsi->tx_desc)) {
		Q2SPI_ERROR(q2spi, "%s Err setting up tx desc\n", __func__);
		return -EIO;
	}
	q2spi->gsi->tx_ev.init.cb_param = q2spi_pkt;
	q2spi->gsi->tx_desc->callback = q2spi_gsi_tx_callback;
	q2spi->gsi->tx_desc->callback_param = &q2spi->gsi->tx_cb_param;
	q2spi->gsi->tx_cb_param.userdata = q2spi_pkt;
	q2spi->gsi->tx_cookie = dmaengine_submit(q2spi->gsi->tx_desc);
	Q2SPI_DEBUG(q2spi, "%s Tx cb_param:%p\n", __func__, q2spi->gsi->tx_desc->callback_param);
	if (dma_submit_error(q2spi->gsi->tx_cookie)) {
		Q2SPI_ERROR(q2spi, "%s Err dmaengine_submit failed (%d)\n",
			    __func__, q2spi->gsi->tx_cookie);
		dmaengine_terminate_all(q2spi->gsi->tx_c);
		return -EINVAL;
	}

	if (cmd == Q2SPI_TX_RX) {
		/* RX side of a TX+RX command: normal rx_desc/rx_cb path. */
		rx_tre = &q2spi->gsi->rx_dma_tre;
		/* NOTE(review): is_tx=1 is passed for the RX TRE here (and
		 * below) just as for TX — confirm the DWORD3 flag semantics.
		 */
		rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->rx_len, q2spi, 1);
		if (IS_ERR_OR_NULL(rx_tre)) {
			Q2SPI_ERROR(q2spi, "%s Err setting up rx tre\n", __func__);
			return -EINVAL;
		}
		sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre));
		q2spi->gsi->rx_desc = dmaengine_prep_slave_sg(q2spi->gsi->rx_c, q2spi->gsi->rx_sg,
							      rx_nent, DMA_DEV_TO_MEM, flags);
		if (IS_ERR_OR_NULL(q2spi->gsi->rx_desc)) {
			Q2SPI_ERROR(q2spi, "%s rx_desc fail\n", __func__);
			return -EIO;
		}
		q2spi->gsi->rx_ev.init.cb_param = q2spi_pkt;
		q2spi->gsi->rx_desc->callback = q2spi_gsi_rx_callback;
		q2spi->gsi->rx_desc->callback_param = &q2spi->gsi->rx_cb_param;
		q2spi->gsi->rx_cb_param.userdata = q2spi_pkt;
		q2spi->gsi->num_rx_eot++;
		q2spi->gsi->rx_cookie = dmaengine_submit(q2spi->gsi->rx_desc);
		Q2SPI_DEBUG(q2spi, "%s Rx cb_param:%p\n", __func__,
			    q2spi->gsi->rx_desc->callback_param);
		if (dma_submit_error(q2spi->gsi->rx_cookie)) {
			Q2SPI_ERROR(q2spi, "%s Err dmaengine_submit failed (%d)\n",
				    __func__, q2spi->gsi->rx_cookie);
			dmaengine_terminate_all(q2spi->gsi->rx_c);
			return -EINVAL;
		}
	} else if (cmd == Q2SPI_RX_ONLY) {
		/* Doorbell RX: uses the dedicated db_rx_desc/db_rx_cb path. */
		rx_tre = &q2spi->gsi->rx_dma_tre;
		rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->rx_len, q2spi, 1);
		if (IS_ERR_OR_NULL(rx_tre)) {
			Q2SPI_ERROR(q2spi, "%s Err setting up rx tre\n", __func__);
			return -EINVAL;
		}
		sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre));
		q2spi->gsi->db_rx_desc = dmaengine_prep_slave_sg(q2spi->gsi->rx_c,
								 q2spi->gsi->rx_sg,
								 rx_nent, DMA_DEV_TO_MEM, flags);
		if (IS_ERR_OR_NULL(q2spi->gsi->db_rx_desc)) {
			Q2SPI_ERROR(q2spi, "%s Err db_rx_desc fail\n", __func__);
			return -EIO;
		}
		q2spi->gsi->db_rx_desc->callback = q2spi_gsi_rx_callback;
		q2spi->gsi->db_rx_desc->callback_param = &q2spi->gsi->db_rx_cb_param;
		q2spi->gsi->db_rx_cb_param.userdata = q2spi_pkt;
		q2spi->gsi->num_rx_eot++;
		q2spi->gsi->rx_cookie = dmaengine_submit(q2spi->gsi->db_rx_desc);
		Q2SPI_DEBUG(q2spi, "%s DB cb_param:%p\n", __func__,
			    q2spi->gsi->db_rx_desc->callback_param);
		if (dma_submit_error(q2spi->gsi->rx_cookie)) {
			Q2SPI_ERROR(q2spi, "%s Err dmaengine_submit failed (%d)\n",
				    __func__, q2spi->gsi->rx_cookie);
			dmaengine_terminate_all(q2spi->gsi->rx_c);
			return -EINVAL;
		}
	}

	/* Issue RX first so the receive side is armed before TX starts.
	 * (cmd & Q2SPI_RX_ONLY is a bitmask test — it also matches
	 * Q2SPI_TX_RX; presumably the command values are bit flags.)
	 */
	if (cmd & Q2SPI_RX_ONLY) {
		Q2SPI_DEBUG(q2spi, "%s rx_c dma_async_issue_pending\n", __func__);
		q2spi_dump_ipc(q2spi, q2spi->ipc, "GSI DMA-RX", (char *)xfer->rx_buf, tx_rx_len);
		if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY)
			reinit_completion(&q2spi->db_rx_cb);
		else
			reinit_completion(&q2spi->rx_cb);
		dma_async_issue_pending(q2spi->gsi->rx_c);
	}
	if (cmd & Q2SPI_TX_ONLY)
		q2spi_dump_ipc(q2spi, q2spi->ipc, "GSI DMA TX", (char *)xfer->tx_buf,
			       Q2SPI_HEADER_LEN + tx_rx_len);
	Q2SPI_DEBUG(q2spi, "%s tx_c dma_async_issue_pending\n", __func__);
	reinit_completion(&q2spi->tx_cb);
	dma_async_issue_pending(q2spi->gsi->tx_c);
	Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
	return 0;
}
/*
 * q2spi_gsi_ch_ev_cb - GPI channel event callback (registered at GSI setup)
 * @ch: DMA channel the event arrived on
 * @cb: event payload from the GPI layer
 * @ptr: driver instance (struct q2spi_geni *), set as init.cb_param
 *
 * Notifications are only logged. Error events set qup_gsi_err and wake
 * both TX and RX waiters so blocked transfers can bail out. CR header
 * events update the doorbell/SMA bookkeeping and are forwarded to
 * q2spi_parse_cr_header(). A firmware error additionally dumps SE
 * registers after the switch.
 */
void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *ptr)
{
	const struct qup_q2spi_cr_header_event *q2spi_cr_hdr_event;
	struct q2spi_geni *q2spi = ptr;
	int num_crs, i = 0;

	Q2SPI_DEBUG(q2spi, "%s event:%d\n", __func__, cb->cb_event);
	switch (cb->cb_event) {
	case MSM_GPI_QUP_NOTIFY:
	case MSM_GPI_QUP_MAX_EVENT:
		Q2SPI_DEBUG(q2spi, "%s:cb_ev%d status%llu ts%llu count%llu\n",
			    __func__, cb->cb_event, cb->status, cb->timestamp, cb->count);
		break;
	case MSM_GPI_QUP_ERROR:
	case MSM_GPI_QUP_CH_ERROR:
	case MSM_GPI_QUP_FW_ERROR:
	case MSM_GPI_QUP_PENDING_EVENT:
	case MSM_GPI_QUP_EOT_DESC_MISMATCH:
	case MSM_GPI_QUP_SW_ERROR:
		Q2SPI_ERROR(q2spi, "%s cb_ev %d status %llu ts %llu count %llu\n",
			    __func__, cb->cb_event, cb->status,
			    cb->timestamp, cb->count);
		Q2SPI_ERROR(q2spi, "%s err_routine:%u err_type:%u err.code%u\n",
			    __func__, cb->error_log.routine, cb->error_log.type,
			    cb->error_log.error_code);
		/* Flag the error and release any waiters stuck in
		 * check_gsi_transfer_completion().
		 */
		q2spi->gsi->qup_gsi_err = true;
		complete_all(&q2spi->tx_cb);
		complete_all(&q2spi->rx_cb);
		break;
	case MSM_GPI_QUP_CR_HEADER:
		q2spi_cr_hdr_event = &cb->q2spi_cr_header_event;
		/* NOTE(review): byte0_len is used as the CR count here —
		 * confirm against the CR header event layout.
		 */
		num_crs = q2spi_cr_hdr_event->byte0_len;
		for (i = 0; i < num_crs; i++) {
			if (q2spi_cr_hdr_event->cr_hdr[i] == CR_ADDR_LESS_RD) {
				reinit_completion(&q2spi->sma_rd_comp);
				atomic_inc(&q2spi->doorbell_pending);
				/* Read marked pending only if no write is
				 * already in flight.
				 */
				if (!atomic_read(&q2spi->sma_wr_pending))
					atomic_set(&q2spi->sma_rd_pending, 1);
			}
			/* A bulk-access status CR retires one doorbell. */
			if (q2spi_cr_hdr_event->cr_hdr[i] == CR_BULK_ACCESS_STATUS)
				atomic_dec(&q2spi->doorbell_pending);
			if (q2spi_cr_hdr_event->cr_hdr[i] == CR_ADDR_LESS_WR) {
				if (!atomic_read(&q2spi->sma_rd_pending))
					atomic_set(&q2spi->sma_wr_pending, 1);
			}
		}
		Q2SPI_DEBUG(q2spi, "%s GSI doorbell event, db_pending:%d\n",
			    __func__, atomic_read(&q2spi->doorbell_pending));
		q2spi_parse_cr_header(q2spi, cb);
		break;
	default:
		break;
	}
	if (cb->cb_event == MSM_GPI_QUP_FW_ERROR) {
		q2spi_geni_se_dump_regs(q2spi);
		Q2SPI_ERROR(q2spi, "%s dump GSI regs\n", __func__);
	}
}