// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

/* SPI SE specific registers and respective register fields */
#define SE_SPI_CPHA 0x224
#define CPHA BIT(0)
#define SE_SPI_LOOPBACK 0x22c
#define LOOPBACK_ENABLE 0x1
#define NORMAL_MODE 0x0
#define LOOPBACK_MSK GENMASK(1, 0)
#define SE_SPI_CPOL 0x230
#define CPOL BIT(2)
#define SE_SPI_DEMUX_OUTPUT_INV 0x24c
#define CS_DEMUX_OUTPUT_INV_MSK GENMASK(3, 0)
#define SE_SPI_DEMUX_SEL 0x250
#define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)
#define SE_SPI_TRANS_CFG 0x25c
#define CS_TOGGLE BIT(1)
#define SE_SPI_WORD_LEN 0x268
#define WORD_LEN_MSK GENMASK(9, 0)
#define MIN_WORD_LEN 4
#define SE_SPI_TX_TRANS_LEN 0x26c
#define SE_SPI_RX_TRANS_LEN 0x270
#define TRANS_LEN_MSK GENMASK(23, 0)
#define SE_SPI_PRE_POST_CMD_DLY 0x274
#define SE_SPI_DELAY_COUNTERS 0x278
#define SPI_INTER_WORDS_DELAY_MSK GENMASK(9, 0)
#define SPI_CS_CLK_DELAY_MSK GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT 10

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY 1
#define SPI_RX_ONLY 2
#define SPI_TX_RX 7
#define SPI_CS_ASSERT 8
#define SPI_CS_DEASSERT 9
#define SPI_SCK_ONLY 10

/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY BIT(0)
#define TIMESTAMP_BEFORE BIT(1)
#define FRAGMENTATION BIT(2)
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)

#define GSI_LOOPBACK_EN BIT(0)
#define GSI_CS_TOGGLE BIT(3)
#define GSI_CPHA BIT(4)
#define GSI_CPOL BIT(5)

struct spi_geni_master {
        struct geni_se se;
        struct device *dev;
        u32 tx_fifo_depth;
        u32 fifo_width_bits;
        u32 tx_wm;
        u32 last_mode;
        unsigned long cur_speed_hz;
        unsigned long cur_sclk_hz;
        unsigned int cur_bits_per_word;
        unsigned int tx_rem_bytes;
        unsigned int rx_rem_bytes;
        const struct spi_transfer *cur_xfer;
        struct completion cs_done;
        struct completion cancel_done;
        struct completion abort_done;
        unsigned int oversampling;
        spinlock_t lock;
        int irq;
        bool cs_flag;
        bool abort_failed;
        struct dma_chan *tx;
        struct dma_chan *rx;
        int cur_xfer_mode;
};

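/*
 * Pick a serial-engine source clock index and divider for the requested SPI
 * rate. The source clock must run at speed_hz * oversampling; the divider is
 * rounded up, so the resulting rate never exceeds the request. As a worked
 * example (illustrative numbers only): a 1 MHz request with oversampling of 1
 * matched to a 19.2 MHz source gives clk_div = 20 and an actual rate of
 * 960 kHz. The chosen source rate is also voted for via dev_pm_opp_set_rate().
 */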
static int get_spi_clk_cfg(unsigned int speed_hz,
                           struct spi_geni_master *mas,
                           unsigned int *clk_idx,
                           unsigned int *clk_div)
{
        unsigned long sclk_freq;
        unsigned int actual_hz;
        int ret;

        ret = geni_se_clk_freq_match(&mas->se,
                                     speed_hz * mas->oversampling,
                                     clk_idx, &sclk_freq, false);
        if (ret) {
                dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
                        ret, speed_hz);
                return ret;
        }

        *clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
        actual_hz = sclk_freq / (mas->oversampling * *clk_div);

        dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
                actual_hz, sclk_freq, *clk_idx, *clk_div);
        ret = dev_pm_opp_set_rate(mas->dev, sclk_freq);
        if (ret)
                dev_err(mas->dev, "dev_pm_opp_set_rate failed %d\n", ret);
        else
                mas->cur_sclk_hz = sclk_freq;

        return ret;
}

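/*
 * Recover from a transfer that timed out in FIFO mode: first try to cancel
 * the pending M command and, if the cancel itself times out, escalate to an
 * abort. If even the abort never completes, remember that in abort_failed so
 * later operations can refuse to start until the pending interrupts clear
 * (see spi_geni_is_abort_still_pending()).
 */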
static void handle_fifo_timeout(struct spi_master *spi,
                                struct spi_message *msg)
{
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        unsigned long time_left;
        struct geni_se *se = &mas->se;

        spin_lock_irq(&mas->lock);
        reinit_completion(&mas->cancel_done);
        writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
        mas->cur_xfer = NULL;
        geni_se_cancel_m_cmd(se);
        spin_unlock_irq(&mas->lock);

        time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
        if (time_left)
                return;

        spin_lock_irq(&mas->lock);
        reinit_completion(&mas->abort_done);
        geni_se_abort_m_cmd(se);
        spin_unlock_irq(&mas->lock);

        time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
        if (!time_left) {
                dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");

                /*
                 * No need for a lock since SPI core has a lock and we never
                 * access this from an interrupt.
                 */
                mas->abort_failed = true;
        }
}

static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
{
        struct spi_geni_master *mas = spi_master_get_devdata(spi);

        dmaengine_terminate_sync(mas->tx);
        dmaengine_terminate_sync(mas->rx);
}

static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
{
        struct spi_geni_master *mas = spi_master_get_devdata(spi);

        switch (mas->cur_xfer_mode) {
        case GENI_SE_FIFO:
                handle_fifo_timeout(spi, msg);
                break;
        case GENI_GPI_DMA:
                handle_gpi_timeout(spi, msg);
                break;
        default:
                dev_err(mas->dev, "Abort on Mode:%d not supported", mas->cur_xfer_mode);
        }
}

static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
{
        struct geni_se *se = &mas->se;
        u32 m_irq, m_irq_en;

        if (!mas->abort_failed)
                return false;

        /*
         * The only known case where a transfer times out and then a cancel
         * times out then an abort times out is if something is blocking our
         * interrupt handler from running. Avoid starting any new transfers
         * until that sorts itself out.
         */
        spin_lock_irq(&mas->lock);
        m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
        m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
        spin_unlock_irq(&mas->lock);

        if (m_irq & m_irq_en) {
                dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
                        m_irq & m_irq_en);
                return true;
        }

        /*
         * If we're here the problem resolved itself so no need to check more
         * on future transfers.
         */
        mas->abort_failed = false;

        return false;
}

static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
        struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
        struct spi_master *spi = dev_get_drvdata(mas->dev);
        struct geni_se *se = &mas->se;
        unsigned long time_left;

        if (!(slv->mode & SPI_CS_HIGH))
                set_flag = !set_flag;

        if (set_flag == mas->cs_flag)
                return;

        pm_runtime_get_sync(mas->dev);

        if (spi_geni_is_abort_still_pending(mas)) {
                dev_err(mas->dev, "Can't set chip select\n");
                goto exit;
        }

        spin_lock_irq(&mas->lock);
        if (mas->cur_xfer) {
                dev_err(mas->dev, "Can't set CS when prev xfer running\n");
                spin_unlock_irq(&mas->lock);
                goto exit;
        }

        mas->cs_flag = set_flag;
        reinit_completion(&mas->cs_done);
        if (set_flag)
                geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
        else
                geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
        spin_unlock_irq(&mas->lock);

        time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
        if (!time_left) {
                dev_warn(mas->dev, "Timeout setting chip select\n");
                handle_fifo_timeout(spi, NULL);
        }

exit:
        pm_runtime_put(mas->dev);
}

static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
                               unsigned int bits_per_word)
{
        unsigned int pack_words;
        bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
        struct geni_se *se = &mas->se;
        u32 word_len;

        /*
         * If the FIFO word width isn't an exact multiple of bits_per_word,
         * set the packing to be 1 SPI word per FIFO word.
         */
        if (!(mas->fifo_width_bits % bits_per_word))
                pack_words = mas->fifo_width_bits / bits_per_word;
        else
                pack_words = 1;
        geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
                               true, true);
        word_len = (bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK;
        writel(word_len, se->base + SE_SPI_WORD_LEN);
}

static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
                                     unsigned long clk_hz)
{
        u32 clk_sel, m_clk_cfg, idx, div;
        struct geni_se *se = &mas->se;
        int ret;

        if (clk_hz == mas->cur_speed_hz)
                return 0;

        ret = get_spi_clk_cfg(clk_hz, mas, &idx, &div);
        if (ret) {
                dev_err(mas->dev, "Err setting clk to %lu: %d\n", clk_hz, ret);
                return ret;
        }

        /*
         * The SPI core clock gets configured with the requested frequency,
         * or the closest achievable frequency. For that reason the requested
         * frequency is stored in cur_speed_hz and referred to on subsequent
         * transfers instead of calling clk_get_rate().
         */
        mas->cur_speed_hz = clk_hz;

        clk_sel = idx & CLK_SEL_MSK;
        m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
        writel(clk_sel, se->base + SE_GENI_CLK_SEL);
        writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);

        /* Set BW quota for CPU as driver supports FIFO mode only. */
        se->icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(mas->cur_speed_hz);
        ret = geni_icc_set_bw(se);
        if (ret)
                return ret;

        return 0;
}

static int setup_fifo_params(struct spi_device *spi_slv,
                             struct spi_master *spi)
{
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        struct geni_se *se = &mas->se;
        u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
        u32 demux_sel;

        if (mas->last_mode != spi_slv->mode) {
                if (spi_slv->mode & SPI_LOOP)
                        loopback_cfg = LOOPBACK_ENABLE;

                if (spi_slv->mode & SPI_CPOL)
                        cpol = CPOL;

                if (spi_slv->mode & SPI_CPHA)
                        cpha = CPHA;

                if (spi_slv->mode & SPI_CS_HIGH)
                        demux_output_inv = BIT(spi_slv->chip_select);

                demux_sel = spi_slv->chip_select;
                mas->cur_bits_per_word = spi_slv->bits_per_word;

                spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
                writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
                writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
                writel(cpha, se->base + SE_SPI_CPHA);
                writel(cpol, se->base + SE_SPI_CPOL);
                writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
                mas->last_mode = spi_slv->mode;
        }

        return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
}

static void
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
{
        struct spi_master *spi = cb;

        spi->cur_msg->status = -EIO;
        if (result->result != DMA_TRANS_NOERROR) {
                dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
                spi_finalize_current_transfer(spi);
                return;
        }

        if (!result->residue) {
                spi->cur_msg->status = 0;
                dev_dbg(&spi->dev, "DMA txn completed\n");
        } else {
                dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
        }

        spi_finalize_current_transfer(spi);
}

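/*
 * Map one spi_transfer onto the GPI DMA channels. The SPI-specific
 * parameters (mode, chip select, word length, clock selection) travel to the
 * GSI firmware through the gpi_spi_config passed via dma_slave_config; the
 * data itself goes through the tx_sg/rx_sg scatterlists already mapped by
 * the SPI core. Returns 1 on success so the core waits for the DMA
 * completion callback, or a negative error code.
 */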
static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
                          struct spi_device *spi_slv, struct spi_master *spi)
{
        unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
        struct dma_slave_config config = {};
        struct gpi_spi_config peripheral = {};
        struct dma_async_tx_descriptor *tx_desc, *rx_desc;
        int ret;

        config.peripheral_config = &peripheral;
        config.peripheral_size = sizeof(peripheral);
        peripheral.set_config = true;

        if (xfer->bits_per_word != mas->cur_bits_per_word ||
            xfer->speed_hz != mas->cur_speed_hz) {
                mas->cur_bits_per_word = xfer->bits_per_word;
                mas->cur_speed_hz = xfer->speed_hz;
        }

        if (xfer->tx_buf && xfer->rx_buf) {
                peripheral.cmd = SPI_DUPLEX;
        } else if (xfer->tx_buf) {
                peripheral.cmd = SPI_TX;
                peripheral.rx_len = 0;
        } else if (xfer->rx_buf) {
                peripheral.cmd = SPI_RX;
                if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
                        peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
                } else {
                        int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;

                        peripheral.rx_len = (xfer->len / bytes_per_word);
                }
        }

        peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
        peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
        peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
        peripheral.cs = spi_slv->chip_select;
        peripheral.pack_en = true;
        peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;

        ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
                              &peripheral.clk_src, &peripheral.clk_div);
        if (ret) {
                dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret);
                return ret;
        }

        if (!xfer->cs_change) {
                if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
                        peripheral.fragmentation = FRAGMENTATION;
        }

        if (peripheral.cmd & SPI_RX) {
                dmaengine_slave_config(mas->rx, &config);
                rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
                                                  DMA_DEV_TO_MEM, flags);
                if (!rx_desc) {
                        dev_err(mas->dev, "Err setting up rx desc\n");
                        return -EIO;
                }
        }

        /*
         * Always prepare the TX descriptor: even for RX-only transfers with
         * a NULL tx_buf, the GSI spec requires TX to be prepared.
         */
        dmaengine_slave_config(mas->tx, &config);
        tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
                                          DMA_MEM_TO_DEV, flags);
        if (!tx_desc) {
                dev_err(mas->dev, "Err setting up tx desc\n");
                return -EIO;
        }

        tx_desc->callback_result = spi_gsi_callback_result;
        tx_desc->callback_param = spi;

        if (peripheral.cmd & SPI_RX)
                dmaengine_submit(rx_desc);
        dmaengine_submit(tx_desc);

        if (peripheral.cmd & SPI_RX)
                dma_async_issue_pending(mas->rx);
        dma_async_issue_pending(mas->tx);
        return 1;
}

static bool geni_can_dma(struct spi_controller *ctlr,
                         struct spi_device *slv, struct spi_transfer *xfer)
{
        struct spi_geni_master *mas = spi_master_get_devdata(slv->master);

        /* check if dma is supported */
        return mas->cur_xfer_mode != GENI_SE_FIFO;
}

static int spi_geni_prepare_message(struct spi_master *spi,
                                    struct spi_message *spi_msg)
{
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        int ret;

        switch (mas->cur_xfer_mode) {
        case GENI_SE_FIFO:
                if (spi_geni_is_abort_still_pending(mas))
                        return -EBUSY;
                ret = setup_fifo_params(spi_msg->spi, spi);
                if (ret)
                        dev_err(mas->dev, "Couldn't select mode %d\n", ret);
                return ret;

        case GENI_GPI_DMA:
                /* nothing to do for GPI DMA */
                return 0;
        }

        dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode);
        return -EINVAL;
}

static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
{
        int ret;

        mas->tx = dma_request_chan(mas->dev, "tx");
        if (IS_ERR(mas->tx)) {
                ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
                                    "Failed to get tx DMA ch\n");
                goto err_tx;
        }

        mas->rx = dma_request_chan(mas->dev, "rx");
        if (IS_ERR(mas->rx)) {
                ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
                                    "Failed to get rx DMA ch\n");
                goto err_rx;
        }

        return 0;

err_rx:
        mas->rx = NULL;
        dma_release_channel(mas->tx);
err_tx:
        mas->tx = NULL;
        return ret;
}

static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
{
        if (mas->rx) {
                dma_release_channel(mas->rx);
                mas->rx = NULL;
        }

        if (mas->tx) {
                dma_release_channel(mas->tx);
                mas->tx = NULL;
        }
}

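/*
 * One-time hardware setup: verify the serial engine is really configured for
 * SPI, read the FIFO geometry, and pick the transfer mode. If the firmware
 * has disabled the FIFO interface (GENI_IF_DISABLE_RO), the driver tries to
 * grab the GPI DMA channels and run in GENI_GPI_DMA mode; otherwise (or if
 * the channels can't be acquired) it falls back to GENI_SE_FIFO mode.
 */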
static int spi_geni_init(struct spi_geni_master *mas)
{
        struct geni_se *se = &mas->se;
        unsigned int proto, major, minor, ver;
        u32 spi_tx_cfg, fifo_disable;
        int ret = -ENXIO;

        pm_runtime_get_sync(mas->dev);

        proto = geni_se_read_proto(se);
        if (proto != GENI_SE_SPI) {
                dev_err(mas->dev, "Invalid proto %d\n", proto);
                goto out_pm;
        }
        mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);

        /* Width of Tx and Rx FIFO is same */
        mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);

        /*
         * Hardware programming guide suggests to configure
         * RX FIFO RFR level to fifo_depth-2.
         */
        geni_se_init(se, mas->tx_fifo_depth - 3, mas->tx_fifo_depth - 2);
        /* Transmit an entire FIFO worth of data per IRQ */
        mas->tx_wm = 1;
        ver = geni_se_get_qup_hw_version(se);
        major = GENI_SE_VERSION_MAJOR(ver);
        minor = GENI_SE_VERSION_MINOR(ver);

        if (major == 1 && minor == 0)
                mas->oversampling = 2;
        else
                mas->oversampling = 1;

        fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
        switch (fifo_disable) {
        case 1:
                ret = spi_geni_grab_gpi_chan(mas);
                if (!ret) { /* success case */
                        mas->cur_xfer_mode = GENI_GPI_DMA;
                        geni_se_select_mode(se, GENI_GPI_DMA);
                        dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
                        break;
                } else if (ret == -EPROBE_DEFER) {
                        goto out_pm;
                }
                /*
                 * in case of failure to get dma channel, we can still do the
                 * FIFO mode, so fallthrough
                 */
                dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
                fallthrough;

        case 0:
                mas->cur_xfer_mode = GENI_SE_FIFO;
                geni_se_select_mode(se, GENI_SE_FIFO);
                ret = 0;
                break;
        }

        /* We always control CS manually */
        spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
        spi_tx_cfg &= ~CS_TOGGLE;
        writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);

out_pm:
        pm_runtime_put(mas->dev);
        return ret;
}

static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
        /*
         * Calculate how many bytes we'll put in each FIFO word. If the
         * transfer words don't pack cleanly into a FIFO word we'll just put
         * one transfer word in each FIFO word. If they do pack we'll pack 'em.
         */
        if (mas->fifo_width_bits % mas->cur_bits_per_word)
                return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
                                                       BITS_PER_BYTE));

        return mas->fifo_width_bits / BITS_PER_BYTE;
}

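/*
 * Fill the TX FIFO from the current transfer, packing bytes into FIFO words
 * as sized by geni_byte_per_fifo_word(). For example (illustrative numbers):
 * with a 32-bit FIFO word and 8 bits_per_word each FIFO word carries four
 * bytes, while with 12 bits_per_word each FIFO word carries a single
 * two-byte SPI word. Returns true if there is still data left to send, i.e.
 * the watermark interrupt should stay armed.
 */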
static bool geni_spi_handle_tx(struct spi_geni_master *mas)
{
        struct geni_se *se = &mas->se;
        unsigned int max_bytes;
        const u8 *tx_buf;
        unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
        unsigned int i = 0;

        /* Stop the watermark IRQ if nothing to send */
        if (!mas->cur_xfer) {
                writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
                return false;
        }

        max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
        if (mas->tx_rem_bytes < max_bytes)
                max_bytes = mas->tx_rem_bytes;

        tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
        while (i < max_bytes) {
                unsigned int j;
                unsigned int bytes_to_write;
                u32 fifo_word = 0;
                u8 *fifo_byte = (u8 *)&fifo_word;

                bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
                for (j = 0; j < bytes_to_write; j++)
                        fifo_byte[j] = tx_buf[i++];
                iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
        }
        mas->tx_rem_bytes -= max_bytes;
        if (!mas->tx_rem_bytes) {
                writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
                return false;
        }
        return true;
}

static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
        struct geni_se *se = &mas->se;
        u32 rx_fifo_status;
        unsigned int rx_bytes;
        unsigned int rx_last_byte_valid;
        u8 *rx_buf;
        unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
        unsigned int i = 0;

        rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
        rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
        if (rx_fifo_status & RX_LAST) {
                rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
                rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
                if (rx_last_byte_valid && rx_last_byte_valid < 4)
                        rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
        }

        /* Clear out the FIFO and bail if nowhere to put it */
        if (!mas->cur_xfer) {
                for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
                        readl(se->base + SE_GENI_RX_FIFOn);
                return;
        }

        if (mas->rx_rem_bytes < rx_bytes)
                rx_bytes = mas->rx_rem_bytes;

        rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
        while (i < rx_bytes) {
                u32 fifo_word = 0;
                u8 *fifo_byte = (u8 *)&fifo_word;
                unsigned int bytes_to_read;
                unsigned int j;

                bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
                ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
                for (j = 0; j < bytes_to_read; j++)
                        rx_buf[i++] = fifo_byte[j];
        }
        mas->rx_rem_bytes -= rx_bytes;
}

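/*
 * Program one FIFO-mode transfer: word length, clock, and the TX/RX transfer
 * lengths (expressed in SPI words, not bytes), then kick off the M command.
 * The actual data movement happens from geni_spi_isr() via the watermark and
 * RX interrupts.
 */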
static void setup_fifo_xfer(struct spi_transfer *xfer,
                            struct spi_geni_master *mas,
                            u16 mode, struct spi_master *spi)
{
        u32 m_cmd = 0;
        u32 len;
        struct geni_se *se = &mas->se;
        int ret;

        /*
         * Ensure that our interrupt handler isn't still running from some
         * prior command before we start messing with the hardware behind
         * its back. We don't need to _keep_ the lock here since we're only
         * worried about racing with our interrupt handler. The SPI core
         * already handles making sure that we're not trying to do two
         * transfers at once or setting a chip select and doing a transfer
         * concurrently.
         *
         * NOTE: we actually _can't_ hold the lock here because possibly we
         * might call clk_set_rate() which needs to be able to sleep.
         */
        spin_lock_irq(&mas->lock);
        spin_unlock_irq(&mas->lock);

        if (xfer->bits_per_word != mas->cur_bits_per_word) {
                spi_setup_word_len(mas, mode, xfer->bits_per_word);
                mas->cur_bits_per_word = xfer->bits_per_word;
        }

        /* Speed and bits per word can be overridden per transfer */
        ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
        if (ret)
                return;

        mas->tx_rem_bytes = 0;
        mas->rx_rem_bytes = 0;

        if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
                len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
        else
                len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
        len &= TRANS_LEN_MSK;

        mas->cur_xfer = xfer;
        if (xfer->tx_buf) {
                m_cmd |= SPI_TX_ONLY;
                mas->tx_rem_bytes = xfer->len;
                writel(len, se->base + SE_SPI_TX_TRANS_LEN);
        }

        if (xfer->rx_buf) {
                m_cmd |= SPI_RX_ONLY;
                writel(len, se->base + SE_SPI_RX_TRANS_LEN);
                mas->rx_rem_bytes = xfer->len;
        }

        /*
         * Lock around right before we start the transfer since our
         * interrupt could come in at any time now.
         */
        spin_lock_irq(&mas->lock);
        geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
        if (m_cmd & SPI_TX_ONLY) {
                if (geni_spi_handle_tx(mas))
                        writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
        }
        spin_unlock_irq(&mas->lock);
}

static int spi_geni_transfer_one(struct spi_master *spi,
                                 struct spi_device *slv,
                                 struct spi_transfer *xfer)
{
        struct spi_geni_master *mas = spi_master_get_devdata(spi);

        if (spi_geni_is_abort_still_pending(mas))
                return -EBUSY;

        /* Terminate and return success for 0 byte length transfer */
        if (!xfer->len)
                return 0;

        if (mas->cur_xfer_mode == GENI_SE_FIFO) {
                setup_fifo_xfer(xfer, mas, slv->mode, spi);
                return 1;
        }
        return setup_gsi_xfer(xfer, mas, slv, spi);
}

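/*
 * FIFO-mode interrupt handler. Drains/refills the FIFOs on watermark and
 * RX-last interrupts, finalizes the current transfer on M_CMD_DONE, and
 * completes the cancel/abort/chip-select waiters. Everything runs under
 * mas->lock so it can't race with a new transfer being set up.
 */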
static irqreturn_t geni_spi_isr(int irq, void *data)
{
        struct spi_master *spi = data;
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        struct geni_se *se = &mas->se;
        u32 m_irq;

        m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
        if (!m_irq)
                return IRQ_NONE;

        if (m_irq & (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |
                     M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN |
                     M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN))
                dev_warn(mas->dev, "Unexpected IRQ err status %#010x\n", m_irq);

        spin_lock(&mas->lock);

        if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
                geni_spi_handle_rx(mas);

        if (m_irq & M_TX_FIFO_WATERMARK_EN)
                geni_spi_handle_tx(mas);

        if (m_irq & M_CMD_DONE_EN) {
                if (mas->cur_xfer) {
                        spi_finalize_current_transfer(spi);
                        mas->cur_xfer = NULL;
                        /*
                         * If this happens, then a CMD_DONE came before all the
                         * Tx buffer bytes were sent out. This is unusual, log
                         * this condition and disable the WM interrupt to
                         * prevent the system from stalling due to an interrupt
                         * storm.
                         *
                         * If this happens when all Rx bytes haven't been
                         * received, log the condition. The only known time
                         * this can happen is if bits_per_word != 8 and some
                         * registers that expect xfer lengths in num spi_words
                         * weren't written correctly.
                         */
                        if (mas->tx_rem_bytes) {
                                writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
                                dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
                                        mas->tx_rem_bytes, mas->cur_bits_per_word);
                        }
                        if (mas->rx_rem_bytes)
                                dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
                                        mas->rx_rem_bytes, mas->cur_bits_per_word);
                } else {
                        complete(&mas->cs_done);
                }
        }

        if (m_irq & M_CMD_CANCEL_EN)
                complete(&mas->cancel_done);
        if (m_irq & M_CMD_ABORT_EN)
                complete(&mas->abort_done);

        /*
         * It's safe or a good idea to Ack all of our interrupts at the end
         * of the function. Specifically:
         * - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
         *   clearing Acks. Clearing at the end relies on nobody else having
         *   started a new transfer yet or else we could be clearing _their_
         *   done bit, but everyone grabs the spinlock before starting a new
         *   transfer.
         * - M_RX_FIFO_WATERMARK_EN / M_TX_FIFO_WATERMARK_EN: These appear
         *   to be "latched level" interrupts so it's important to clear them
         *   _after_ you've handled the condition and always safe to do so
         *   since they'll re-assert if they're still happening.
         */
        writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
        spin_unlock(&mas->lock);

        return IRQ_HANDLED;
}

static int spi_geni_probe(struct platform_device *pdev)
{
        int ret, irq;
        struct spi_master *spi;
        struct spi_geni_master *mas;
        void __iomem *base;
        struct clk *clk;
        struct device *dev = &pdev->dev;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)
                return dev_err_probe(dev, ret, "could not set DMA mask\n");

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        clk = devm_clk_get(dev, "se");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        spi = devm_spi_alloc_master(dev, sizeof(*mas));
        if (!spi)
                return -ENOMEM;

        platform_set_drvdata(pdev, spi);
        mas = spi_master_get_devdata(spi);
        mas->irq = irq;
        mas->dev = dev;
        mas->se.dev = dev;
        mas->se.wrapper = dev_get_drvdata(dev->parent);
        mas->se.base = base;
        mas->se.clk = clk;

        ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
        if (ret)
                return ret;
        /* OPP table is optional */
        ret = devm_pm_opp_of_add_table(&pdev->dev);
        if (ret && ret != -ENODEV) {
                dev_err(&pdev->dev, "invalid OPP table in device tree\n");
                return ret;
        }

        spi->bus_num = -1;
        spi->dev.of_node = dev->of_node;
        spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
        spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        spi->num_chipselect = 4;
        spi->max_speed_hz = 50000000;
        spi->prepare_message = spi_geni_prepare_message;
        spi->transfer_one = spi_geni_transfer_one;
        spi->can_dma = geni_can_dma;
        spi->dma_map_dev = dev->parent;
        spi->auto_runtime_pm = true;
        spi->handle_err = spi_geni_handle_err;
        spi->use_gpio_descriptors = true;

        init_completion(&mas->cs_done);
        init_completion(&mas->cancel_done);
        init_completion(&mas->abort_done);
        spin_lock_init(&mas->lock);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
        pm_runtime_enable(dev);

        ret = geni_icc_get(&mas->se, NULL);
        if (ret)
                goto spi_geni_probe_runtime_disable;
        /* Set the bus quota to a reasonable value for register access */
        mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
        mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;

        ret = geni_icc_set_bw(&mas->se);
        if (ret)
                goto spi_geni_probe_runtime_disable;

        ret = spi_geni_init(mas);
        if (ret)
                goto spi_geni_probe_runtime_disable;

        /*
         * Register set_cs only for FIFO mode; in DMA (GSI) mode the GSI
         * hardware drives the chip select based on the params passed in
         * the TRE.
         */
        if (mas->cur_xfer_mode == GENI_SE_FIFO)
                spi->set_cs = spi_geni_set_cs;

        /*
         * TX is required per GSI spec, see setup_gsi_xfer().
         */
        if (mas->cur_xfer_mode == GENI_GPI_DMA)
                spi->flags = SPI_CONTROLLER_MUST_TX;

        ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
        if (ret)
                goto spi_geni_release_dma;

        ret = spi_register_master(spi);
        if (ret)
                goto spi_geni_probe_free_irq;

        return 0;
spi_geni_probe_free_irq:
        free_irq(mas->irq, spi);
spi_geni_release_dma:
        spi_geni_release_dma_chan(mas);
spi_geni_probe_runtime_disable:
        pm_runtime_disable(dev);
        return ret;
}

static int spi_geni_remove(struct platform_device *pdev)
{
        struct spi_master *spi = platform_get_drvdata(pdev);
        struct spi_geni_master *mas = spi_master_get_devdata(spi);

        /* Unregister _before_ disabling pm_runtime() so we stop transfers */
        spi_unregister_master(spi);

        spi_geni_release_dma_chan(mas);

        free_irq(mas->irq, spi);
        pm_runtime_disable(&pdev->dev);
        return 0;
}

static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
        struct spi_master *spi = dev_get_drvdata(dev);
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        int ret;

        /* Drop the performance state vote */
        dev_pm_opp_set_rate(dev, 0);

        ret = geni_se_resources_off(&mas->se);
        if (ret)
                return ret;

        return geni_icc_disable(&mas->se);
}

static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
        struct spi_master *spi = dev_get_drvdata(dev);
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        int ret;

        ret = geni_icc_enable(&mas->se);
        if (ret)
                return ret;

        ret = geni_se_resources_on(&mas->se);
        if (ret)
                return ret;

        return dev_pm_opp_set_rate(mas->dev, mas->cur_sclk_hz);
}

static int __maybe_unused spi_geni_suspend(struct device *dev)
{
        struct spi_master *spi = dev_get_drvdata(dev);
        int ret;

        ret = spi_master_suspend(spi);
        if (ret)
                return ret;

        ret = pm_runtime_force_suspend(dev);
        if (ret)
                spi_master_resume(spi);

        return ret;
}

static int __maybe_unused spi_geni_resume(struct device *dev)
{
        struct spi_master *spi = dev_get_drvdata(dev);
        int ret;

        ret = pm_runtime_force_resume(dev);
        if (ret)
                return ret;

        ret = spi_master_resume(spi);
        if (ret)
                pm_runtime_force_suspend(dev);

        return ret;
}

static const struct dev_pm_ops spi_geni_pm_ops = {
        SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
                           spi_geni_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
        { .compatible = "qcom,geni-spi" },
        {}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);

static struct platform_driver spi_geni_driver = {
        .probe  = spi_geni_probe,
        .remove = spi_geni_remove,
        .driver = {
                .name = "geni_spi",
                .pm = &spi_geni_pm_ops,
                .of_match_table = spi_geni_dt_match,
        },
};
module_platform_driver(spi_geni_driver);

MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");