spi-stm32-qspi.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
  4. * Author: Ludovic Barre <[email protected]> for STMicroelectronics.
  5. */
  6. #include <linux/bitfield.h>
  7. #include <linux/clk.h>
  8. #include <linux/dmaengine.h>
  9. #include <linux/dma-mapping.h>
  10. #include <linux/errno.h>
  11. #include <linux/io.h>
  12. #include <linux/iopoll.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/module.h>
  15. #include <linux/mutex.h>
  16. #include <linux/of.h>
  17. #include <linux/of_device.h>
  18. #include <linux/of_gpio.h>
  19. #include <linux/pinctrl/consumer.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/reset.h>
  23. #include <linux/sizes.h>
  24. #include <linux/spi/spi-mem.h>
/* QSPI register offsets (from io_base) and bit definitions */
#define QSPI_CR 0x00			/* control register */
#define CR_EN BIT(0)			/* enable the QSPI block */
#define CR_ABORT BIT(1)			/* abort ongoing transfer (self-clearing) */
#define CR_DMAEN BIT(2)			/* DMA enable */
#define CR_TCEN BIT(3)
#define CR_SSHIFT BIT(4)		/* sample shift */
#define CR_DFM BIT(6)			/* dual flash mode */
#define CR_FSEL BIT(7)			/* flash (bank) select */
#define CR_FTHRES_SHIFT 8		/* FIFO threshold level */
#define CR_TEIE BIT(16)			/* transfer error irq enable */
#define CR_TCIE BIT(17)			/* transfer complete irq enable */
#define CR_FTIE BIT(18)			/* FIFO threshold irq enable */
#define CR_SMIE BIT(19)			/* status match irq enable */
#define CR_TOIE BIT(20)
#define CR_APMS BIT(22)			/* automatic poll mode stop on match */
#define CR_PRESC_MASK GENMASK(31, 24)	/* clock prescaler */
#define QSPI_DCR 0x04			/* device configuration register */
#define DCR_FSIZE_MASK GENMASK(20, 16)	/* flash memory size (2^(FSIZE+1)) */
#define QSPI_SR 0x08			/* status register */
#define SR_TEF BIT(0)			/* transfer error flag */
#define SR_TCF BIT(1)			/* transfer complete flag */
#define SR_FTF BIT(2)			/* FIFO threshold flag */
#define SR_SMF BIT(3)			/* status match flag */
#define SR_TOF BIT(4)
#define SR_BUSY BIT(5)			/* operation in progress */
#define SR_FLEVEL_MASK GENMASK(13, 8)
#define QSPI_FCR 0x0c			/* flag clear register (write 1 to clear) */
#define FCR_CTEF BIT(0)			/* clear transfer error flag */
#define FCR_CTCF BIT(1)			/* clear transfer complete flag */
#define FCR_CSMF BIT(3)			/* clear status match flag */
#define QSPI_DLR 0x10			/* data length register (nbytes - 1) */
#define QSPI_CCR 0x14			/* communication configuration register */
#define CCR_INST_MASK GENMASK(7, 0)	/* instruction opcode */
#define CCR_IMODE_MASK GENMASK(9, 8)	/* instruction phase bus width */
#define CCR_ADMODE_MASK GENMASK(11, 10)	/* address phase bus width */
#define CCR_ADSIZE_MASK GENMASK(13, 12)	/* address size (nbytes - 1) */
#define CCR_DCYC_MASK GENMASK(22, 18)	/* number of dummy cycles */
#define CCR_DMODE_MASK GENMASK(25, 24)	/* data phase bus width */
#define CCR_FMODE_MASK GENMASK(27, 26)	/* functional mode */
#define CCR_FMODE_INDW (0U << 26)	/* indirect write */
#define CCR_FMODE_INDR (1U << 26)	/* indirect read */
#define CCR_FMODE_APM (2U << 26)	/* automatic polling mode */
#define CCR_FMODE_MM (3U << 26)		/* memory-mapped mode */
/* bus-width encodings for the *MODE fields above */
#define CCR_BUSWIDTH_0 0x0
#define CCR_BUSWIDTH_1 0x1
#define CCR_BUSWIDTH_2 0x2
#define CCR_BUSWIDTH_4 0x3
#define QSPI_AR 0x18			/* address register */
#define QSPI_ABR 0x1c			/* alternate bytes register */
#define QSPI_DR 0x20			/* data register (FIFO port) */
#define QSPI_PSMKR 0x24			/* polling status mask register */
#define QSPI_PSMAR 0x28			/* polling status match register */
#define QSPI_PIR 0x2c
#define QSPI_LPTR 0x30
/* driver limits and timeouts */
#define STM32_QSPI_MAX_MMAP_SZ SZ_256M	/* max memory-mapped window size */
#define STM32_QSPI_MAX_NORCHIP 2	/* controller supports two NOR banks */
#define STM32_FIFO_TIMEOUT_US 30000
#define STM32_BUSY_TIMEOUT_US 100000
#define STM32_ABT_TIMEOUT_US 100000
#define STM32_COMP_TIMEOUT_MS 1000
#define STM32_AUTOSUSPEND_DELAY -1
/* per-chip-select flash settings, programmed into CR on each command */
struct stm32_qspi_flash {
	u32 cs;		/* bank index, written into CR_FSEL */
	u32 presc;	/* clock prescaler, written into CR_PRESC_MASK */
};
/* driver state for one QSPI controller instance */
struct stm32_qspi {
	struct device *dev;			/* controller device */
	struct spi_controller *ctrl;		/* SPI controller handle */
	phys_addr_t phys_base;			/* physical register base (DMA slave address) */
	void __iomem *io_base;			/* mapped register bank ("qspi" resource) */
	void __iomem *mm_base;			/* mapped flash window ("qspi_mm" resource) */
	resource_size_t mm_size;		/* size of the memory-mapped window */
	struct clk *clk;			/* controller kernel clock */
	u32 clk_rate;				/* cached clock rate, used to derive prescaler */
	struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
	struct completion data_completion;	/* completed on TCF/TEF irq */
	struct completion match_completion;	/* completed on status-match irq */
	u32 fmode;				/* CCR functional mode of current op (CCR_FMODE_*) */
	struct dma_chan *dma_chtx;		/* optional TX DMA channel (NULL if absent) */
	struct dma_chan *dma_chrx;		/* optional RX DMA channel (NULL if absent) */
	struct completion dma_completion;	/* completed by DMA transfer callback */
	u32 cr_reg;				/* CR shadow, restored on system resume */
	u32 dcr_reg;				/* DCR shadow, restored on system resume */
	unsigned long status_timeout;		/* poll-status timeout, in ms */
	/*
	 * to protect device configuration, could be different between
	 * 2 flash access (bk1, bk2)
	 */
	struct mutex lock;
};
/*
 * Interrupt handler: wakes the waiter for either a status-match event
 * (automatic polling mode) or transfer completion/error.  The relevant
 * irq-enable bit is cleared before completing so the event fires once.
 */
static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
{
	struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
	u32 cr, sr;

	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	sr = readl_relaxed(qspi->io_base + QSPI_SR);

	/* status-match event is only meaningful while SMIE is armed */
	if (cr & CR_SMIE && sr & SR_SMF) {
		/* disable irq */
		cr &= ~CR_SMIE;
		writel_relaxed(cr, qspi->io_base + QSPI_CR);
		complete(&qspi->match_completion);
		return IRQ_HANDLED;
	}

	if (sr & (SR_TEF | SR_TCF)) {
		/* disable irq */
		cr &= ~CR_TCIE & ~CR_TEIE;
		writel_relaxed(cr, qspi->io_base + QSPI_CR);
		complete(&qspi->data_completion);
	}

	return IRQ_HANDLED;
}
/* Pop one byte from the controller data register into *val. */
static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
	*val = readb_relaxed(addr);
}
/* Push one byte from *val into the controller data register. */
static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
	writeb_relaxed(*val, addr);
}
/*
 * CPU-driven FIFO transfer: move op->data.nbytes bytes one at a time
 * through QSPI_DR, waiting for the FIFO threshold flag before each byte.
 * Direction is taken from op->data.dir.  Returns 0 or -ETIMEDOUT if the
 * FIFO flag does not rise within STM32_FIFO_TIMEOUT_US.
 */
static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
			      const struct spi_mem_op *op)
{
	void (*tx_fifo)(u8 *val, void __iomem *addr);
	u32 len = op->data.nbytes, sr;
	u8 *buf;
	int ret;

	/* pick byte mover and buffer according to data direction */
	if (op->data.dir == SPI_MEM_DATA_IN) {
		tx_fifo = stm32_qspi_read_fifo;
		buf = op->data.buf.in;
	} else {
		tx_fifo = stm32_qspi_write_fifo;
		buf = (u8 *)op->data.buf.out;
	}

	while (len--) {
		/* wait until the FIFO can accept/provide a byte */
		ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
							sr, (sr & SR_FTF), 1,
							STM32_FIFO_TIMEOUT_US);
		if (ret) {
			dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
				len, sr);
			return ret;
		}
		tx_fifo(buf++, qspi->io_base + QSPI_DR);
	}

	return 0;
}
/* Memory-mapped read: copy straight out of the mapped flash window. */
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
			    const struct spi_mem_op *op)
{
	memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
		      op->data.nbytes);
	return 0;
}
/* dmaengine completion callback: wake the waiting transfer thread. */
static void stm32_qspi_dma_callback(void *arg)
{
	struct completion *dma_completion = arg;

	complete(dma_completion);
}
/*
 * DMA transfer of op->data through the QSPI FIFO.
 *
 * Maps the data buffer, submits a slave scatter-gather descriptor on the
 * direction-appropriate channel, enables CR_DMAEN, then waits (with a
 * timeout scaled by the number of segments) for the DMA callback.
 * CR_DMAEN is always cleared and the buffer unmapped on exit.
 * Returns 0 on success, -ENOMEM/-ETIMEDOUT/submit errors otherwise;
 * the caller falls back to FIFO polling on failure.
 */
static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
			     const struct spi_mem_op *op)
{
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir;
	struct dma_chan *dma_ch;
	struct sg_table sgt;
	dma_cookie_t cookie;
	u32 cr, t_out;
	int err;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_dir = DMA_DEV_TO_MEM;
		dma_ch = qspi->dma_chrx;
	} else {
		dma_dir = DMA_MEM_TO_DEV;
		dma_ch = qspi->dma_chtx;
	}

	/*
	 * spi_map_buf return -EINVAL if the buffer is not DMA-able
	 * (DMA-able: in vmalloc | kmap | virt_addr_valid)
	 */
	err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
				       dma_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		err = -ENOMEM;
		goto out_unmap;
	}

	cr = readl_relaxed(qspi->io_base + QSPI_CR);

	reinit_completion(&qspi->dma_completion);
	desc->callback = stm32_qspi_dma_callback;
	desc->callback_param = &qspi->dma_completion;
	cookie = dmaengine_submit(desc);
	err = dma_submit_error(cookie);
	if (err)
		goto out;

	dma_async_issue_pending(dma_ch);

	/* let the controller drive the FIFO via DMA requests */
	writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);

	/* scale the timeout with the number of scatter-gather segments */
	t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
	if (!wait_for_completion_timeout(&qspi->dma_completion,
					 msecs_to_jiffies(t_out)))
		err = -ETIMEDOUT;

	if (err)
		dmaengine_terminate_all(dma_ch);

out:
	/* always restore CR without DMAEN, success or failure */
	writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
out_unmap:
	spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);

	return err;
}
  235. static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
  236. {
  237. if (!op->data.nbytes)
  238. return 0;
  239. if (qspi->fmode == CCR_FMODE_MM)
  240. return stm32_qspi_tx_mm(qspi, op);
  241. else if (((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
  242. (op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx)) &&
  243. op->data.nbytes > 4)
  244. if (!stm32_qspi_tx_dma(qspi, op))
  245. return 0;
  246. return stm32_qspi_tx_poll(qspi, op);
  247. }
/* Busy-wait (up to STM32_BUSY_TIMEOUT_US) for the BUSY flag to clear. */
static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
{
	u32 sr;

	return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
						 !(sr & SR_BUSY), 1,
						 STM32_BUSY_TIMEOUT_US);
}
/*
 * Wait for the current indirect command to finish.
 *
 * If TCF is already set (or the op ran in automatic polling mode, where
 * completion is signalled differently) skip the interrupt wait.  Otherwise
 * arm the TC/TE interrupts and sleep on data_completion; the irq handler
 * disarms them again.  Flags are always cleared, and on success the
 * function additionally waits for BUSY to drop.
 * Returns 0, -ETIMEDOUT, or -EIO on a transfer error.
 */
static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
{
	u32 cr, sr;
	int err = 0;

	if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
	    qspi->fmode == CCR_FMODE_APM)
		goto out;

	reinit_completion(&qspi->data_completion);
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);

	if (!wait_for_completion_timeout(&qspi->data_completion,
					 msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
		err = -ETIMEDOUT;
	} else {
		sr = readl_relaxed(qspi->io_base + QSPI_SR);
		if (sr & SR_TEF)
			err = -EIO;
	}

out:
	/* clear flags */
	writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
	if (!err)
		err = stm32_qspi_wait_nobusy(qspi);

	return err;
}
/*
 * Automatic polling mode: arm the status-match interrupt and wait up to
 * qspi->status_timeout ms for the hardware to report a match.  The irq
 * handler disables SMIE; the match flag is cleared here on success.
 */
static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
{
	u32 cr;

	reinit_completion(&qspi->match_completion);
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	writel_relaxed(cr | CR_SMIE, qspi->io_base + QSPI_CR);

	if (!wait_for_completion_timeout(&qspi->match_completion,
					 msecs_to_jiffies(qspi->status_timeout)))
		return -ETIMEDOUT;

	writel_relaxed(FCR_CSMF, qspi->io_base + QSPI_FCR);

	return 0;
}
  292. static int stm32_qspi_get_mode(u8 buswidth)
  293. {
  294. if (buswidth == 4)
  295. return CCR_BUSWIDTH_4;
  296. return buswidth;
  297. }
/*
 * Program and execute one spi-mem operation on the controller.
 *
 * Caller must hold qspi->lock and have set qspi->fmode.  Selects the
 * bank and prescaler for this device, programs DLR/CCR/AR, runs the data
 * phase, then waits for completion in indirect modes.  Any failure (or a
 * memory-mapped read, where prefetch must be stopped) ends with a
 * hardware abort.  Returns 0 or a negative error code.
 */
static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(spi->master);
	struct stm32_qspi_flash *flash = &qspi->flash[spi->chip_select];
	u32 ccr, cr;
	int timeout, err = 0, err_poll_status = 0;

	dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.val, op->data.nbytes);

	/* select the flash bank and its prescaler for this chip select */
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	cr &= ~CR_PRESC_MASK & ~CR_FSEL;
	cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
	cr |= FIELD_PREP(CR_FSEL, flash->cs);
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/* DLR holds (length - 1); only meaningful when there is data */
	if (op->data.nbytes)
		writel_relaxed(op->data.nbytes - 1,
			       qspi->io_base + QSPI_DLR);

	/* build CCR: functional mode, opcode and per-phase bus widths */
	ccr = qspi->fmode;
	ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
	ccr |= FIELD_PREP(CCR_IMODE_MASK,
			  stm32_qspi_get_mode(op->cmd.buswidth));

	if (op->addr.nbytes) {
		ccr |= FIELD_PREP(CCR_ADMODE_MASK,
				  stm32_qspi_get_mode(op->addr.buswidth));
		ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
	}

	/* convert dummy bytes to dummy clock cycles for DCYC */
	if (op->dummy.nbytes)
		ccr |= FIELD_PREP(CCR_DCYC_MASK,
				  op->dummy.nbytes * 8 / op->dummy.buswidth);

	if (op->data.nbytes) {
		ccr |= FIELD_PREP(CCR_DMODE_MASK,
				  stm32_qspi_get_mode(op->data.buswidth));
	}

	writel_relaxed(ccr, qspi->io_base + QSPI_CCR);

	/* writing AR triggers the transfer in indirect modes, not in MM */
	if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
		writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);

	if (qspi->fmode == CCR_FMODE_APM)
		err_poll_status = stm32_qspi_wait_poll_status(qspi);

	err = stm32_qspi_tx(qspi, op);

	/*
	 * Abort in:
	 * - error case
	 * - memory-mapped read: prefetching must be stopped if we read the
	 *   last byte of device (device size - fifo size). As the device
	 *   size is not known, the prefetching is always stopped.
	 */
	if (err || err_poll_status || qspi->fmode == CCR_FMODE_MM)
		goto abort;

	/* wait end of tx in indirect mode */
	err = stm32_qspi_wait_cmd(qspi);
	if (err)
		goto abort;

	return 0;

abort:
	cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/* wait clear of abort bit by hw */
	timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
						    cr, !(cr & CR_ABORT), 1,
						    STM32_ABT_TIMEOUT_US);

	writel_relaxed(FCR_CTCF | FCR_CSMF, qspi->io_base + QSPI_FCR);

	if (err || err_poll_status || timeout)
		dev_err(qspi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
			__func__, err, err_poll_status, timeout);

	return err;
}
/*
 * spi-mem .poll_status callback: run @op in automatic polling mode until
 * (status & mask) == match or @timeout_ms elapses.  The initial delay and
 * polling rate hints are handled by hardware and therefore unused here.
 */
static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *op,
				  u16 mask, u16 match,
				  unsigned long initial_delay_us,
				  unsigned long polling_rate_us,
				  unsigned long timeout_ms)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	int ret;

	if (!spi_mem_supports_op(mem, op))
		return -EOPNOTSUPP;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);

	/* program hardware status-match comparison before sending the op */
	writel_relaxed(mask, qspi->io_base + QSPI_PSMKR);
	writel_relaxed(match, qspi->io_base + QSPI_PSMAR);
	qspi->fmode = CCR_FMODE_APM;
	qspi->status_timeout = timeout_ms;

	ret = stm32_qspi_send(mem->spi, op);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret;
}
  389. static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
  390. {
  391. struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
  392. int ret;
  393. ret = pm_runtime_resume_and_get(qspi->dev);
  394. if (ret < 0)
  395. return ret;
  396. mutex_lock(&qspi->lock);
  397. if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
  398. qspi->fmode = CCR_FMODE_INDR;
  399. else
  400. qspi->fmode = CCR_FMODE_INDW;
  401. ret = stm32_qspi_send(mem->spi, op);
  402. mutex_unlock(&qspi->lock);
  403. pm_runtime_mark_last_busy(qspi->dev);
  404. pm_runtime_put_autosuspend(qspi->dev);
  405. return ret;
  406. }
  407. static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
  408. {
  409. struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
  410. if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
  411. return -EOPNOTSUPP;
  412. /* should never happen, as mm_base == null is an error probe exit condition */
  413. if (!qspi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
  414. return -EOPNOTSUPP;
  415. if (!qspi->mm_size)
  416. return -EOPNOTSUPP;
  417. return 0;
  418. }
/*
 * spi-mem .dirmap_read callback: read @len bytes at @offs within the
 * mapping into @buf.  Uses memory-mapped mode when the whole range fits
 * inside the mm window (and the template carries an address phase),
 * otherwise falls back to indirect read.  Returns @len or a negative
 * error code.
 */
static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
	struct spi_mem_op op;
	u32 addr_max;
	int ret;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);
	/* make a local copy of desc op_tmpl and complete dirmap rdesc
	 * spi_mem_op template with offs, len and *buf in order to get
	 * all needed transfer information into struct spi_mem_op
	 */
	memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
	dev_dbg(qspi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);

	op.data.nbytes = len;
	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;

	/*
	 * NOTE(review): the "+ 1" makes this bound check conservative by
	 * one byte (end address is addr + nbytes); presumably deliberate
	 * to keep MM prefetch inside the window — confirm before changing.
	 */
	addr_max = op.addr.val + op.data.nbytes + 1;

	if (addr_max < qspi->mm_size && op.addr.buswidth)
		qspi->fmode = CCR_FMODE_MM;
	else
		qspi->fmode = CCR_FMODE_INDR;

	ret = stm32_qspi_send(desc->mem->spi, &op);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret ?: len;
}
/*
 * Generic SPI message path (used for non-spi-mem devices on a GPIO chip
 * select).  Each transfer is converted into a spi_mem_op; a transfer
 * flagged as dummy_data is folded into the following transfer as dummy
 * cycles, since the QSPI block models dummies as a phase, not a payload.
 */
static int stm32_qspi_transfer_one_message(struct spi_controller *ctrl,
					   struct spi_message *msg)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
	struct spi_transfer *transfer;
	struct spi_device *spi = msg->spi;
	struct spi_mem_op op;
	int ret = 0;

	/* this path only works with a GPIO-driven chip select */
	if (!spi->cs_gpiod)
		return -EOPNOTSUPP;

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	mutex_lock(&qspi->lock);

	gpiod_set_value_cansleep(spi->cs_gpiod, true);

	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		u8 dummy_bytes = 0;

		memset(&op, 0, sizeof(op));

		dev_dbg(qspi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
			transfer->tx_buf, transfer->tx_nbits,
			transfer->rx_buf, transfer->rx_nbits,
			transfer->len, transfer->dummy_data);

		/*
		 * QSPI hardware supports dummy bytes transfer.
		 * If current transfer is dummy byte, merge it with the next
		 * transfer in order to take into account QSPI block constraint
		 */
		if (transfer->dummy_data) {
			op.dummy.buswidth = transfer->tx_nbits;
			op.dummy.nbytes = transfer->len;
			dummy_bytes = transfer->len;

			/* if happens, means that message is not correctly built */
			if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
				ret = -EINVAL;
				goto end_of_transfer;
			}

			transfer = list_next_entry(transfer, transfer_list);
		}

		op.data.nbytes = transfer->len;

		if (transfer->rx_buf) {
			qspi->fmode = CCR_FMODE_INDR;
			op.data.buswidth = transfer->rx_nbits;
			op.data.dir = SPI_MEM_DATA_IN;
			op.data.buf.in = transfer->rx_buf;
		} else {
			qspi->fmode = CCR_FMODE_INDW;
			op.data.buswidth = transfer->tx_nbits;
			op.data.dir = SPI_MEM_DATA_OUT;
			op.data.buf.out = transfer->tx_buf;
		}

		ret = stm32_qspi_send(spi, &op);
		if (ret)
			goto end_of_transfer;

		msg->actual_length += transfer->len + dummy_bytes;
	}

end_of_transfer:
	gpiod_set_value_cansleep(spi->cs_gpiod, false);

	mutex_unlock(&qspi->lock);

	msg->status = ret;
	spi_finalize_current_message(ctrl);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret;
}
/*
 * SPI core .setup callback: validate the device's mode, derive the clock
 * prescaler from max_speed_hz, and program the base CR/DCR configuration
 * (optionally enabling dual flash mode).  Shadows CR/DCR for resume.
 */
static int stm32_qspi_setup(struct spi_device *spi)
{
	struct spi_controller *ctrl = spi->master;
	struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
	struct stm32_qspi_flash *flash;
	u32 presc, mode;
	int ret;

	if (ctrl->busy)
		return -EBUSY;

	if (!spi->max_speed_hz)
		return -EINVAL;

	/*
	 * reject: only one of TX/RX octal set, or both set without the
	 * "cs-gpios" DT property required for dual flash mode
	 */
	mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
	if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
	    ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
	     gpiod_count(qspi->dev, "cs") == -ENOENT)) {
		dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
		dev_err(qspi->dev, "configuration not supported\n");
		return -EINVAL;
	}

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0)
		return ret;

	presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;

	flash = &qspi->flash[spi->chip_select];
	flash->cs = spi->chip_select;
	flash->presc = presc;

	mutex_lock(&qspi->lock);
	qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;

	/*
	 * Dual flash mode is only enabled in case SPI_TX_OCTAL and
	 * SPI_RX_OCTAL are both set in spi->mode and the "cs-gpios"
	 * property is found in DT
	 */
	if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
		qspi->cr_reg |= CR_DFM;
		dev_dbg(qspi->dev, "Dual flash mode enable");
	}

	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);

	/* set dcr fsize to max address */
	qspi->dcr_reg = DCR_FSIZE_MASK;
	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return 0;
}
/*
 * Request and configure the optional RX/TX DMA channels.
 *
 * Missing channels are not fatal (the driver falls back to FIFO
 * polling): only -EPROBE_DEFER is propagated, any other request or
 * config failure just leaves the corresponding channel NULL.  Note an
 * RX -EPROBE_DEFER skips the TX request entirely.
 */
static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
	struct dma_slave_config dma_cfg;
	struct device *dev = qspi->dev;
	int ret = 0;

	/* both directions move single bytes to/from the QSPI_DR FIFO port */
	memset(&dma_cfg, 0, sizeof(dma_cfg));
	dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
	dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
	dma_cfg.src_maxburst = 4;
	dma_cfg.dst_maxburst = 4;

	qspi->dma_chrx = dma_request_chan(dev, "rx");
	if (IS_ERR(qspi->dma_chrx)) {
		ret = PTR_ERR(qspi->dma_chrx);
		qspi->dma_chrx = NULL;
		if (ret == -EPROBE_DEFER)
			goto out;
	} else {
		if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
			dev_err(dev, "dma rx config failed\n");
			dma_release_channel(qspi->dma_chrx);
			qspi->dma_chrx = NULL;
		}
	}

	qspi->dma_chtx = dma_request_chan(dev, "tx");
	if (IS_ERR(qspi->dma_chtx)) {
		ret = PTR_ERR(qspi->dma_chtx);
		qspi->dma_chtx = NULL;
	} else {
		if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
			dev_err(dev, "dma tx config failed\n");
			dma_release_channel(qspi->dma_chtx);
			qspi->dma_chtx = NULL;
		}
	}

out:
	init_completion(&qspi->dma_completion);

	/* only probe deferral is propagated to the caller */
	if (ret != -EPROBE_DEFER)
		ret = 0;

	return ret;
}
  601. static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
  602. {
  603. if (qspi->dma_chtx)
  604. dma_release_channel(qspi->dma_chtx);
  605. if (qspi->dma_chrx)
  606. dma_release_channel(qspi->dma_chrx);
  607. }
/*
 * no special host constraint, so use default spi_mem_default_supports_op
 * to check supported mode.
 */
static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
	.exec_op = stm32_qspi_exec_op,
	.dirmap_create = stm32_qspi_dirmap_create,
	.dirmap_read = stm32_qspi_dirmap_read,
	.poll_status = stm32_qspi_poll_status,
};
/*
 * Probe: map the register bank and memory-mapped window, set up irq,
 * clock, reset, DMA and runtime PM, then register the SPI controller.
 * Error unwinding mirrors the setup order (pm -> dma -> clock).
 */
static int stm32_qspi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *ctrl;
	struct reset_control *rstc;
	struct stm32_qspi *qspi;
	struct resource *res;
	int ret, irq;

	ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
	if (!ctrl)
		return -ENOMEM;

	qspi = spi_controller_get_devdata(ctrl);
	qspi->ctrl = ctrl;

	/* register bank; phys_base is kept for DMA slave addressing */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
	qspi->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(qspi->io_base))
		return PTR_ERR(qspi->io_base);

	qspi->phys_base = res->start;

	/* memory-mapped flash window, bounded to STM32_QSPI_MAX_MMAP_SZ */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
	qspi->mm_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(qspi->mm_base))
		return PTR_ERR(qspi->mm_base);

	qspi->mm_size = resource_size(res);
	if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
			       dev_name(dev), qspi);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	init_completion(&qspi->data_completion);
	init_completion(&qspi->match_completion);

	qspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(qspi->clk))
		return PTR_ERR(qspi->clk);

	qspi->clk_rate = clk_get_rate(qspi->clk);
	if (!qspi->clk_rate)
		return -EINVAL;

	ret = clk_prepare_enable(qspi->clk);
	if (ret) {
		dev_err(dev, "can not enable the clock\n");
		return ret;
	}

	/* reset is optional; pulse it when present */
	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		if (ret == -EPROBE_DEFER)
			goto err_clk_disable;
	} else {
		reset_control_assert(rstc);
		udelay(2);
		reset_control_deassert(rstc);
	}

	qspi->dev = dev;
	platform_set_drvdata(pdev, qspi);
	ret = stm32_qspi_dma_setup(qspi);
	if (ret)
		goto err_dma_free;

	mutex_init(&qspi->lock);

	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL
		| SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_OCTAL;
	ctrl->setup = stm32_qspi_setup;
	ctrl->bus_num = -1;
	ctrl->mem_ops = &stm32_qspi_mem_ops;
	ctrl->use_gpio_descriptors = true;
	ctrl->transfer_one_message = stm32_qspi_transfer_one_message;
	ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
	ctrl->dev.of_node = dev->of_node;

	/* keep the device active across registration; release afterwards */
	pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_noresume(dev);

	ret = spi_register_master(ctrl);
	if (ret)
		goto err_pm_runtime_free;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm_runtime_free:
	pm_runtime_get_sync(qspi->dev);
	/* disable qspi */
	writel_relaxed(0, qspi->io_base + QSPI_CR);
	mutex_destroy(&qspi->lock);
	pm_runtime_put_noidle(qspi->dev);
	pm_runtime_disable(qspi->dev);
	pm_runtime_set_suspended(qspi->dev);
	pm_runtime_dont_use_autosuspend(qspi->dev);
err_dma_free:
	stm32_qspi_dma_free(qspi);
err_clk_disable:
	clk_disable_unprepare(qspi->clk);

	return ret;
}
/*
 * Remove: resume the device so the registers are accessible, unregister
 * the controller, quiesce the hardware, then tear down DMA, runtime PM
 * state and the clock.
 */
static int stm32_qspi_remove(struct platform_device *pdev)
{
	struct stm32_qspi *qspi = platform_get_drvdata(pdev);

	/* keep the clock running while we touch registers below */
	pm_runtime_get_sync(qspi->dev);
	spi_unregister_master(qspi->ctrl);
	/* disable qspi */
	writel_relaxed(0, qspi->io_base + QSPI_CR);
	stm32_qspi_dma_free(qspi);
	mutex_destroy(&qspi->lock);
	pm_runtime_put_noidle(qspi->dev);
	pm_runtime_disable(qspi->dev);
	pm_runtime_set_suspended(qspi->dev);
	pm_runtime_dont_use_autosuspend(qspi->dev);
	clk_disable_unprepare(qspi->clk);

	return 0;
}
/* Runtime PM suspend: only the kernel clock is gated. */
static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);

	clk_disable_unprepare(qspi->clk);

	return 0;
}
/* Runtime PM resume: re-enable the kernel clock (registers retained). */
static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);

	return clk_prepare_enable(qspi->clk);
}
/* System suspend: move pins to sleep state and force runtime suspend. */
static int __maybe_unused stm32_qspi_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);

	return pm_runtime_force_suspend(dev);
}
/*
 * System resume: restore pinctrl, then the CR/DCR shadows saved by
 * setup(), since register contents may be lost across suspend.
 */
static int __maybe_unused stm32_qspi_resume(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(dev);

	/* take a runtime PM reference so the clock is on for the writes */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
/* runtime and system sleep PM callbacks */
static const struct dev_pm_ops stm32_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend,
			   stm32_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume)
};
/* DT match table */
static const struct of_device_id stm32_qspi_match[] = {
	{.compatible = "st,stm32f469-qspi"},
	{}
};
MODULE_DEVICE_TABLE(of, stm32_qspi_match);
/* platform driver registration and module metadata */
static struct platform_driver stm32_qspi_driver = {
	.probe	= stm32_qspi_probe,
	.remove	= stm32_qspi_remove,
	.driver	= {
		.name = "stm32-qspi",
		.of_match_table = stm32_qspi_match,
		.pm = &stm32_qspi_pm_ops,
	},
};
module_platform_driver(stm32_qspi_driver);

MODULE_AUTHOR("Ludovic Barre <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
MODULE_LICENSE("GPL v2");