spi-dw-core.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#include <linux/of.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_device related */
struct dw_spi_chip_data {
	u32 cr0;
	u32 rx_sample_dly;	/* RX sample delay */
};

#ifdef CONFIG_DEBUG_FS

#define DW_SPI_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off,			\
}

static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
	DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
	DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
	DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
	DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
	DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
	DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
	DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
	DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
	DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
	DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
	DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
	DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
	DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
	DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
	DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
	DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	dws->regset.regs = dw_spi_dbgfs_regs;
	dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
	dws->regset.base = dws->regs;
	debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);

	return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	bool cs_high = !!(spi->mode & SPI_CS_HIGH);

	/*
	 * DW SPI controller demands any native CS being set in order to
	 * proceed with data transfer. So in order to activate the SPI
	 * communications we must set a corresponding bit in the Slave
	 * Enable register no matter whether the SPI core is configured to
	 * support active-high or active-low CS level.
	 */
	if (cs_high == enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
	else
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_set_cs, SPI_DW_CORE);
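
/*
 * Worked example of the comparison above, assuming chip_select == 0. The
 * "enable" argument is the logic level requested for the CS line (see the
 * @set_cs kerneldoc in spi.h), so the SER bit gets set exactly when that
 * level matches the line's active level:
 *
 *	SPI_CS_HIGH set,   enable == true  -> SER = BIT(0) (CS asserted)
 *	SPI_CS_HIGH set,   enable == false -> SER = 0      (CS released)
 *	SPI_CS_HIGH unset, enable == false -> SER = BIT(0) (CS asserted)
 *	SPI_CS_HIGH unset, enable == true  -> SER = 0      (CS released)
 */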

/* Return the max entries we can fill into tx fifo */
static inline u32 dw_spi_tx_max(struct dw_spi *dws)
{
	u32 tx_room, rxtx_gap;

	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is about the tx/rx mismatch: we thought to use
	 * (dws->fifo_len - rxflr - txflr) as one maximum value for tx,
	 * but it doesn't cover the data which is out of the tx/rx fifo
	 * and inside the shift registers. So a control from the SW point
	 * of view is taken instead.
	 */
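	/*
	 * Worked example (assuming a 16-entry FIFO): if 10 entries have
	 * been queued for Tx but only 2 responses read back so far,
	 * rx_len - tx_len == 8 entries are still "in flight", so at most
	 * 16 - 8 == 8 new entries may be pushed without risking an Rx
	 * FIFO overflow, whatever amount of Tx room is currently free.
	 */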
	rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);

	return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}

/* Return the max entries we should read out of rx fifo */
static inline u32 dw_spi_rx_max(struct dw_spi *dws)
{
	return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}

static void dw_writer(struct dw_spi *dws)
{
	u32 max = dw_spi_tx_max(dws);
	u32 txw = 0;

	while (max--) {
		if (dws->tx) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else if (dws->n_bytes == 2)
				txw = *(u16 *)(dws->tx);
			else
				txw = *(u32 *)(dws->tx);

			dws->tx += dws->n_bytes;
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		--dws->tx_len;
	}
}

static void dw_reader(struct dw_spi *dws)
{
	u32 max = dw_spi_rx_max(dws);
	u32 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		if (dws->rx) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else if (dws->n_bytes == 2)
				*(u16 *)(dws->rx) = rxw;
			else
				*(u32 *)(dws->rx) = rxw;

			dws->rx += dws->n_bytes;
		}
		--dws->rx_len;
	}
}

int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
	u32 irq_status;
	int ret = 0;

	if (raw)
		irq_status = dw_readl(dws, DW_SPI_RISR);
	else
		irq_status = dw_readl(dws, DW_SPI_ISR);

	if (irq_status & DW_SPI_INT_RXOI) {
		dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
		ret = -EIO;
	}

	if (irq_status & DW_SPI_INT_RXUI) {
		dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
		ret = -EIO;
	}

	if (irq_status & DW_SPI_INT_TXOI) {
		dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
		ret = -EIO;
	}

	/* Generically handle the erroneous situation */
	if (ret) {
		dw_spi_reset_chip(dws);
		if (dws->master->cur_msg)
			dws->master->cur_msg->status = ret;
	}

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_check_status, SPI_DW_CORE);

static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (dw_spi_check_status(dws, false)) {
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}

	/*
	 * Read data from the Rx FIFO every time we've got a chance executing
	 * this method. If there is nothing left to receive, terminate the
	 * procedure. Otherwise adjust the Rx FIFO Threshold level if it's a
	 * final stage of the transfer. By doing so we'll get the next IRQ
	 * right when the leftover incoming data is received.
	 */
	dw_reader(dws);
	if (!dws->rx_len) {
		dw_spi_mask_intr(dws, 0xff);
		spi_finalize_current_transfer(dws->master);
	} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
		dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
	}

	/*
	 * Send data out if Tx FIFO Empty IRQ is received. The IRQ will be
	 * disabled after the data transmission is finished so not to
	 * have the TXE IRQ flood at the final stage of the transfer.
	 */
	if (irq_status & DW_SPI_INT_TXEI) {
		dw_writer(dws);
		if (!dws->tx_len)
			dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *master = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & DW_SPI_INT_MASK;

	if (!irq_status)
		return IRQ_NONE;

	if (!master->cur_msg) {
		dw_spi_mask_intr(dws, 0xff);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
	u32 cr0 = 0;

	if (dw_spi_ip_is(dws, PSSI)) {
		/* CTRLR0[ 5: 4] Frame Format */
		cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 6] Serial Clock Phase
		 * CTRLR0[ 7] Serial Clock Polarity
		 */
		if (spi->mode & SPI_CPOL)
			cr0 |= DW_PSSI_CTRLR0_SCPOL;
		if (spi->mode & SPI_CPHA)
			cr0 |= DW_PSSI_CTRLR0_SCPHA;

		/* CTRLR0[11] Shift Register Loop */
		if (spi->mode & SPI_LOOP)
			cr0 |= DW_PSSI_CTRLR0_SRL;
	} else {
		/* CTRLR0[ 7: 6] Frame Format */
		cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 8] Serial Clock Phase
		 * CTRLR0[ 9] Serial Clock Polarity
		 */
		if (spi->mode & SPI_CPOL)
			cr0 |= DW_HSSI_CTRLR0_SCPOL;
		if (spi->mode & SPI_CPHA)
			cr0 |= DW_HSSI_CTRLR0_SCPHA;

		/* CTRLR0[13] Shift Register Loop */
		if (spi->mode & SPI_LOOP)
			cr0 |= DW_HSSI_CTRLR0_SRL;

		/* CTRLR0[31] MST */
		if (dw_spi_ver_is_ge(dws, HSSI, 102A))
			cr0 |= DW_HSSI_CTRLR0_MST;
	}

	return cr0;
}

void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
			  struct dw_spi_cfg *cfg)
{
	struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
	u32 cr0 = chip->cr0;
	u32 speed_hz;
	u16 clk_div;

	/* CTRLR0[ 4/3: 0] or CTRLR0[ 20: 16] Data Frame Size */
	cr0 |= (cfg->dfs - 1) << dws->dfs_offset;

	if (dw_spi_ip_is(dws, PSSI))
		/* CTRLR0[ 9:8] Transfer Mode */
		cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_TMOD_MASK, cfg->tmode);
	else
		/* CTRLR0[11:10] Transfer Mode */
		cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_TMOD_MASK, cfg->tmode);

	dw_writel(dws, DW_SPI_CTRLR0, cr0);

	if (cfg->tmode == DW_SPI_CTRLR0_TMOD_EPROMREAD ||
	    cfg->tmode == DW_SPI_CTRLR0_TMOD_RO)
		dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);

	/* Note DW APB SSI clock divider doesn't support odd numbers */
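	/*
	 * Worked example of the computation below (assuming max_freq ==
	 * 200 MHz): a requested cfg->freq of 50 MHz yields
	 * DIV_ROUND_UP(200, 50) + 1 == 5, masked down to the even
	 * divider 4, i.e. exactly 50 MHz. A request of 40 MHz yields
	 * 5 + 1 == 6, so the odd divider 5 is rounded up to 6 and the
	 * effective rate drops to ~33.3 MHz.
	 */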
	clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
	speed_hz = dws->max_freq / clk_div;

	if (dws->current_freq != speed_hz) {
		dw_spi_set_clk(dws, clk_div);
		dws->current_freq = speed_hz;
	}

	/* Update RX sample delay if required */
	if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
		dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
		dws->cur_rx_sample_dly = chip->rx_sample_dly;
	}
}
EXPORT_SYMBOL_NS_GPL(dw_spi_update_config, SPI_DW_CORE);

static void dw_spi_irq_setup(struct dw_spi *dws)
{
	u16 level;
	u8 imask;

	/*
	 * Originally Tx and Rx data lengths match. Rx FIFO Threshold level
	 * will be adjusted at the final stage of the IRQ-based SPI transfer
	 * execution so not to lose the leftover of the incoming data.
	 */
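	/*
	 * Example (assuming a 32-entry FIFO and a 100-entry transfer):
	 * level == min(16, 100) == 16, so the Tx Empty IRQ fires once the
	 * Tx FIFO has drained to 16 entries or fewer, and the Rx Full IRQ
	 * once at least 16 entries (threshold 15 + 1) are pending. Each
	 * IRQ thus services a half-FIFO batch instead of a single entry.
	 */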
	level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
	dw_writel(dws, DW_SPI_TXFTLR, level);
	dw_writel(dws, DW_SPI_RXFTLR, level - 1);

	dws->transfer_handler = dw_spi_transfer_handler;

	imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI |
		DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
	dw_spi_umask_intr(dws, imask);
}

/*
 * The iterative procedure of the poll-based transfer is simple: write as much
 * as possible to the Tx FIFO, wait until the data pending to be received is
 * ready to be read, read it from the Rx FIFO and check whether the performed
 * procedure has been successful.
 *
 * Note that this method, just like the IRQ-based transfer, won't work well
 * for SPI devices connected to the controller with native CS due to the
 * automatic CS assertion/de-assertion.
 */
static int dw_spi_poll_transfer(struct dw_spi *dws,
				struct spi_transfer *transfer)
{
	struct spi_delay delay;
	u16 nbits;
	int ret;

	delay.unit = SPI_DELAY_UNIT_SCK;
	nbits = dws->n_bytes * BITS_PER_BYTE;
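
	/*
	 * Each pass below waits for roughly the number of SCK cycles it
	 * takes to clock out the entries still in flight: for instance,
	 * with 8-bit frames (nbits == 8) and 16 entries written but not
	 * yet read back, the delay amounts to 8 * 16 == 128 SPI clock
	 * periods.
	 */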
	do {
		dw_writer(dws);

		delay.value = nbits * (dws->rx_len - dws->tx_len);
		spi_delay_exec(&delay, transfer);

		dw_reader(dws);

		ret = dw_spi_check_status(dws, true);
		if (ret)
			return ret;
	} while (dws->rx_len);

	return 0;
}

static int dw_spi_transfer_one(struct spi_controller *master,
			       struct spi_device *spi,
			       struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct dw_spi_cfg cfg = {
		.tmode = DW_SPI_CTRLR0_TMOD_TR,
		.dfs = transfer->bits_per_word,
		.freq = transfer->speed_hz,
	};
	int ret;

	dws->dma_mapped = 0;
	dws->n_bytes =
		roundup_pow_of_two(DIV_ROUND_UP(transfer->bits_per_word,
						BITS_PER_BYTE));
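	/*
	 * Example: 8-bit words travel as 1 byte per FIFO entry, 12- or
	 * 16-bit words as 2 bytes, and anything from 17 to 32 bits as 4
	 * bytes, since roundup_pow_of_two(DIV_ROUND_UP(24, 8)) == 4.
	 */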
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_len = transfer->len / dws->n_bytes;
	dws->rx = transfer->rx_buf;
	dws->rx_len = dws->tx_len;

	/* Ensure the data above is visible for all CPUs */
	smp_mb();

	dw_spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, spi, &cfg);

	transfer->effective_speed_hz = dws->current_freq;

	/* Check if current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	dw_spi_mask_intr(dws, 0xff);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret)
			return ret;
	}

	dw_spi_enable_chip(dws, 1);

	if (dws->dma_mapped)
		return dws->dma_ops->dma_transfer(dws, transfer);
	else if (dws->irq == IRQ_NOTCONNECTED)
		return dw_spi_poll_transfer(dws, transfer);

	dw_spi_irq_setup(dws);

	return 1;
}

static void dw_spi_handle_err(struct spi_controller *master,
			      struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	dw_spi_reset_chip(dws);
}

static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_IN)
		op->data.nbytes = clamp_val(op->data.nbytes, 0, DW_SPI_NDF_MASK + 1);

	return 0;
}

static bool dw_spi_supports_mem_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
	    op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
{
	unsigned int i, j, len;
	u8 *out;

	/*
	 * Calculate the total length of the EEPROM command transfer and
	 * either use the pre-allocated buffer or create a temporary one.
	 */
	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		len += op->data.nbytes;

	if (len <= DW_SPI_BUF_SIZE) {
		out = dws->buf;
	} else {
		out = kzalloc(len, GFP_KERNEL);
		if (!out)
			return -ENOMEM;
	}

	/*
	 * Collect the operation code, address and dummy bytes into the single
	 * buffer. If it's a transfer with data to be sent, also copy it into
	 * the single buffer in order to speed the data transmission up.
	 */
	for (i = 0; i < op->cmd.nbytes; ++i)
		out[i] = DW_SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
	for (j = 0; j < op->addr.nbytes; ++i, ++j)
		out[i] = DW_SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
	for (j = 0; j < op->dummy.nbytes; ++i, ++j)
		out[i] = 0x0;

	if (op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(&out[i], op->data.buf.out, op->data.nbytes);

	dws->n_bytes = 1;
	dws->tx = out;
	dws->tx_len = len;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dws->rx = op->data.buf.in;
		dws->rx_len = op->data.nbytes;
	} else {
		dws->rx = NULL;
		dws->rx_len = 0;
	}

	return 0;
}

static void dw_spi_free_mem_buf(struct dw_spi *dws)
{
	if (dws->tx != dws->buf)
		kfree(dws->tx);
}

static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
{
	u32 room, entries, sts;
	unsigned int len;
	u8 *buf;

	/*
	 * At the initial stage we just pre-fill the Tx FIFO with no rush,
	 * since native CS hasn't been enabled yet and the automatic data
	 * transmission won't start until we do that.
	 */
	len = min(dws->fifo_len, dws->tx_len);
	buf = dws->tx;
	while (len--)
		dw_write_io_reg(dws, DW_SPI_DR, *buf++);

	/*
	 * After setting any bit in the SER register the transmission will
	 * start automatically. We have to keep up with that procedure
	 * otherwise the CS de-assertion will happen whereupon the memory
	 * operation will be pre-terminated.
	 */
	len = dws->tx_len - ((void *)buf - dws->tx);
	dw_spi_set_cs(spi, false);
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
		if (!entries) {
			dev_err(&dws->master->dev, "CS de-assertion on Tx\n");
			return -EIO;
		}
		room = min(dws->fifo_len - entries, len);
		for (; room; --room, --len)
			dw_write_io_reg(dws, DW_SPI_DR, *buf++);
	}

	/*
	 * Data fetching will start automatically if the EEPROM-read mode is
	 * activated. We have to keep up with the incoming data pace to
	 * prevent the Rx FIFO overflow causing the inbound data loss.
	 */
	len = dws->rx_len;
	buf = dws->rx;
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
		if (!entries) {
			sts = readl_relaxed(dws->regs + DW_SPI_RISR);
			if (sts & DW_SPI_INT_RXOI) {
				dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
				return -EIO;
			}
			continue;
		}
		entries = min(entries, len);
		for (; entries; --entries, --len)
			*buf++ = dw_read_io_reg(dws, DW_SPI_DR);
	}

	return 0;
}

static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
	return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
}

static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	ns = NSEC_PER_SEC / dws->current_freq * nents;
	ns *= dws->n_bytes * BITS_PER_BYTE;
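
	/*
	 * Example: with 8 one-byte entries left in the Tx FIFO
	 * (nents * n_bytes * 8 == 64 bits) at a 10 MHz SPI clock,
	 * ns comes out at 64 * 100 ns == 6400 ns, which falls through
	 * to the USECS branch below as a 7 us polling delay.
	 */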
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_ctlr_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Mem op hung up\n");
		return -EIO;
	}

	return 0;
}

static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
	dw_spi_enable_chip(dws, 0);
	dw_spi_set_cs(spi, true);
	dw_spi_enable_chip(dws, 1);
}

/*
 * The SPI memory operation implementation below is the best choice for
 * devices selected by the native chip-select lane. It's specifically
 * developed to work around the problem with the automatic chip-select
 * lane toggle when there is no data in the Tx FIFO buffer. Luckily the
 * current SPI-mem core calls the exec_op() callback only if the GPIO-based
 * CS is unavailable.
 */
static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
	struct dw_spi_cfg cfg;
	unsigned long flags;
	int ret;

	/*
	 * Collect the outbound data into a single buffer to speed the
	 * transmission up at least on the initial stage.
	 */
	ret = dw_spi_init_mem_buf(dws, op);
	if (ret)
		return ret;

	/*
	 * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
	 * operation. Transmit-only mode is suitable for the rest of them.
	 */
	cfg.dfs = 8;
	cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
	if (op->data.dir == SPI_MEM_DATA_IN) {
		cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
		cfg.ndf = op->data.nbytes;
	} else {
		cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
	}

	dw_spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, mem->spi, &cfg);

	dw_spi_mask_intr(dws, 0xff);

	dw_spi_enable_chip(dws, 1);

	/*
	 * DW APB SSI controller has very nasty peculiarities. First,
	 * originally (without any vendor-specific modifications) it doesn't
	 * provide a direct way to set and clear the native chip-select
	 * signal. Instead the controller asserts the CS lane if the Tx FIFO
	 * isn't empty and a transmission is going on, and automatically
	 * de-asserts it back to the high level if the Tx FIFO doesn't have
	 * anything to be pushed out. Due to that, multi-tasking or heavy IRQ
	 * activity might be fatal, since preemption of the transfer procedure
	 * may cause the Tx FIFO to get empty and a sudden CS de-assertion,
	 * which in the middle of the transfer will most likely cause data
	 * loss. Second, the EEPROM-read and Read-only DW SPI transfer modes
	 * imply the incoming data being automatically pulled into the Rx
	 * FIFO. So if the driver software is late in fetching the data from
	 * the FIFO before it's overflown, new incoming data will be lost. In
	 * order to make sure the executed memory operations are CS-atomic
	 * and to prevent the Rx FIFO overflow we have to disable the local
	 * interrupts so to block any preemption during the subsequent IO
	 * operations.
	 *
	 * Note: in some circumstances disabling IRQs may not help to prevent
	 * the problems described above. The CS de-assertion and Rx FIFO
	 * overflow may still happen due to a relatively slow system bus or
	 * the CPU not working fast enough, so the write-then-read algo
	 * implemented here just won't keep up with the SPI bus data transfer.
	 * Such a situation is highly platform specific and is supposed to be
	 * fixed by manually restricting the SPI bus frequency using the
	 * dws->max_mem_freq parameter.
	 */
	local_irq_save(flags);
	preempt_disable();

	ret = dw_spi_write_then_read(dws, mem->spi);

	local_irq_restore(flags);
	preempt_enable();

	/*
	 * Wait for the operation to finish and check the controller status
	 * only if there hasn't been any run-time error detected. In the
	 * former case it's just pointless. In the latter case it prevents an
	 * additional error message from being printed, since any hw error
	 * flag being set would be due to an error detected on the data
	 * transfer.
	 */
	if (!ret) {
		ret = dw_spi_wait_mem_op_done(dws);
		if (!ret)
			ret = dw_spi_check_status(dws, true);
	}

	dw_spi_stop_mem_op(dws, mem->spi);

	dw_spi_free_mem_buf(dws);

	return ret;
}

/*
 * Initialize the default memory operations if a glue layer hasn't specified
 * custom ones. Direct mapping operations will be preserved anyway since DW SPI
 * controller doesn't have an embedded dirmap interface. Note the memory
 * operations implemented in this driver are the best choice only for the DW
 * APB SSI controller with standard native CS functionality. If a hardware
 * vendor has fixed the automatic CS assertion/de-assertion peculiarity, then
 * it will be safer to use the normal SPI-messages-based transfers
 * implementation.
 */
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
	if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
	    !dws->set_cs) {
		dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
		dws->mem_ops.supports_op = dw_spi_supports_mem_op;
		dws->mem_ops.exec_op = dw_spi_exec_mem_op;
		if (!dws->max_mem_freq)
			dws->max_mem_freq = dws->max_freq;
	}
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct dw_spi_chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		u32 rx_sample_dly_ns;

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);

		/* Get specific / default rx-sample-delay */
		if (device_property_read_u32(&spi->dev,
					     "rx-sample-delay-ns",
					     &rx_sample_dly_ns) != 0)
			/* Use default controller value */
			rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
		chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
							NSEC_PER_SEC /
							dws->max_freq);
	}

	/*
	 * Update CR0 data each time the setup callback is invoked since
	 * the device parameters could have been changed, for instance, by
	 * the MMC SPI driver or something else.
	 */
	chip->cr0 = dw_spi_prepare_cr0(dws, spi);

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct dw_spi_chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	dw_spi_reset_chip(dws);

	/*
	 * Retrieve the Synopsys component version if it hasn't been specified
	 * by the platform. CoreKit version ID is encoded as a 3-chars ASCII
	 * code enclosed with '*' (typical for most Synopsys IP-cores).
	 */
	if (!dws->ver) {
		dws->ver = dw_readl(dws, DW_SPI_VERSION);

		dev_dbg(dev, "Synopsys DWC%sSSI v%c.%c%c\n",
			dw_spi_ip_is(dws, PSSI) ? " APB " : " ",
			DW_SPI_GET_BYTE(dws->ver, 3), DW_SPI_GET_BYTE(dws->ver, 2),
			DW_SPI_GET_BYTE(dws->ver, 1));
	}

	/*
	 * Try to detect the FIFO depth if not set by interface driver,
	 * the depth could be from 2 to 256 from HW spec
	 */
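	/*
	 * Example of the probe loop below: TXFTLR can only latch values
	 * below the FIFO depth, so on a 16-entry FIFO the writes of 1..15
	 * read back unchanged while the write of 16 doesn't stick,
	 * terminating the loop with fifo == 16, the detected depth.
	 */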
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFTLR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFTLR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/*
	 * Detect CTRLR0.DFS field size and offset by testing the lowest bits
	 * writability. Note DWC SSI controller also has the extended DFS, but
	 * with zero offset.
	 */
	if (dw_spi_ip_is(dws, PSSI)) {
		u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);

		dw_spi_enable_chip(dws, 0);
		dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff);
		cr0 = dw_readl(dws, DW_SPI_CTRLR0);
		dw_writel(dws, DW_SPI_CTRLR0, tmp);
		dw_spi_enable_chip(dws, 1);

		if (!(cr0 & DW_PSSI_CTRLR0_DFS_MASK)) {
			dws->caps |= DW_SPI_CAP_DFS32;
			dws->dfs_offset = __bf_shf(DW_PSSI_CTRLR0_DFS32_MASK);
			dev_dbg(dev, "Detected 32-bits max data frame size\n");
		}
	} else {
		dws->caps |= DW_SPI_CAP_DFS32;
	}

	/* enable HW fixup for explicit CS deselect for Amazon's alpine chip */
	if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}

int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *master;
	int ret;

	if (!dws)
		return -EINVAL;

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	device_set_node(&master->dev, dev_fwnode(dev));

	dws->master = master;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	spi_controller_set_devdata(master, dws);

	/* Basic HW init */
	dw_spi_hw_init(dev, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  master);
	if (ret < 0 && ret != -ENOTCONN) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	dw_spi_init_mem_ops(dws);

	master->use_gpio_descriptors = true;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	if (dws->caps & DW_SPI_CAP_DFS32)
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	if (dws->set_cs)
		master->set_cs = dws->set_cs;
	else
		master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	if (dws->mem_ops.exec_op)
		master->mem_ops = &dws->mem_ops;
	master->max_speed_hz = dws->max_freq;
	master->flags = SPI_MASTER_GPIO_SS;
	master->auto_runtime_pm = true;

	/* Get default rx sample delay */
	device_property_read_u32(dev, "rx-sample-delay-ns",
				 &dws->def_rx_sample_dly_ns);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dev, dws);
		if (ret == -EPROBE_DEFER) {
			goto err_free_irq;
		} else if (ret) {
			dev_warn(dev, "DMA init failed\n");
		} else {
			master->can_dma = dws->dma_ops->can_dma;
			master->flags |= SPI_CONTROLLER_MUST_TX;
		}
	}

	ret = spi_register_controller(master);
	if (ret) {
		dev_err_probe(dev, ret, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	dw_spi_enable_chip(dws, 0);
err_free_irq:
	free_irq(dws->irq, master);
err_free_master:
	spi_controller_put(master);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_add_host, SPI_DW_CORE);
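
/*
 * A minimal sketch of how a glue driver is expected to use this core,
 * loosely modelled on the in-tree MMIO glue (spi-dw-mmio.c). The probe
 * function name and the resource handling below are illustrative only:
 *
 *	static int my_dw_spi_probe(struct platform_device *pdev)
 *	{
 *		struct dw_spi *dws;
 *
 *		dws = devm_kzalloc(&pdev->dev, sizeof(*dws), GFP_KERNEL);
 *		if (!dws)
 *			return -ENOMEM;
 *
 *		dws->regs = ...;	// ioremapped controller registers
 *		dws->paddr = ...;	// their physical base, for DMA
 *		dws->irq = platform_get_irq(pdev, 0);
 *		dws->max_freq = ...;	// usually clk_get_rate()
 *		dws->num_cs = ...;	// e.g. from the "num-cs" property
 *
 *		return dw_spi_add_host(&pdev->dev, dws);
 *	}
 *
 * The paired remove path simply calls dw_spi_remove_host(dws).
 */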

void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	spi_unregister_controller(dws->master);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	dw_spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_remove_host, SPI_DW_CORE);

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->master);
	if (ret)
		return ret;

	dw_spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_suspend_host, SPI_DW_CORE);

int dw_spi_resume_host(struct dw_spi *dws)
{
	dw_spi_hw_init(&dws->master->dev, dws);
	return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_resume_host, SPI_DW_CORE);

MODULE_AUTHOR("Feng Tang <[email protected]>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");