  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Driver for Broadcom BCM2835 SPI Controllers
  4. *
  5. * Copyright (C) 2012 Chris Boot
  6. * Copyright (C) 2013 Stephen Warren
  7. * Copyright (C) 2015 Martin Sperl
  8. *
  9. * This driver is inspired by:
  10. * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <[email protected]>
  11. * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/completion.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/delay.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/dmaengine.h>
  19. #include <linux/err.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/io.h>
  22. #include <linux/kernel.h>
  23. #include <linux/module.h>
  24. #include <linux/of.h>
  25. #include <linux/of_address.h>
  26. #include <linux/of_device.h>
  27. #include <linux/gpio/consumer.h>
  28. #include <linux/gpio/machine.h> /* FIXME: using chip internals */
  29. #include <linux/gpio/driver.h> /* FIXME: using chip internals */
  30. #include <linux/of_irq.h>
  31. #include <linux/spi/spi.h>
  32. /* SPI register offsets */
  33. #define BCM2835_SPI_CS 0x00
  34. #define BCM2835_SPI_FIFO 0x04
  35. #define BCM2835_SPI_CLK 0x08
  36. #define BCM2835_SPI_DLEN 0x0c
  37. #define BCM2835_SPI_LTOH 0x10
  38. #define BCM2835_SPI_DC 0x14
  39. /* Bitfields in CS */
  40. #define BCM2835_SPI_CS_LEN_LONG 0x02000000
  41. #define BCM2835_SPI_CS_DMA_LEN 0x01000000
  42. #define BCM2835_SPI_CS_CSPOL2 0x00800000
  43. #define BCM2835_SPI_CS_CSPOL1 0x00400000
  44. #define BCM2835_SPI_CS_CSPOL0 0x00200000
  45. #define BCM2835_SPI_CS_RXF 0x00100000
  46. #define BCM2835_SPI_CS_RXR 0x00080000
  47. #define BCM2835_SPI_CS_TXD 0x00040000
  48. #define BCM2835_SPI_CS_RXD 0x00020000
  49. #define BCM2835_SPI_CS_DONE 0x00010000
  50. #define BCM2835_SPI_CS_LEN 0x00002000
  51. #define BCM2835_SPI_CS_REN 0x00001000
  52. #define BCM2835_SPI_CS_ADCS 0x00000800
  53. #define BCM2835_SPI_CS_INTR 0x00000400
  54. #define BCM2835_SPI_CS_INTD 0x00000200
  55. #define BCM2835_SPI_CS_DMAEN 0x00000100
  56. #define BCM2835_SPI_CS_TA 0x00000080
  57. #define BCM2835_SPI_CS_CSPOL 0x00000040
  58. #define BCM2835_SPI_CS_CLEAR_RX 0x00000020
  59. #define BCM2835_SPI_CS_CLEAR_TX 0x00000010
  60. #define BCM2835_SPI_CS_CPOL 0x00000008
  61. #define BCM2835_SPI_CS_CPHA 0x00000004
  62. #define BCM2835_SPI_CS_CS_10 0x00000002
  63. #define BCM2835_SPI_CS_CS_01 0x00000001
  64. #define BCM2835_SPI_FIFO_SIZE 64
  65. #define BCM2835_SPI_FIFO_SIZE_3_4 48
  66. #define BCM2835_SPI_DMA_MIN_LENGTH 96
  67. #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
  68. | SPI_NO_CS | SPI_3WIRE)
  69. #define DRV_NAME "spi-bcm2835"
/* define polling limits */
/*
 * Time budget (microseconds) under which a transfer is busy-waited in
 * polling mode rather than using interrupts; tunable at runtime via the
 * module parameter (mode 0664).  NOTE(review): the comparison against this
 * limit happens in code outside this chunk — confirm against transfer_one().
 */
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
		 "time in us to run a transfer in polling mode\n");
/**
 * struct bcm2835_spi - BCM2835 SPI controller
 * @regs: base address of register map
 * @clk: core clock, divided to calculate serial clock
 * @clk_hz: core clock cached speed
 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
 * @tfr: SPI transfer currently processed
 * @ctlr: SPI controller reverse lookup
 * @tx_buf: pointer whence next transmitted byte is read
 * @rx_buf: pointer where next received byte is written
 * @tx_len: remaining bytes to transmit
 * @rx_len: remaining bytes to receive
 * @tx_prologue: bytes transmitted without DMA if first TX sglist entry's
 *	length is not a multiple of 4 (to overcome hardware limitation)
 * @rx_prologue: bytes received without DMA if first RX sglist entry's
 *	length is not a multiple of 4 (to overcome hardware limitation)
 * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
 *	unloading the module
 * @count_transfer_polling: count of how often polling mode is used
 * @count_transfer_irq: count of how often interrupt mode is used
 * @count_transfer_irq_after_polling: count of how often we fall back to
 *	interrupt mode after starting in polling mode.
 *	These are counted as well in @count_transfer_polling and
 *	@count_transfer_irq
 * @count_transfer_dma: count how often dma mode is used
 * @slv: SPI slave currently selected
 *	(used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
 * @tx_dma_active: whether a TX DMA descriptor is in progress
 * @rx_dma_active: whether a RX DMA descriptor is in progress
 *	(used by bcm2835_spi_dma_tx_done() to handle a race)
 * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
 *	(cyclically copies from zero page to TX FIFO)
 * @fill_tx_addr: bus address of zero page
 */
struct bcm2835_spi {
	void __iomem *regs;
	struct clk *clk;
	unsigned long clk_hz;
	int irq;
	struct spi_transfer *tfr;
	struct spi_controller *ctlr;
	const u8 *tx_buf;
	u8 *rx_buf;
	int tx_len;
	int rx_len;
	int tx_prologue;
	int rx_prologue;
	unsigned int tx_spillover;
	struct dentry *debugfs_dir;
	u64 count_transfer_polling;
	u64 count_transfer_irq;
	u64 count_transfer_irq_after_polling;
	u64 count_transfer_dma;
	struct bcm2835_spidev *slv;
	unsigned int tx_dma_active;
	unsigned int rx_dma_active;
	struct dma_async_tx_descriptor *fill_tx_desc;
	dma_addr_t fill_tx_addr;
};
/**
 * struct bcm2835_spidev - BCM2835 SPI slave
 * @prepare_cs: precalculated CS register value for ->prepare_message()
 *	(uses slave-specific clock polarity and phase settings)
 * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
 *	(cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
 * @clear_rx_addr: bus address of @clear_rx_cs
 * @clear_rx_cs: precalculated CS register value to clear RX FIFO
 *	(uses slave-specific clock polarity and phase settings)
 */
struct bcm2835_spidev {
	u32 prepare_cs;
	struct dma_async_tx_descriptor *clear_rx_desc;
	dma_addr_t clear_rx_addr;
	/*
	 * Cacheline-aligned because the DMA engine reads this word through
	 * @clear_rx_addr — keeping it in its own cache line avoids sharing
	 * with CPU-written fields (presumably for coherence; confirm against
	 * the DMA mapping code, which is outside this chunk).
	 */
	u32 clear_rx_cs ____cacheline_aligned;
};
  151. #if defined(CONFIG_DEBUG_FS)
  152. static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
  153. const char *dname)
  154. {
  155. char name[64];
  156. struct dentry *dir;
  157. /* get full name */
  158. snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);
  159. /* the base directory */
  160. dir = debugfs_create_dir(name, NULL);
  161. bs->debugfs_dir = dir;
  162. /* the counters */
  163. debugfs_create_u64("count_transfer_polling", 0444, dir,
  164. &bs->count_transfer_polling);
  165. debugfs_create_u64("count_transfer_irq", 0444, dir,
  166. &bs->count_transfer_irq);
  167. debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
  168. &bs->count_transfer_irq_after_polling);
  169. debugfs_create_u64("count_transfer_dma", 0444, dir,
  170. &bs->count_transfer_dma);
  171. }
/* Tear down the debugfs directory created by bcm2835_debugfs_create(). */
static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
	debugfs_remove_recursive(bs->debugfs_dir);
	bs->debugfs_dir = NULL;	/* guard against double removal */
}
  177. #else
/* no-op stub when CONFIG_DEBUG_FS is disabled */
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
				   const char *dname)
{
}
/* no-op stub when CONFIG_DEBUG_FS is disabled */
static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
}
  185. #endif /* CONFIG_DEBUG_FS */
/* Read a 32-bit SPI controller register at byte offset @reg. */
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
{
	return readl(bs->regs + reg);
}
/* Write @val to the 32-bit SPI controller register at byte offset @reg. */
static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
{
	writel(val, bs->regs + reg);
}
  194. static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
  195. {
  196. u8 byte;
  197. while ((bs->rx_len) &&
  198. (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
  199. byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
  200. if (bs->rx_buf)
  201. *bs->rx_buf++ = byte;
  202. bs->rx_len--;
  203. }
  204. }
  205. static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
  206. {
  207. u8 byte;
  208. while ((bs->tx_len) &&
  209. (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
  210. byte = bs->tx_buf ? *bs->tx_buf++ : 0;
  211. bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
  212. bs->tx_len--;
  213. }
  214. }
  215. /**
  216. * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
  217. * @bs: BCM2835 SPI controller
  218. * @count: bytes to read from RX FIFO
  219. *
  220. * The caller must ensure that @bs->rx_len is greater than or equal to @count,
  221. * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
  222. * in the CS register is set (such that a read from the FIFO register receives
  223. * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
  224. */
  225. static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
  226. {
  227. u32 val;
  228. int len;
  229. bs->rx_len -= count;
  230. do {
  231. val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
  232. len = min(count, 4);
  233. memcpy(bs->rx_buf, &val, len);
  234. bs->rx_buf += len;
  235. count -= 4;
  236. } while (count > 0);
  237. }
  238. /**
  239. * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
  240. * @bs: BCM2835 SPI controller
  241. * @count: bytes to write to TX FIFO
  242. *
  243. * The caller must ensure that @bs->tx_len is greater than or equal to @count,
  244. * that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
  245. * in the CS register is set (such that a write to the FIFO register transmits
  246. * 32-bit instead of just 8-bit).
  247. */
  248. static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
  249. {
  250. u32 val;
  251. int len;
  252. bs->tx_len -= count;
  253. do {
  254. if (bs->tx_buf) {
  255. len = min(count, 4);
  256. memcpy(&val, bs->tx_buf, len);
  257. bs->tx_buf += len;
  258. } else {
  259. val = 0;
  260. }
  261. bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
  262. count -= 4;
  263. } while (count > 0);
  264. }
  265. /**
  266. * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
  267. * @bs: BCM2835 SPI controller
  268. *
  269. * The caller must ensure that the RX FIFO can accommodate as many bytes
  270. * as have been written to the TX FIFO: Transmission is halted once the
  271. * RX FIFO is full, causing this function to spin forever.
  272. */
static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
{
	/* DONE is set by the controller once the TX FIFO has drained */
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		cpu_relax();
}
  278. /**
  279. * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
  280. * @bs: BCM2835 SPI controller
  281. * @count: bytes available for reading in RX FIFO
  282. */
  283. static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
  284. {
  285. u8 val;
  286. count = min(count, bs->rx_len);
  287. bs->rx_len -= count;
  288. do {
  289. val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
  290. if (bs->rx_buf)
  291. *bs->rx_buf++ = val;
  292. } while (--count);
  293. }
  294. /**
  295. * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
  296. * @bs: BCM2835 SPI controller
  297. * @count: bytes available for writing in TX FIFO
  298. */
  299. static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
  300. {
  301. u8 val;
  302. count = min(count, bs->tx_len);
  303. bs->tx_len -= count;
  304. do {
  305. val = bs->tx_buf ? *bs->tx_buf++ : 0;
  306. bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
  307. } while (--count);
  308. }
/*
 * Quiesce the controller after a transfer: mask interrupts, stop the
 * transfer, drain both FIFOs and zero DLEN.  The exact write sequence
 * matters (see the DONE note below) — do not reorder.
 */
static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
{
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	/* Disable SPI interrupts and transfer */
	cs &= ~(BCM2835_SPI_CS_INTR |
		BCM2835_SPI_CS_INTD |
		BCM2835_SPI_CS_DMAEN |
		BCM2835_SPI_CS_TA);
	/*
	 * Transmission sometimes breaks unless the DONE bit is written at the
	 * end of every transfer.  The spec says it's a RO bit.  Either the
	 * spec is wrong and the bit is actually of type RW1C, or it's a
	 * hardware erratum.
	 */
	cs |= BCM2835_SPI_CS_DONE;
	/* and reset RX/TX FIFOS */
	cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;

	/* and reset the SPI_HW */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
	/* as well as DLEN */
	bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
}
/*
 * Interrupt handler for interrupt-mode transfers: drain the RX FIFO,
 * refill the TX FIFO, and finalize the transfer once all expected RX
 * bytes have arrived.
 */
static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
	struct bcm2835_spi *bs = dev_id;
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	/* Bail out early if interrupts are not enabled */
	if (!(cs & BCM2835_SPI_CS_INTR))
		return IRQ_NONE;

	/*
	 * An interrupt is signaled either if DONE is set (TX FIFO empty)
	 * or if RXR is set (RX FIFO >= ¾ full).
	 */
	if (cs & BCM2835_SPI_CS_RXF)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	else if (cs & BCM2835_SPI_CS_RXR)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);

	/* TX FIFO is empty (DONE): refill it in one blind burst */
	if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);

	/* Read as many bytes as possible from FIFO */
	bcm2835_rd_fifo(bs);
	/* Write as many bytes as possible to FIFO */
	bcm2835_wr_fifo(bs);

	/* rx_len reaching 0 means every expected byte has been received */
	if (!bs->rx_len) {
		/* Transfer complete - reset SPI HW */
		bcm2835_spi_reset_hw(bs);
		/* wake up the framework */
		spi_finalize_current_transfer(bs->ctlr);
	}

	return IRQ_HANDLED;
}
/*
 * Start a transfer in interrupt mode: prime the TX FIFO with interrupts
 * still masked, then enable INTR/INTD so bcm2835_spi_interrupt() drives
 * the rest.  Returns 1 to tell the SPI core to wait for completion.
 */
static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
					struct spi_device *spi,
					struct spi_transfer *tfr,
					u32 cs, bool fifo_empty)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/* update usage statistics */
	bs->count_transfer_irq++;

	/*
	 * Enable HW block, but with interrupts still disabled.
	 * Otherwise the empty TX FIFO would immediately trigger an interrupt.
	 */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);

	/* fill TX FIFO as much as possible */
	if (fifo_empty)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	bcm2835_wr_fifo(bs);

	/* enable interrupts */
	cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
	bcm2835_wr(bs, BCM2835_SPI_CS, cs);

	/* signal that we need to wait for completion */
	return 1;
}
  383. /**
  384. * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
  385. * @ctlr: SPI master controller
  386. * @tfr: SPI transfer
  387. * @bs: BCM2835 SPI controller
  388. * @cs: CS register
  389. *
  390. * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
  391. * Only the final write access is permitted to transmit less than 4 bytes, the
  392. * SPI controller deduces its intended size from the DLEN register.
  393. *
  394. * If a TX or RX sglist contains multiple entries, one per page, and the first
  395. * entry starts in the middle of a page, that first entry's length may not be
  396. * a multiple of 4. Subsequent entries are fine because they span an entire
  397. * page, hence do have a length that's a multiple of 4.
  398. *
  399. * This cannot happen with kmalloc'ed buffers (which is what most clients use)
  400. * because they are contiguous in physical memory and therefore not split on
  401. * page boundaries by spi_map_buf(). But it *can* happen with vmalloc'ed
  402. * buffers.
  403. *
  404. * The DMA engine is incapable of combining sglist entries into a continuous
  405. * stream of 4 byte chunks, it treats every entry separately: A TX entry is
  406. * rounded up a to a multiple of 4 bytes by transmitting surplus bytes, an RX
  407. * entry is rounded up by throwing away received bytes.
  408. *
  409. * Overcome this limitation by transferring the first few bytes without DMA:
  410. * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
  411. * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
  412. * The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
  413. * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
  414. *
  415. * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
  416. * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
  417. * Caution, the additional 4 bytes spill over to the second TX sglist entry
  418. * if the length of the first is *exactly* 1.
  419. *
  420. * At most 6 bytes are written and at most 3 bytes read. Do we know the
  421. * transfer has this many bytes? Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
  422. *
  423. * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
  424. * by the DMA engine. Toggling the DMA Enable flag in the CS register switches
  425. * the width but also garbles the FIFO's contents. The prologue must therefore
  426. * be transmitted in 32-bit width to ensure that the following DMA transfer can
  427. * pick up the residue in the RX FIFO in ungarbled form.
  428. */
static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
					  struct spi_transfer *tfr,
					  struct bcm2835_spi *bs,
					  u32 cs)
{
	int tx_remaining;

	bs->tfr = tfr;
	bs->tx_prologue = 0;
	bs->rx_prologue = 0;
	bs->tx_spillover = false;

	/* prologue = misaligned remainder of the first sglist entry */
	if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
		bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;

	if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
		bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;

		if (bs->rx_prologue > bs->tx_prologue) {
			if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
				bs->tx_prologue = bs->rx_prologue;
			} else {
				/*
				 * Round TX prologue up to the next multiple
				 * of 4 that covers the RX prologue (see the
				 * "3 vis-à-vis 1" example in the kernel-doc).
				 */
				bs->tx_prologue += 4;
				bs->tx_spillover =
					!(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
			}
		}
	}

	/* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
	if (!bs->tx_prologue)
		return;

	/* Write and read RX prologue. Adjust first entry in RX sglist. */
	if (bs->rx_prologue) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, bs->rx_prologue);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_rd_fifo_count(bs, bs->rx_prologue);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
						  | BCM2835_SPI_CS_CLEAR_TX
						  | BCM2835_SPI_CS_DONE);

		/*
		 * CPU wrote the prologue bytes into the RX buffer; sync them
		 * back so the device-mapped region is consistent before DMA
		 * takes over.
		 */
		dma_sync_single_for_device(ctlr->dma_rx->device->dev,
					   sg_dma_address(&tfr->rx_sg.sgl[0]),
					   bs->rx_prologue, DMA_FROM_DEVICE);

		sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0])     -= bs->rx_prologue;
	}

	if (!bs->tx_buf)
		return;

	/*
	 * Write remaining TX prologue. Adjust first entry in TX sglist.
	 * Also adjust second entry if prologue spills over to it.
	 */
	tx_remaining = bs->tx_prologue - bs->rx_prologue;
	if (tx_remaining) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, tx_remaining);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
						  | BCM2835_SPI_CS_DONE);
	}

	if (likely(!bs->tx_spillover)) {
		sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
		sg_dma_len(&tfr->tx_sg.sgl[0])     -= bs->tx_prologue;
	} else {
		/* first entry fully consumed; 4 bytes came from the second */
		sg_dma_len(&tfr->tx_sg.sgl[0])      = 0;
		sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
		sg_dma_len(&tfr->tx_sg.sgl[1])     -= 4;
	}
}
  498. /**
  499. * bcm2835_spi_undo_prologue() - reconstruct original sglist state
  500. * @bs: BCM2835 SPI controller
  501. *
  502. * Undo changes which were made to an SPI transfer's sglist when transmitting
  503. * the prologue. This is necessary to ensure the same memory ranges are
  504. * unmapped that were originally mapped.
  505. */
static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
{
	struct spi_transfer *tfr = bs->tfr;

	/* nothing to undo if no prologue was transmitted */
	if (!bs->tx_prologue)
		return;

	/* reverse the RX sglist adjustment from the prologue */
	if (bs->rx_prologue) {
		sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0])     += bs->rx_prologue;
	}

	if (!bs->tx_buf)
		goto out;

	/* reverse the TX sglist adjustment, including any spillover */
	if (likely(!bs->tx_spillover)) {
		sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
		sg_dma_len(&tfr->tx_sg.sgl[0])     += bs->tx_prologue;
	} else {
		sg_dma_len(&tfr->tx_sg.sgl[0])      = bs->tx_prologue - 4;
		sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
		sg_dma_len(&tfr->tx_sg.sgl[1])     += 4;
	}
out:
	/* mark prologue as undone so a second call is a no-op */
	bs->tx_prologue = 0;
}
  528. /**
  529. * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
  530. * @data: SPI master controller
  531. *
  532. * Used for bidirectional and RX-only transfers.
  533. */
  534. static void bcm2835_spi_dma_rx_done(void *data)
  535. {
  536. struct spi_controller *ctlr = data;
  537. struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
  538. /* terminate tx-dma as we do not have an irq for it
  539. * because when the rx dma will terminate and this callback
  540. * is called the tx-dma must have finished - can't get to this
  541. * situation otherwise...
  542. */
  543. dmaengine_terminate_async(ctlr->dma_tx);
  544. bs->tx_dma_active = false;
  545. bs->rx_dma_active = false;
  546. bcm2835_spi_undo_prologue(bs);
  547. /* reset fifo and HW */
  548. bcm2835_spi_reset_hw(bs);
  549. /* and mark as completed */;
  550. spi_finalize_current_transfer(ctlr);
  551. }
  552. /**
  553. * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
  554. * @data: SPI master controller
  555. *
  556. * Used for TX-only transfers.
  557. */
static void bcm2835_spi_dma_tx_done(void *data)
{
	struct spi_controller *ctlr = data;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/* busy-wait for TX FIFO to empty */
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		/* keep clearing the RX FIFO so transmission is not stalled */
		bcm2835_wr(bs, BCM2835_SPI_CS, bs->slv->clear_rx_cs);

	bs->tx_dma_active = false;
	/* order the flag update before the rx_dma_active cmpxchg below */
	smp_wmb();

	/*
	 * In case of a very short transfer, RX DMA may not have been
	 * issued yet.  The onus is then on bcm2835_spi_transfer_one_dma()
	 * to terminate it immediately after issuing.
	 */
	if (cmpxchg(&bs->rx_dma_active, true, false))
		dmaengine_terminate_async(ctlr->dma_rx);

	bcm2835_spi_undo_prologue(bs);
	bcm2835_spi_reset_hw(bs);
	spi_finalize_current_transfer(ctlr);
}
  578. /**
  579. * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
  580. * @ctlr: SPI master controller
  581. * @tfr: SPI transfer
  582. * @bs: BCM2835 SPI controller
  583. * @slv: BCM2835 SPI slave
  584. * @is_tx: whether to submit DMA descriptor for TX or RX sglist
  585. *
  586. * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
  587. * Return 0 on success or a negative error number.
  588. */
  589. static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
  590. struct spi_transfer *tfr,
  591. struct bcm2835_spi *bs,
  592. struct bcm2835_spidev *slv,
  593. bool is_tx)
  594. {
  595. struct dma_chan *chan;
  596. struct scatterlist *sgl;
  597. unsigned int nents;
  598. enum dma_transfer_direction dir;
  599. unsigned long flags;
  600. struct dma_async_tx_descriptor *desc;
  601. dma_cookie_t cookie;
  602. if (is_tx) {
  603. dir = DMA_MEM_TO_DEV;
  604. chan = ctlr->dma_tx;
  605. nents = tfr->tx_sg.nents;
  606. sgl = tfr->tx_sg.sgl;
  607. flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
  608. } else {
  609. dir = DMA_DEV_TO_MEM;
  610. chan = ctlr->dma_rx;
  611. nents = tfr->rx_sg.nents;
  612. sgl = tfr->rx_sg.sgl;
  613. flags = DMA_PREP_INTERRUPT;
  614. }
  615. /* prepare the channel */
  616. desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
  617. if (!desc)
  618. return -EINVAL;
  619. /*
  620. * Completion is signaled by the RX channel for bidirectional and
  621. * RX-only transfers; else by the TX channel for TX-only transfers.
  622. */
  623. if (!is_tx) {
  624. desc->callback = bcm2835_spi_dma_rx_done;
  625. desc->callback_param = ctlr;
  626. } else if (!tfr->rx_buf) {
  627. desc->callback = bcm2835_spi_dma_tx_done;
  628. desc->callback_param = ctlr;
  629. bs->slv = slv;
  630. }
  631. /* submit it to DMA-engine */
  632. cookie = dmaengine_submit(desc);
  633. return dma_submit_error(cookie);
  634. }
  635. /**
/**
 * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
 * @ctlr: SPI master controller
 * @tfr: SPI transfer
 * @slv: BCM2835 SPI slave
 * @cs: CS register
 *
 * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
 * the TX and RX DMA channel to copy between memory and FIFO register.
 *
 * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
 * memory is pointless. However not reading the RX FIFO isn't an option either
 * because transmission is halted once it's full. As a workaround, cyclically
 * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
 *
 * The CS register value is precalculated in bcm2835_spi_setup(). Normally
 * this is called only once, on slave registration. A DMA descriptor to write
 * this value is preallocated in bcm2835_dma_init(). All that's left to do
 * when performing a TX-only transfer is to submit this descriptor to the RX
 * DMA channel. Latency is thereby minimized. The descriptor does not
 * generate any interrupts while running. It must be terminated once the
 * TX DMA channel is done.
 *
 * Clearing the RX FIFO is paced by the DREQ signal. The signal is asserted
 * when the RX FIFO becomes half full, i.e. 32 bytes. (Tuneable with the DC
 * register.) Reading 32 bytes from the RX FIFO would normally require 8 bus
 * accesses, whereas clearing it requires only 1 bus access. So an 8-fold
 * reduction in bus traffic and thus energy consumption is achieved.
 *
 * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
 * copying from the zero page. The DMA descriptor to do this is preallocated
 * in bcm2835_dma_init(). It must be terminated once the RX DMA channel is
 * done and can then be reused.
 *
 * The BCM2835 DMA driver autodetects when a transaction copies from the zero
 * page and utilizes the DMA controller's ability to synthesize zeroes instead
 * of copying them from memory. This reduces traffic on the memory bus. The
 * feature is not available on so-called "lite" channels, but normally TX DMA
 * is backed by a full-featured channel.
 *
 * Zero-filling the TX FIFO is paced by the DREQ signal. Unfortunately the
 * BCM2835 SPI controller continues to assert DREQ even after the DLEN register
 * has been counted down to zero (hardware erratum). Thus, when the transfer
 * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
 * (Tuneable with the DC register.) So up to 9 gratuitous bus accesses are
 * performed at the end of an RX-only transfer.
 *
 * Return: 1 on success (the SPI core waits for completion), negative errno
 * on submission failure (hardware reset and prologue undone).
 */
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
					struct spi_transfer *tfr,
					struct bcm2835_spidev *slv,
					u32 cs)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	dma_cookie_t cookie;
	int ret;

	/* update usage statistics */
	bs->count_transfer_dma++;

	/*
	 * Transfer first few bytes without DMA if length of first TX or RX
	 * sglist entry is not a multiple of 4 bytes (hardware limitation).
	 */
	bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);

	/* setup tx-DMA: real buffer, or cyclic zero-fill for RX-only */
	if (bs->tx_buf) {
		ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, true);
	} else {
		cookie = dmaengine_submit(bs->fill_tx_desc);
		ret = dma_submit_error(cookie);
	}
	if (ret)
		goto err_reset_hw;

	/* set the DMA length */
	bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);

	/* start the HW */
	bcm2835_wr(bs, BCM2835_SPI_CS,
		   cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);

	/*
	 * Publish tx_dma_active before issuing TX DMA; presumably pairs
	 * with a read barrier in the TX completion callback — confirm
	 * against bcm2835_spi_dma_tx_done() (outside this view).
	 */
	bs->tx_dma_active = true;
	smp_wmb();

	/* start TX early */
	dma_async_issue_pending(ctlr->dma_tx);

	/* setup rx-DMA late - to run transfers while
	 * mapping of the rx buffers still takes place
	 * this saves 10us or more.
	 */
	if (bs->rx_buf) {
		ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, false);
	} else {
		/* TX-only: cyclically clear the RX FIFO via CS writes */
		cookie = dmaengine_submit(slv->clear_rx_desc);
		ret = dma_submit_error(cookie);
	}
	if (ret) {
		/* need to reset on errors */
		dmaengine_terminate_sync(ctlr->dma_tx);
		bs->tx_dma_active = false;
		goto err_reset_hw;
	}

	/* start rx dma late */
	dma_async_issue_pending(ctlr->dma_rx);
	bs->rx_dma_active = true;
	smp_mb();

	/*
	 * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done()
	 * may run before RX DMA is issued.  Terminate RX DMA if so.
	 * The cmpxchg() ensures only one of this path and the TX-done
	 * callback wins the right to terminate and reset.
	 */
	if (!bs->rx_buf && !bs->tx_dma_active &&
	    cmpxchg(&bs->rx_dma_active, true, false)) {
		dmaengine_terminate_async(ctlr->dma_rx);

		bcm2835_spi_reset_hw(bs);
	}

	/* wait for wakeup in framework */
	return 1;

err_reset_hw:
	bcm2835_spi_reset_hw(bs);
	bcm2835_spi_undo_prologue(bs);
	return ret;
}
  751. static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
  752. struct spi_device *spi,
  753. struct spi_transfer *tfr)
  754. {
  755. /* we start DMA efforts only on bigger transfers */
  756. if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
  757. return false;
  758. /* return OK */
  759. return true;
  760. }
  761. static void bcm2835_dma_release(struct spi_controller *ctlr,
  762. struct bcm2835_spi *bs)
  763. {
  764. if (ctlr->dma_tx) {
  765. dmaengine_terminate_sync(ctlr->dma_tx);
  766. if (bs->fill_tx_desc)
  767. dmaengine_desc_free(bs->fill_tx_desc);
  768. if (bs->fill_tx_addr)
  769. dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
  770. bs->fill_tx_addr, sizeof(u32),
  771. DMA_TO_DEVICE,
  772. DMA_ATTR_SKIP_CPU_SYNC);
  773. dma_release_channel(ctlr->dma_tx);
  774. ctlr->dma_tx = NULL;
  775. }
  776. if (ctlr->dma_rx) {
  777. dmaengine_terminate_sync(ctlr->dma_rx);
  778. dma_release_channel(ctlr->dma_rx);
  779. ctlr->dma_rx = NULL;
  780. }
  781. }
  782. static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
  783. struct bcm2835_spi *bs)
  784. {
  785. struct dma_slave_config slave_config;
  786. const __be32 *addr;
  787. dma_addr_t dma_reg_base;
  788. int ret;
  789. /* base address in dma-space */
  790. addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
  791. if (!addr) {
  792. dev_err(dev, "could not get DMA-register address - not using dma mode\n");
  793. /* Fall back to interrupt mode */
  794. return 0;
  795. }
  796. dma_reg_base = be32_to_cpup(addr);
  797. /* get tx/rx dma */
  798. ctlr->dma_tx = dma_request_chan(dev, "tx");
  799. if (IS_ERR(ctlr->dma_tx)) {
  800. dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
  801. ret = PTR_ERR(ctlr->dma_tx);
  802. ctlr->dma_tx = NULL;
  803. goto err;
  804. }
  805. ctlr->dma_rx = dma_request_chan(dev, "rx");
  806. if (IS_ERR(ctlr->dma_rx)) {
  807. dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
  808. ret = PTR_ERR(ctlr->dma_rx);
  809. ctlr->dma_rx = NULL;
  810. goto err_release;
  811. }
  812. /*
  813. * The TX DMA channel either copies a transfer's TX buffer to the FIFO
  814. * or, in case of an RX-only transfer, cyclically copies from the zero
  815. * page to the FIFO using a preallocated, reusable descriptor.
  816. */
  817. slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
  818. slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  819. ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
  820. if (ret)
  821. goto err_config;
  822. bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
  823. ZERO_PAGE(0), 0, sizeof(u32),
  824. DMA_TO_DEVICE,
  825. DMA_ATTR_SKIP_CPU_SYNC);
  826. if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
  827. dev_err(dev, "cannot map zero page - not using DMA mode\n");
  828. bs->fill_tx_addr = 0;
  829. ret = -ENOMEM;
  830. goto err_release;
  831. }
  832. bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
  833. bs->fill_tx_addr,
  834. sizeof(u32), 0,
  835. DMA_MEM_TO_DEV, 0);
  836. if (!bs->fill_tx_desc) {
  837. dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
  838. ret = -ENOMEM;
  839. goto err_release;
  840. }
  841. ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
  842. if (ret) {
  843. dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
  844. goto err_release;
  845. }
  846. /*
  847. * The RX DMA channel is used bidirectionally: It either reads the
  848. * RX FIFO or, in case of a TX-only transfer, cyclically writes a
  849. * precalculated value to the CS register to clear the RX FIFO.
  850. */
  851. slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
  852. slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  853. slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
  854. slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  855. ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
  856. if (ret)
  857. goto err_config;
  858. /* all went well, so set can_dma */
  859. ctlr->can_dma = bcm2835_spi_can_dma;
  860. return 0;
  861. err_config:
  862. dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
  863. ret);
  864. err_release:
  865. bcm2835_dma_release(ctlr, bs);
  866. err:
  867. /*
  868. * Only report error for deferred probing, otherwise fall back to
  869. * interrupt mode
  870. */
  871. if (ret != -EPROBE_DEFER)
  872. ret = 0;
  873. return ret;
  874. }
  875. static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
  876. struct spi_device *spi,
  877. struct spi_transfer *tfr,
  878. u32 cs)
  879. {
  880. struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
  881. unsigned long timeout;
  882. /* update usage statistics */
  883. bs->count_transfer_polling++;
  884. /* enable HW block without interrupts */
  885. bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
  886. /* fill in the fifo before timeout calculations
  887. * if we are interrupted here, then the data is
  888. * getting transferred by the HW while we are interrupted
  889. */
  890. bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
  891. /* set the timeout to at least 2 jiffies */
  892. timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
  893. /* loop until finished the transfer */
  894. while (bs->rx_len) {
  895. /* fill in tx fifo with remaining data */
  896. bcm2835_wr_fifo(bs);
  897. /* read from fifo as much as possible */
  898. bcm2835_rd_fifo(bs);
  899. /* if there is still data pending to read
  900. * then check the timeout
  901. */
  902. if (bs->rx_len && time_after(jiffies, timeout)) {
  903. dev_dbg_ratelimited(&spi->dev,
  904. "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
  905. jiffies - timeout,
  906. bs->tx_len, bs->rx_len);
  907. /* fall back to interrupt mode */
  908. /* update usage statistics */
  909. bs->count_transfer_irq_after_polling++;
  910. return bcm2835_spi_transfer_one_irq(ctlr, spi,
  911. tfr, cs, false);
  912. }
  913. }
  914. /* Transfer complete - reset SPI HW */
  915. bcm2835_spi_reset_hw(bs);
  916. /* and return without waiting for completion */
  917. return 0;
  918. }
/*
 * Dispatch a single transfer to polling, DMA or interrupt mode after
 * programming the clock divisor and 3-wire settings.
 */
static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *tfr)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
	unsigned long spi_hz, cdiv;
	unsigned long hz_per_byte, byte_limit;
	u32 cs = slv->prepare_cs;

	/* set clock */
	spi_hz = tfr->speed_hz;

	if (spi_hz >= bs->clk_hz / 2) {
		cdiv = 2; /* clk_hz/2 is the fastest we can go */
	} else if (spi_hz) {
		/* CDIV must be a multiple of two */
		cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
		cdiv += (cdiv % 2);	/* round up, never exceed spi_hz */

		if (cdiv >= 65536)
			cdiv = 0; /* 0 is the slowest we can go */
	} else {
		cdiv = 0; /* 0 is the slowest we can go */
	}
	/* CDIV == 0 means divide by 65536 on this hardware */
	tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);

	/* handle all the 3-wire mode */
	if (spi->mode & SPI_3WIRE && tfr->rx_buf)
		cs |= BCM2835_SPI_CS_REN;

	/* set transmit buffers and length */
	bs->tx_buf = tfr->tx_buf;
	bs->rx_buf = tfr->rx_buf;
	bs->tx_len = tfr->len;
	bs->rx_len = tfr->len;

	/* Calculate the estimated time in us the transfer runs. Note that
	 * there is 1 idle clocks cycles after each byte getting transferred
	 * so we have 9 cycles/byte. This is used to find the number of Hz
	 * per byte per polling limit. E.g., we can transfer 1 byte in 30 us
	 * per 300,000 Hz of bus clock.
	 */
	hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
	byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;

	/* run in polling mode for short transfers */
	if (tfr->len < byte_limit)
		return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);

	/* run in dma mode if conditions are right
	 * Note that unlike poll or interrupt mode DMA mode does not have
	 * this 1 idle clock cycle pattern but runs the spi clock without gaps
	 */
	if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
		return bcm2835_spi_transfer_one_dma(ctlr, tfr, slv, cs);

	/* run in interrupt-mode */
	return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
}
  971. static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
  972. struct spi_message *msg)
  973. {
  974. struct spi_device *spi = msg->spi;
  975. struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
  976. struct bcm2835_spidev *slv = spi_get_ctldata(spi);
  977. int ret;
  978. if (ctlr->can_dma) {
  979. /*
  980. * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
  981. * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
  982. * aligned) if the limit is exceeded.
  983. */
  984. ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
  985. GFP_KERNEL | GFP_DMA);
  986. if (ret)
  987. return ret;
  988. }
  989. /*
  990. * Set up clock polarity before spi_transfer_one_message() asserts
  991. * chip select to avoid a gratuitous clock signal edge.
  992. */
  993. bcm2835_wr(bs, BCM2835_SPI_CS, slv->prepare_cs);
  994. return 0;
  995. }
  996. static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
  997. struct spi_message *msg)
  998. {
  999. struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
  1000. /* if an error occurred and we have an active dma, then terminate */
  1001. if (ctlr->dma_tx) {
  1002. dmaengine_terminate_sync(ctlr->dma_tx);
  1003. bs->tx_dma_active = false;
  1004. }
  1005. if (ctlr->dma_rx) {
  1006. dmaengine_terminate_sync(ctlr->dma_rx);
  1007. bs->rx_dma_active = false;
  1008. }
  1009. bcm2835_spi_undo_prologue(bs);
  1010. /* and reset */
  1011. bcm2835_spi_reset_hw(bs);
  1012. }
  1013. static int chip_match_name(struct gpio_chip *chip, void *data)
  1014. {
  1015. return !strcmp(chip->label, data);
  1016. }
  1017. static void bcm2835_spi_cleanup(struct spi_device *spi)
  1018. {
  1019. struct bcm2835_spidev *slv = spi_get_ctldata(spi);
  1020. struct spi_controller *ctlr = spi->controller;
  1021. if (slv->clear_rx_desc)
  1022. dmaengine_desc_free(slv->clear_rx_desc);
  1023. if (slv->clear_rx_addr)
  1024. dma_unmap_single(ctlr->dma_rx->device->dev,
  1025. slv->clear_rx_addr,
  1026. sizeof(u32),
  1027. DMA_TO_DEVICE);
  1028. kfree(slv);
  1029. }
  1030. static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
  1031. struct spi_device *spi,
  1032. struct bcm2835_spi *bs,
  1033. struct bcm2835_spidev *slv)
  1034. {
  1035. int ret;
  1036. if (!ctlr->dma_rx)
  1037. return 0;
  1038. slv->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
  1039. &slv->clear_rx_cs,
  1040. sizeof(u32),
  1041. DMA_TO_DEVICE);
  1042. if (dma_mapping_error(ctlr->dma_rx->device->dev, slv->clear_rx_addr)) {
  1043. dev_err(&spi->dev, "cannot map clear_rx_cs\n");
  1044. slv->clear_rx_addr = 0;
  1045. return -ENOMEM;
  1046. }
  1047. slv->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
  1048. slv->clear_rx_addr,
  1049. sizeof(u32), 0,
  1050. DMA_MEM_TO_DEV, 0);
  1051. if (!slv->clear_rx_desc) {
  1052. dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
  1053. return -ENOMEM;
  1054. }
  1055. ret = dmaengine_desc_set_reuse(slv->clear_rx_desc);
  1056. if (ret) {
  1057. dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
  1058. return ret;
  1059. }
  1060. return 0;
  1061. }
/*
 * Per-slave setup: allocate per-slave state on first call, precalculate
 * the CS register values used by prepare_message() and TX-only DMA, and
 * translate legacy native chip selects to GPIOs.
 */
static int bcm2835_spi_setup(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
	struct gpio_chip *chip;
	int ret;
	u32 cs;

	/*
	 * First call for this slave: allocate state.  The size is padded
	 * to the cache alignment because clear_rx_cs inside it gets
	 * DMA-mapped by bcm2835_spi_setup_dma().
	 */
	if (!slv) {
		slv = kzalloc(ALIGN(sizeof(*slv), dma_get_cache_alignment()),
			      GFP_KERNEL);
		if (!slv)
			return -ENOMEM;

		spi_set_ctldata(spi, slv);

		ret = bcm2835_spi_setup_dma(ctlr, spi, bs, slv);
		if (ret)
			goto err_cleanup;
	}

	/*
	 * Precalculate SPI slave's CS register value for ->prepare_message():
	 * The driver always uses software-controlled GPIO chip select, hence
	 * set the hardware-controlled native chip select to an invalid value
	 * to prevent it from interfering.
	 */
	cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
	if (spi->mode & SPI_CPOL)
		cs |= BCM2835_SPI_CS_CPOL;
	if (spi->mode & SPI_CPHA)
		cs |= BCM2835_SPI_CS_CPHA;
	slv->prepare_cs = cs;

	/*
	 * Precalculate SPI slave's CS register value to clear RX FIFO
	 * in case of a TX-only DMA transfer.
	 */
	if (ctlr->dma_rx) {
		slv->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
				   BCM2835_SPI_CS_DMAEN |
				   BCM2835_SPI_CS_CLEAR_RX;
		/* flush the new value to the already-mapped DMA buffer */
		dma_sync_single_for_device(ctlr->dma_rx->device->dev,
					   slv->clear_rx_addr,
					   sizeof(u32),
					   DMA_TO_DEVICE);
	}

	/*
	 * sanity checking the native-chipselects
	 */
	if (spi->mode & SPI_NO_CS)
		return 0;
	/*
	 * The SPI core has successfully requested the CS GPIO line from the
	 * device tree, so we are done.
	 */
	if (spi->cs_gpiod)
		return 0;
	if (spi->chip_select > 1) {
		/* error in the case of native CS requested with CS > 1
		 * officially there is a CS2, but it is not documented
		 * which GPIO is connected with that...
		 */
		dev_err(&spi->dev,
			"setup: only two native chip-selects are supported\n");
		ret = -EINVAL;
		goto err_cleanup;
	}

	/*
	 * Translate native CS to GPIO
	 *
	 * FIXME: poking around in the gpiolib internals like this is
	 * not very good practice. Find a way to locate the real problem
	 * and fix it. Why is the GPIO descriptor in spi->cs_gpiod
	 * sometimes not assigned correctly? Erroneous device trees?
	 */

	/* get the gpio chip for the base */
	chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
	if (!chip)
		return 0;

	/* GPIO 8 is CE0, GPIO 7 is CE1, hence 8 - chip_select */
	spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
						  DRV_NAME,
						  GPIO_LOOKUP_FLAGS_DEFAULT,
						  GPIOD_OUT_LOW);
	if (IS_ERR(spi->cs_gpiod)) {
		ret = PTR_ERR(spi->cs_gpiod);
		goto err_cleanup;
	}

	/* and set up the "mode" and level */
	dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
		 spi->chip_select);

	return 0;

err_cleanup:
	bcm2835_spi_cleanup(spi);
	return ret;
}
  1154. static int bcm2835_spi_probe(struct platform_device *pdev)
  1155. {
  1156. struct spi_controller *ctlr;
  1157. struct bcm2835_spi *bs;
  1158. int err;
  1159. ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
  1160. if (!ctlr)
  1161. return -ENOMEM;
  1162. platform_set_drvdata(pdev, ctlr);
  1163. ctlr->use_gpio_descriptors = true;
  1164. ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
  1165. ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
  1166. ctlr->num_chipselect = 3;
  1167. ctlr->setup = bcm2835_spi_setup;
  1168. ctlr->cleanup = bcm2835_spi_cleanup;
  1169. ctlr->transfer_one = bcm2835_spi_transfer_one;
  1170. ctlr->handle_err = bcm2835_spi_handle_err;
  1171. ctlr->prepare_message = bcm2835_spi_prepare_message;
  1172. ctlr->dev.of_node = pdev->dev.of_node;
  1173. bs = spi_controller_get_devdata(ctlr);
  1174. bs->ctlr = ctlr;
  1175. bs->regs = devm_platform_ioremap_resource(pdev, 0);
  1176. if (IS_ERR(bs->regs))
  1177. return PTR_ERR(bs->regs);
  1178. bs->clk = devm_clk_get(&pdev->dev, NULL);
  1179. if (IS_ERR(bs->clk))
  1180. return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
  1181. "could not get clk\n");
  1182. ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2;
  1183. bs->irq = platform_get_irq(pdev, 0);
  1184. if (bs->irq <= 0)
  1185. return bs->irq ? bs->irq : -ENODEV;
  1186. clk_prepare_enable(bs->clk);
  1187. bs->clk_hz = clk_get_rate(bs->clk);
  1188. err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
  1189. if (err)
  1190. goto out_clk_disable;
  1191. /* initialise the hardware with the default polarities */
  1192. bcm2835_wr(bs, BCM2835_SPI_CS,
  1193. BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
  1194. err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt,
  1195. IRQF_SHARED, dev_name(&pdev->dev), bs);
  1196. if (err) {
  1197. dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
  1198. goto out_dma_release;
  1199. }
  1200. err = spi_register_controller(ctlr);
  1201. if (err) {
  1202. dev_err(&pdev->dev, "could not register SPI controller: %d\n",
  1203. err);
  1204. goto out_dma_release;
  1205. }
  1206. bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
  1207. return 0;
  1208. out_dma_release:
  1209. bcm2835_dma_release(ctlr, bs);
  1210. out_clk_disable:
  1211. clk_disable_unprepare(bs->clk);
  1212. return err;
  1213. }
  1214. static int bcm2835_spi_remove(struct platform_device *pdev)
  1215. {
  1216. struct spi_controller *ctlr = platform_get_drvdata(pdev);
  1217. struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
  1218. bcm2835_debugfs_remove(bs);
  1219. spi_unregister_controller(ctlr);
  1220. bcm2835_dma_release(ctlr, bs);
  1221. /* Clear FIFOs, and disable the HW block */
  1222. bcm2835_wr(bs, BCM2835_SPI_CS,
  1223. BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
  1224. clk_disable_unprepare(bs->clk);
  1225. return 0;
  1226. }
  1227. static void bcm2835_spi_shutdown(struct platform_device *pdev)
  1228. {
  1229. int ret;
  1230. ret = bcm2835_spi_remove(pdev);
  1231. if (ret)
  1232. dev_err(&pdev->dev, "failed to shutdown\n");
  1233. }
/* Devicetree match table: binds this driver to the BCM2835 SPI block. */
static const struct of_device_id bcm2835_spi_match[] = {
	{ .compatible = "brcm,bcm2835-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
/* Platform driver glue and module metadata. */
static struct platform_driver bcm2835_spi_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = bcm2835_spi_match,
	},
	.probe = bcm2835_spi_probe,
	.remove = bcm2835_spi_remove,
	.shutdown = bcm2835_spi_shutdown,
};
module_platform_driver(bcm2835_spi_driver);

MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
MODULE_AUTHOR("Chris Boot <[email protected]>");
MODULE_LICENSE("GPL");