/* spi-imx.c — Freescale/NXP i.MX SPI (CSPI/eCSPI) controller driver (excerpt; web-viewer line-number gutter removed) */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
  3. // Copyright (C) 2008 Juergen Beisert
  4. #include <linux/clk.h>
  5. #include <linux/completion.h>
  6. #include <linux/delay.h>
  7. #include <linux/dmaengine.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/err.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/io.h>
  12. #include <linux/irq.h>
  13. #include <linux/kernel.h>
  14. #include <linux/module.h>
  15. #include <linux/pinctrl/consumer.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/pm_runtime.h>
  18. #include <linux/slab.h>
  19. #include <linux/spi/spi.h>
  20. #include <linux/types.h>
  21. #include <linux/of.h>
  22. #include <linux/of_device.h>
  23. #include <linux/property.h>
  24. #include <linux/dma/imx-dma.h>
  25. #define DRIVER_NAME "spi_imx"
  26. static bool use_dma = true;
  27. module_param(use_dma, bool, 0644);
  28. MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
  29. /* define polling limits */
  30. static unsigned int polling_limit_us = 30;
  31. module_param(polling_limit_us, uint, 0664);
  32. MODULE_PARM_DESC(polling_limit_us,
  33. "time in us to run a transfer in polling mode\n");
  34. #define MXC_RPM_TIMEOUT 2000 /* 2000ms */
  35. #define MXC_CSPIRXDATA 0x00
  36. #define MXC_CSPITXDATA 0x04
  37. #define MXC_CSPICTRL 0x08
  38. #define MXC_CSPIINT 0x0c
  39. #define MXC_RESET 0x1c
  40. /* generic defines to abstract from the different register layouts */
  41. #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
  42. #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
  43. #define MXC_INT_RDR BIT(4) /* Receive date threshold interrupt */
  44. /* The maximum bytes that a sdma BD can transfer. */
  45. #define MAX_SDMA_BD_BYTES (1 << 15)
  46. #define MX51_ECSPI_CTRL_MAX_BURST 512
  47. /* The maximum bytes that IMX53_ECSPI can transfer in slave mode.*/
  48. #define MX53_MAX_TRANSFER_BYTES 512
/* Controller variant; selects the matching spi_imx_devtype_data table. */
enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};
struct spi_imx_data;

/*
 * Per-SoC operations and properties abstracting the different register
 * layouts; one instance per supported controller variant.
 */
struct spi_imx_devtype_data {
	/* enable/disable interrupt sources given a MXC_INT_* mask */
	void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
	/* per-message controller setup (mode bits, chip select, ...) */
	int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
	/* per-transfer controller setup (word size, clock divider, ...) */
	int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi);
	/* start a burst (sets the XCH bit on the variants seen here) */
	void (*trigger)(struct spi_imx_data *spi_imx);
	/* non-zero when the RX FIFO holds at least one word */
	int (*rx_available)(struct spi_imx_data *spi_imx);
	/* bring the controller back to a clean state */
	void (*reset)(struct spi_imx_data *spi_imx);
	/* program DMA watermark levels and request enables */
	void (*setup_wml)(struct spi_imx_data *spi_imx);
	/* disable the controller */
	void (*disable)(struct spi_imx_data *spi_imx);
	/* disable DMA requests */
	void (*disable_dma)(struct spi_imx_data *spi_imx);
	bool has_dmamode;
	bool has_slavemode;
	unsigned int fifo_size;
	bool dynamic_burst;
	/*
	 * ERR009165 fixed or not:
	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
	 */
	bool tx_glitch_fixed;
	enum spi_imx_devtype devtype;
};
/* Per-controller driver state. */
struct spi_imx_data {
	struct spi_controller *controller;
	struct device *dev;

	struct completion xfer_done;
	void __iomem *base;		/* mapped controller registers */
	unsigned long base_phys;	/* physical register base — presumably for DMA setup; not used in this chunk */
	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;		/* input (per) clock rate, Hz */
	unsigned int spi_bus_clk;	/* requested, then achieved, SCLK rate */

	unsigned int bits_per_word;
	unsigned int spi_drctl;		/* DRCTL field value for SPI_RDY handling */

	unsigned int count, remainder;	/* TX bytes left / RX bytes left */
	void (*tx)(struct spi_imx_data *spi_imx);	/* push one word to the TX FIFO */
	void (*rx)(struct spi_imx_data *spi_imx);	/* pop one word from the RX FIFO */
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int dynamic_burst;
	bool rx_only;

	/* Slave mode */
	bool slave_mode;
	bool slave_aborted;
	unsigned int slave_burst;	/* slave-mode burst length, bytes */

	/* DMA */
	bool usedma;
	u32 wml;			/* watermark level programmed by setup_wml */
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
};
  111. static inline int is_imx27_cspi(struct spi_imx_data *d)
  112. {
  113. return d->devtype_data->devtype == IMX27_CSPI;
  114. }
  115. static inline int is_imx35_cspi(struct spi_imx_data *d)
  116. {
  117. return d->devtype_data->devtype == IMX35_CSPI;
  118. }
  119. static inline int is_imx51_ecspi(struct spi_imx_data *d)
  120. {
  121. return d->devtype_data->devtype == IMX51_ECSPI;
  122. }
  123. static inline int is_imx53_ecspi(struct spi_imx_data *d)
  124. {
  125. return d->devtype_data->devtype == IMX53_ECSPI;
  126. }
/*
 * MXC_SPI_BUF_RX()/MXC_SPI_BUF_TX() generate the PIO FIFO accessors for
 * a given word type (u8/u16/u32). The RX variant pops one word from the
 * RX FIFO (discarding it when rx_buf is NULL, i.e. TX-only transfers);
 * the TX variant pushes one word (zero when tx_buf is NULL, i.e.
 * RX-only transfers). They also advance the buffer pointers and the
 * remaining byte counters.
 */
#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->remainder -= sizeof(type);				\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)
  159. /* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
  160. * (which is currently not the case in this driver)
  161. */
  162. static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
  163. 256, 384, 512, 768, 1024};
  164. /* MX21, MX27 */
  165. static unsigned int spi_imx_clkdiv_1(unsigned int fin,
  166. unsigned int fspi, unsigned int max, unsigned int *fres)
  167. {
  168. int i;
  169. for (i = 2; i < max; i++)
  170. if (fspi * mxc_clkdivs[i] >= fin)
  171. break;
  172. *fres = fin / mxc_clkdivs[i];
  173. return i;
  174. }
  175. /* MX1, MX31, MX35, MX51 CSPI */
  176. static unsigned int spi_imx_clkdiv_2(unsigned int fin,
  177. unsigned int fspi, unsigned int *fres)
  178. {
  179. int i, div = 4;
  180. for (i = 0; i < 7; i++) {
  181. if (fspi * div >= fin)
  182. goto out;
  183. div <<= 1;
  184. }
  185. out:
  186. *fres = fin / div;
  187. return i;
  188. }
  189. static int spi_imx_bytes_per_word(const int bits_per_word)
  190. {
  191. if (bits_per_word <= 8)
  192. return 1;
  193. else if (bits_per_word <= 16)
  194. return 2;
  195. else
  196. return 4;
  197. }
  198. static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
  199. struct spi_transfer *transfer)
  200. {
  201. struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
  202. if (!use_dma || controller->fallback)
  203. return false;
  204. if (!controller->dma_rx)
  205. return false;
  206. if (spi_imx->slave_mode)
  207. return false;
  208. if (transfer->len < spi_imx->devtype_data->fifo_size)
  209. return false;
  210. spi_imx->dynamic_burst = 0;
  211. return true;
  212. }
  213. /*
  214. * Note the number of natively supported chip selects for MX51 is 4. Some
  215. * devices may have less actual SS pins but the register map supports 4. When
  216. * using gpio chip selects the cs values passed into the macros below can go
  217. * outside the range 0 - 3. We therefore need to limit the cs value to avoid
  218. * corrupting bits outside the allocated locations.
  219. *
  220. * The simplest way to do this is to just mask the cs bits to 2 bits. This
  221. * still allows all 4 native chip selects to work as well as gpio chip selects
  222. * (which can use any of the 4 chip select configurations).
  223. */
  224. #define MX51_ECSPI_CTRL 0x08
  225. #define MX51_ECSPI_CTRL_ENABLE (1 << 0)
  226. #define MX51_ECSPI_CTRL_XCH (1 << 2)
  227. #define MX51_ECSPI_CTRL_SMC (1 << 3)
  228. #define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
  229. #define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
  230. #define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
  231. #define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
  232. #define MX51_ECSPI_CTRL_CS(cs) ((cs & 3) << 18)
  233. #define MX51_ECSPI_CTRL_BL_OFFSET 20
  234. #define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
  235. #define MX51_ECSPI_CONFIG 0x0c
  236. #define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs & 3) + 0))
  237. #define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4))
  238. #define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8))
  239. #define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12))
  240. #define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20))
  241. #define MX51_ECSPI_INT 0x10
  242. #define MX51_ECSPI_INT_TEEN (1 << 0)
  243. #define MX51_ECSPI_INT_RREN (1 << 3)
  244. #define MX51_ECSPI_INT_RDREN (1 << 4)
  245. #define MX51_ECSPI_DMA 0x14
  246. #define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
  247. #define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
  248. #define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
  249. #define MX51_ECSPI_DMA_TEDEN (1 << 7)
  250. #define MX51_ECSPI_DMA_RXDEN (1 << 23)
  251. #define MX51_ECSPI_DMA_RXTDEN (1 << 31)
  252. #define MX51_ECSPI_STAT 0x18
  253. #define MX51_ECSPI_STAT_RR (1 << 3)
  254. #define MX51_ECSPI_TESTREG 0x20
  255. #define MX51_ECSPI_TESTREG_LBC BIT(31)
/*
 * Pop one 32-bit word from the RX FIFO; on little-endian CPUs byte- or
 * halfword-swap it so 8/16-bit words packed into the u32 land in the
 * buffer in wire order.
 */
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		unsigned int bytes_per_word;

		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			swab32s(&val);		/* 8-bit words: full byte swap */
		else if (bytes_per_word == 2)
			swahw32s(&val);		/* 16-bit words: swap halfwords */
#endif
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}

	spi_imx->remainder -= sizeof(u32);
}
/*
 * RX path for dynamic-burst transfers: receive 4-byte-aligned data one
 * u32 at a time; the trailing (unaligned) 1-3 bytes arrive packed into
 * the upper bytes of a single FIFO word and are unpacked here.
 */
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val;

	unaligned = spi_imx->remainder % 4;

	if (!unaligned) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_rx_u16(spi_imx);
		return;
	}

	val = readl(spi_imx->base + MXC_CSPIRXDATA);
	while (unaligned--) {
		if (spi_imx->rx_buf) {
			/* highest byte carries the earliest-received data */
			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
			spi_imx->rx_buf++;
		}
		spi_imx->remainder--;
	}
}
/*
 * Push one 32-bit word to the TX FIFO; on little-endian CPUs byte- or
 * halfword-swap it so 8/16-bit words packed into the u32 go out in wire
 * order (mirror of spi_imx_buf_rx_swap_u32()).
 */
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);

#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		swab32s(&val);
	else if (bytes_per_word == 2)
		swahw32s(&val);
#endif

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
/*
 * TX path for dynamic-burst transfers: send 4-byte-aligned data one u32
 * at a time; the trailing (unaligned) 1-3 bytes are packed into the
 * upper bytes of one FIFO word (mirror of spi_imx_buf_rx_swap()).
 */
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val = 0;

	unaligned = spi_imx->count % 4;

	if (!unaligned) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_tx_u16(spi_imx);
		return;
	}

	while (unaligned--) {
		if (spi_imx->tx_buf) {
			/* earliest byte goes into the highest byte lane */
			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
			spi_imx->tx_buf++;
		}
		spi_imx->count--;
	}

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
/*
 * Slave-mode RX for i.MX53 eCSPI: the FIFO word is interpreted
 * big-endian; for a partial (non-multiple-of-4) burst only the trailing
 * bytes of the word carry data.
 */
static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));

	if (spi_imx->rx_buf) {
		int n_bytes = spi_imx->slave_burst % sizeof(val);

		if (!n_bytes)
			n_bytes = sizeof(val);	/* fully-filled word */

		memcpy(spi_imx->rx_buf,
		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
		spi_imx->rx_buf += n_bytes;
		spi_imx->slave_burst -= n_bytes;
	}

	spi_imx->remainder -= sizeof(u32);
}
/*
 * Slave-mode TX for i.MX53 eCSPI: pack the next 1-4 bytes into the
 * trailing end of a word and write it out big-endian (mirror of
 * mx53_ecspi_rx_slave()).
 */
static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
	int n_bytes = spi_imx->count % sizeof(val);

	if (!n_bytes)
		n_bytes = sizeof(val);	/* fully-filled word */

	if (spi_imx->tx_buf) {
		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
		       spi_imx->tx_buf, n_bytes);
		val = cpu_to_be32(val);
		spi_imx->tx_buf += n_bytes;
	}

	spi_imx->count -= n_bytes;

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
/* MX51 eCSPI */
/*
 * Compute the PREDIV/POSTDIV fields that produce at most @fspi from the
 * input clock; store the achieved rate in *@fres. Returns the combined
 * field value for the CTRL register, or 0xff if the rate is out of
 * range.
 */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	fspi = min(fspi, fin);

	/* smallest post such that fin <= fspi << post */
	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	/* the pre-divider alone covers ratios up to 16, i.e. 2^4 */
	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
				fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);
	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
  395. static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
  396. {
  397. unsigned int val = 0;
  398. if (enable & MXC_INT_TE)
  399. val |= MX51_ECSPI_INT_TEEN;
  400. if (enable & MXC_INT_RR)
  401. val |= MX51_ECSPI_INT_RREN;
  402. if (enable & MXC_INT_RDR)
  403. val |= MX51_ECSPI_INT_RDREN;
  404. writel(val, spi_imx->base + MX51_ECSPI_INT);
  405. }
  406. static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
  407. {
  408. u32 reg;
  409. reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
  410. reg |= MX51_ECSPI_CTRL_XCH;
  411. writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
  412. }
/* Clear all DMA request enables and watermarks (whole DMA register). */
static void mx51_disable_dma(struct spi_imx_data *spi_imx)
{
	writel(0, spi_imx->base + MX51_ECSPI_DMA);
}
  417. static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
  418. {
  419. u32 ctrl;
  420. ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
  421. ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
  422. writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
  423. }
/*
 * Per-message setup for i.MX51+ eCSPI: master/slave mode, SPI_RDY
 * handling, chip select, loopback, burst-completion source, clock and
 * chip-select polarity. When the CONFIG register changes, waits long
 * enough for the new settings to settle on the bus. Returns 0 (no
 * failure paths).
 */
static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
				      struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 min_speed_hz = ~0U;
	u32 testreg, delay;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
	u32 current_cfg = cfg;

	/* set Master or Slave mode */
	if (spi_imx->slave_mode)
		ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
	else
		ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/*
	 * Enable SPI_RDY handling (falling edge/level triggered).
	 */
	if (spi->mode & SPI_READY)
		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);

	/*
	 * The ctrl register must be written first, with the EN bit set other
	 * registers must not be written to.
	 */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		testreg |= MX51_ECSPI_TESTREG_LBC;
	else
		testreg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);

	/*
	 * eCSPI burst completion by Chip Select signal in Slave mode
	 * is not functional for imx53 Soc, config SPI burst completed when
	 * BURST_LENGTH + 1 bits are received
	 */
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
	else
		cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	}

	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);

	/* nothing changed: skip the register write and the settle delay */
	if (cfg == current_cfg)
		return 0;

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clock just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticable if the SCLK clock run very slow. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 *
	 * Because spi_imx->spi_bus_clk is only set in prepare_message
	 * callback, iterate over all the transfers in spi_message, find the
	 * one with lowest bus frequency, and use that bus frequency for the
	 * delay calculation. In case all transfers have speed_hz == 0, then
	 * min_speed_hz is ~0 and the resulting delay is zero.
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->speed_hz)
			continue;
		min_speed_hz = min(xfer->speed_hz, min_speed_hz);
	}

	delay = (2 * 1000000) / min_speed_hz;
	if (likely(delay < 10))	/* SCLK is faster than 200 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	return 0;
}
  509. static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
  510. struct spi_device *spi)
  511. {
  512. bool cpha = (spi->mode & SPI_CPHA);
  513. bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
  514. u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
  515. /* Flip cpha logical value iff flip_cpha */
  516. cpha ^= flip_cpha;
  517. if (cpha)
  518. cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
  519. else
  520. cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
  521. writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
  522. }
/*
 * Per-transfer setup for eCSPI: burst length, clock dividers, clock
 * phase and the SMC bit used in DMA mode. Returns 0 (no failure paths).
 */
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
				       struct spi_device *spi)
{
	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	u32 clk;

	/* Clear BL field and set the right value */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		ctrl |= (spi_imx->slave_burst * 8 - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;
	else
		ctrl |= (spi_imx->bits_per_word - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;

	/* set clock speed */
	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
	spi_imx->spi_bus_clk = clk;	/* record the achieved SCLK rate */

	mx51_configure_cpha(spi_imx, spi);

	/*
	 * ERR009165: work in XHC mode instead of SMC as PIO on the chips
	 * before i.mx6ul.
	 */
	if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
		ctrl |= MX51_ECSPI_CTRL_SMC;
	else
		ctrl &= ~MX51_ECSPI_CTRL_SMC;

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	return 0;
}
/*
 * Program DMA watermark levels and enable TX/RX DMA requests. The TX
 * watermark is left at 0 unless ERR009165 is fixed on this part
 * (tx_glitch_fixed).
 */
static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
	u32 tx_wml = 0;

	if (spi_imx->devtype_data->tx_glitch_fixed)
		tx_wml = spi_imx->wml;
	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
	       MX51_ECSPI_DMA_TX_WML(tx_wml) |
	       MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
	       MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
	       MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}
/* Non-zero when the RX FIFO holds at least one word (RR status bit). */
static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}
/* Return to a clean state by discarding any stale RX FIFO contents. */
static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}
  578. #define MX31_INTREG_TEEN (1 << 0)
  579. #define MX31_INTREG_RREN (1 << 3)
  580. #define MX31_CSPICTRL_ENABLE (1 << 0)
  581. #define MX31_CSPICTRL_MASTER (1 << 1)
  582. #define MX31_CSPICTRL_XCH (1 << 2)
  583. #define MX31_CSPICTRL_SMC (1 << 3)
  584. #define MX31_CSPICTRL_POL (1 << 4)
  585. #define MX31_CSPICTRL_PHA (1 << 5)
  586. #define MX31_CSPICTRL_SSCTL (1 << 6)
  587. #define MX31_CSPICTRL_SSPOL (1 << 7)
  588. #define MX31_CSPICTRL_BC_SHIFT 8
  589. #define MX35_CSPICTRL_BL_SHIFT 20
  590. #define MX31_CSPICTRL_CS_SHIFT 24
  591. #define MX35_CSPICTRL_CS_SHIFT 12
  592. #define MX31_CSPICTRL_DR_SHIFT 16
  593. #define MX31_CSPI_DMAREG 0x10
  594. #define MX31_DMAREG_RH_DEN (1<<4)
  595. #define MX31_DMAREG_TH_DEN (1<<1)
  596. #define MX31_CSPISTATUS 0x14
  597. #define MX31_STATUS_RR (1 << 3)
  598. #define MX31_CSPI_TESTREG 0x1C
  599. #define MX31_TEST_LBC (1 << 14)
  600. /* These functions also work for the i.MX35, but be aware that
  601. * the i.MX35 has a slightly different register layout for bits
  602. * we do not use here.
  603. */
  604. static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
  605. {
  606. unsigned int val = 0;
  607. if (enable & MXC_INT_TE)
  608. val |= MX31_INTREG_TEEN;
  609. if (enable & MXC_INT_RR)
  610. val |= MX31_INTREG_RREN;
  611. writel(val, spi_imx->base + MXC_CSPIINT);
  612. }
  613. static void mx31_trigger(struct spi_imx_data *spi_imx)
  614. {
  615. unsigned int reg;
  616. reg = readl(spi_imx->base + MXC_CSPICTRL);
  617. reg |= MX31_CSPICTRL_XCH;
  618. writel(reg, spi_imx->base + MXC_CSPICTRL);
  619. }
/* No per-message setup is needed on i.MX31-class CSPI. */
static int mx31_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
/*
 * Per-transfer setup for i.MX31/i.MX35 CSPI: clock divider, word
 * length, SPI mode bits, loopback, native chip select and DMA request
 * thresholds. Returns 0 (no failure paths).
 */
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;	/* record the achieved rate */

	/* i.MX35 moved the burst-length field and adds SSCTL */
	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	/* only program a native chip select when no GPIO CS is used */
	if (!spi->cs_gpiod)
		reg |= (spi->chip_select) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/*
		 * configure DMA requests when RXFIFO is half full and
		 * when TXFIFO is half empty
		 */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
		       spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}
/* Non-zero when the RX FIFO holds at least one word (RR status bit). */
static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}
  672. static void mx31_reset(struct spi_imx_data *spi_imx)
  673. {
  674. /* drain receive buffer */
  675. while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
  676. readl(spi_imx->base + MXC_CSPIRXDATA);
  677. }
/* i.MX21/i.MX27 CSPI register bit definitions */
#define MX21_INTREG_RR (1 << 4) /* status: RX FIFO data ready */
#define MX21_INTREG_TEEN (1 << 9) /* enable TX FIFO empty interrupt */
#define MX21_INTREG_RREN (1 << 13) /* enable RX data ready interrupt */

#define MX21_CSPICTRL_POL (1 << 5) /* clock polarity (CPOL) */
#define MX21_CSPICTRL_PHA (1 << 6) /* clock phase (CPHA) */
#define MX21_CSPICTRL_SSPOL (1 << 8) /* SS active-high */
#define MX21_CSPICTRL_XCH (1 << 9) /* start exchange */
#define MX21_CSPICTRL_ENABLE (1 << 10) /* module enable */
#define MX21_CSPICTRL_MASTER (1 << 11) /* master mode */
#define MX21_CSPICTRL_DR_SHIFT 14 /* data-rate divider field */
#define MX21_CSPICTRL_CS_SHIFT 19 /* chip-select field */
  689. static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
  690. {
  691. unsigned int val = 0;
  692. if (enable & MXC_INT_TE)
  693. val |= MX21_INTREG_TEEN;
  694. if (enable & MXC_INT_RR)
  695. val |= MX21_INTREG_RREN;
  696. writel(val, spi_imx->base + MXC_CSPIINT);
  697. }
  698. static void mx21_trigger(struct spi_imx_data *spi_imx)
  699. {
  700. unsigned int reg;
  701. reg = readl(spi_imx->base + MXC_CSPICTRL);
  702. reg |= MX21_CSPICTRL_XCH;
  703. writel(reg, spi_imx->base + MXC_CSPICTRL);
  704. }
/* Nothing to configure per message on i.MX21/i.MX27 CSPI. */
static int mx21_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
/*
 * Program the i.MX21/i.MX27 CSPI control register: clock divider (the
 * maximum divider exponent differs between the two SoCs), burst length,
 * SPI mode bits and native chip select. Always returns 0.
 */
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	/* Burst length field holds bits-per-word minus one. */
	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	/* Use the native chip select only when no GPIO CS is in use. */
	if (!spi->cs_gpiod)
		reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
  731. static int mx21_rx_available(struct spi_imx_data *spi_imx)
  732. {
  733. return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
  734. }
/* Soft-reset the CSPI block via its reset register. */
static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
/* i.MX1 CSPI register bit definitions */
#define MX1_INTREG_RR (1 << 3) /* status: RX FIFO data ready */
#define MX1_INTREG_TEEN (1 << 8) /* enable TX FIFO empty interrupt */
#define MX1_INTREG_RREN (1 << 11) /* enable RX data ready interrupt */

#define MX1_CSPICTRL_POL (1 << 4) /* clock polarity (CPOL) */
#define MX1_CSPICTRL_PHA (1 << 5) /* clock phase (CPHA) */
#define MX1_CSPICTRL_XCH (1 << 8) /* start exchange */
#define MX1_CSPICTRL_ENABLE (1 << 9) /* module enable */
#define MX1_CSPICTRL_MASTER (1 << 10) /* master mode */
#define MX1_CSPICTRL_DR_SHIFT 13 /* data-rate divider field */
  748. static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
  749. {
  750. unsigned int val = 0;
  751. if (enable & MXC_INT_TE)
  752. val |= MX1_INTREG_TEEN;
  753. if (enable & MXC_INT_RR)
  754. val |= MX1_INTREG_RREN;
  755. writel(val, spi_imx->base + MXC_CSPIINT);
  756. }
  757. static void mx1_trigger(struct spi_imx_data *spi_imx)
  758. {
  759. unsigned int reg;
  760. reg = readl(spi_imx->base + MXC_CSPICTRL);
  761. reg |= MX1_CSPICTRL_XCH;
  762. writel(reg, spi_imx->base + MXC_CSPICTRL);
  763. }
/* Nothing to configure per message on i.MX1 CSPI. */
static int mx1_prepare_message(struct spi_imx_data *spi_imx,
			       struct spi_message *msg)
{
	return 0;
}
/*
 * Program the i.MX1 CSPI control register: clock divider, burst length
 * and clock polarity/phase. No SS polarity or native CS field on this IP.
 * Always returns 0.
 */
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
				struct spi_device *spi)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	/* Burst length field holds bits-per-word minus one. */
	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
  785. static int mx1_rx_available(struct spi_imx_data *spi_imx)
  786. {
  787. return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
  788. }
/* Soft-reset the CSPI block via its reset register. */
static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
/* i.MX1 CSPI: 8-word FIFO, PIO only, no dynamic burst or slave mode. */
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.prepare_message = mx1_prepare_message,
	.prepare_transfer = mx1_prepare_transfer,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX1_CSPI,
};
/* i.MX21 CSPI: 8-word FIFO, PIO only. */
static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX21_CSPI,
};
static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX27_CSPI,	/* distinct devtype: divider limit differs (see mx21_prepare_transfer) */
};
/* i.MX31 CSPI: 8-word FIFO, PIO only. */
static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX31_CSPI,
};
static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = true,	/* first variant with DMA support */
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX35_CSPI,
};
/* i.MX51 ECSPI: 64-word FIFO, DMA, dynamic burst and slave mode. */
static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.disable_dma = mx51_disable_dma,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};
/* i.MX53 ECSPI: like i.MX51 but without setup_wml or dynamic burst. */
static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.disable_dma = mx51_disable_dma,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX53_ECSPI,
};
/*
 * i.MX6UL ECSPI: same ops as i.MX51 with the TX glitch erratum fixed in
 * hardware. The IMX51_ECSPI devtype is reused here (the register layout
 * checks key off that value); tx_glitch_fixed is what distinguishes it.
 */
static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_slavemode = true,
	.tx_glitch_fixed = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};
/* Device-tree match table; .data selects the per-SoC devtype data above. */
static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
	{ .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
  918. static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
  919. {
  920. u32 ctrl;
  921. ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
  922. ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
  923. ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
  924. writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
  925. }
/*
 * Fill the TX FIFO with as much pending data as fits and, in master mode,
 * trigger the exchange. With dynamic burst the controller burst length is
 * reprogrammed so the unaligned tail (count % MAX_BURST) goes out first,
 * leaving only maximum-size bursts for the rest of the transfer.
 */
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	unsigned int burst_len;

	/*
	 * Reload the FIFO when the remaining bytes to be transferred in the
	 * current burst is 0. This only applies when bits_per_word is a
	 * multiple of 8.
	 */
	if (!spi_imx->remainder) {
		if (spi_imx->dynamic_burst) {

			/* We need to deal unaligned data first */
			burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;

			if (!burst_len)
				burst_len = MX51_ECSPI_CTRL_MAX_BURST;

			spi_imx_set_burst_len(spi_imx, burst_len * 8);

			spi_imx->remainder = burst_len;
		} else {
			spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		}
	}

	while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
		if (!spi_imx->count)
			break;
		/* Don't push past the end of the current dynamic burst. */
		if (spi_imx->dynamic_burst &&
		    spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	/* In slave mode the remote master clocks the exchange. */
	if (!spi_imx->slave_mode)
		spi_imx->devtype_data->trigger(spi_imx);
}
/*
 * PIO interrupt handler: drain available RX words (one is expected per word
 * previously pushed to TX), refill the TX FIFO while data remains, then
 * wait on the RX-ready interrupt for the tail and finally signal completion.
 */
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	while (spi_imx->txfifo &&
	       spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	/* All data exchanged: mask interrupts and wake the waiter. */
	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);
	return IRQ_HANDLED;
}
  982. static int spi_imx_dma_configure(struct spi_controller *controller)
  983. {
  984. int ret;
  985. enum dma_slave_buswidth buswidth;
  986. struct dma_slave_config rx = {}, tx = {};
  987. struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
  988. switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
  989. case 4:
  990. buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
  991. break;
  992. case 2:
  993. buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
  994. break;
  995. case 1:
  996. buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
  997. break;
  998. default:
  999. return -EINVAL;
  1000. }
  1001. tx.direction = DMA_MEM_TO_DEV;
  1002. tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
  1003. tx.dst_addr_width = buswidth;
  1004. tx.dst_maxburst = spi_imx->wml;
  1005. ret = dmaengine_slave_config(controller->dma_tx, &tx);
  1006. if (ret) {
  1007. dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
  1008. return ret;
  1009. }
  1010. rx.direction = DMA_DEV_TO_MEM;
  1011. rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
  1012. rx.src_addr_width = buswidth;
  1013. rx.src_maxburst = spi_imx->wml;
  1014. ret = dmaengine_slave_config(controller->dma_rx, &rx);
  1015. if (ret) {
  1016. dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
  1017. return ret;
  1018. }
  1019. return 0;
  1020. }
/*
 * Configure the driver state for one transfer: bus clock, word size, the
 * PIO tx/rx word accessors (byte-swapping ones when dynamic burst is
 * usable), DMA vs PIO selection, and the i.MX53 slave-mode accessors.
 * Returns -EINVAL when neither the transfer nor the device has a speed.
 */
static int spi_imx_setupxfer(struct spi_device *spi,
			     struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);

	if (!t)
		return 0;

	if (!t->speed_hz) {
		if (!spi->max_speed_hz) {
			dev_err(&spi->dev, "no speed_hz provided!\n");
			return -EINVAL;
		}
		dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
		spi_imx->spi_bus_clk = spi->max_speed_hz;
	} else
		spi_imx->spi_bus_clk = t->speed_hz;

	spi_imx->bits_per_word = t->bits_per_word;

	/*
	 * Initialize the functions for transfer. To transfer non byte-aligned
	 * words, we have to use multiple word-size bursts, we can't use
	 * dynamic_burst in that case.
	 */
	if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
	    !(spi->mode & SPI_CS_WORD) &&
	    (spi_imx->bits_per_word == 8 ||
	     spi_imx->bits_per_word == 16 ||
	     spi_imx->bits_per_word == 32)) {

		spi_imx->rx = spi_imx_buf_rx_swap;
		spi_imx->tx = spi_imx_buf_tx_swap;
		spi_imx->dynamic_burst = 1;
	} else {
		if (spi_imx->bits_per_word <= 8) {
			spi_imx->rx = spi_imx_buf_rx_u8;
			spi_imx->tx = spi_imx_buf_tx_u8;
		} else if (spi_imx->bits_per_word <= 16) {
			spi_imx->rx = spi_imx_buf_rx_u16;
			spi_imx->tx = spi_imx_buf_tx_u16;
		} else {
			spi_imx->rx = spi_imx_buf_rx_u32;
			spi_imx->tx = spi_imx_buf_tx_u32;
		}
		spi_imx->dynamic_burst = 0;
	}

	if (spi_imx_can_dma(spi_imx->controller, spi, t))
		spi_imx->usedma = true;
	else
		spi_imx->usedma = false;

	/* A transfer with no tx_buf (or only the dummy buffer) is RX-only. */
	spi_imx->rx_only = ((t->tx_buf == NULL)
			|| (t->tx_buf == spi->controller->dummy_tx));

	/* i.MX53 slave mode uses dedicated accessors and a per-transfer burst. */
	if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
		spi_imx->rx = mx53_ecspi_rx_slave;
		spi_imx->tx = mx53_ecspi_tx_slave;
		spi_imx->slave_burst = t->len;
	}

	spi_imx->devtype_data->prepare_transfer(spi_imx, spi);

	return 0;
}
  1077. static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
  1078. {
  1079. struct spi_controller *controller = spi_imx->controller;
  1080. if (controller->dma_rx) {
  1081. dma_release_channel(controller->dma_rx);
  1082. controller->dma_rx = NULL;
  1083. }
  1084. if (controller->dma_tx) {
  1085. dma_release_channel(controller->dma_tx);
  1086. controller->dma_tx = NULL;
  1087. }
  1088. }
/*
 * Request the TX and RX DMA channels and prepare the controller for DMA
 * transfers. On any failure both channels are released and the error is
 * returned so the caller can fall back to PIO (except -EPROBE_DEFER).
 */
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_controller *controller)
{
	int ret;

	/* Default watermark level: half of the FIFO. */
	spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;

	/* Prepare for TX DMA: */
	controller->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(controller->dma_tx)) {
		ret = PTR_ERR(controller->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		controller->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX : */
	controller->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(controller->dma_rx)) {
		ret = PTR_ERR(controller->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		controller->dma_rx = NULL;
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	controller->can_dma = spi_imx_can_dma;
	controller->max_dma_len = MAX_SDMA_BD_BYTES;
	/* Both directions always run, so the core must supply dummy buffers. */
	spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
					 SPI_CONTROLLER_MUST_TX;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}
  1121. static void spi_imx_dma_rx_callback(void *cookie)
  1122. {
  1123. struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
  1124. complete(&spi_imx->dma_rx_completion);
  1125. }
  1126. static void spi_imx_dma_tx_callback(void *cookie)
  1127. {
  1128. struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
  1129. complete(&spi_imx->dma_tx_completion);
  1130. }
  1131. static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
  1132. {
  1133. unsigned long timeout = 0;
  1134. /* Time with actual data transfer and CS change delay related to HW */
  1135. timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
  1136. /* Add extra second for scheduler related activities */
  1137. timeout += 1;
  1138. /* Double calculated timeout */
  1139. return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
  1140. }
/*
 * Run one transfer via SDMA. The watermark level is derived from the last
 * RX scatterlist entry so the final RX burst never leaves tail data in the
 * FIFO. RX descriptors are issued before TX because the TX setup starts
 * the hardware. Failures before the hardware starts fall back to PIO via
 * SPI_TRANS_FAIL_NO_START; timeouts terminate both channels and return
 * -ETIMEDOUT.
 */
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct spi_controller *controller = spi_imx->controller;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
	struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
	unsigned int bytes_per_word, i;
	int ret;

	/* Get the right burst length from the last sg to ensure no tail data */
	bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
	for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
		if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
			break;
	}
	/* Use 1 as wml in case no available burst length got */
	if (i == 0)
		i = 1;

	spi_imx->wml = i;

	ret = spi_imx_dma_configure(controller);
	if (ret)
		goto dma_failure_no_start;

	if (!spi_imx->devtype_data->setup_wml) {
		dev_err(spi_imx->dev, "No setup_wml()?\n");
		ret = -EINVAL;
		goto dma_failure_no_start;
	}
	spi_imx->devtype_data->setup_wml(spi_imx);

	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		ret = -EINVAL;
		goto dma_failure_no_start;
	}

	desc_rx->callback = spi_imx_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_rx);
	reinit_completion(&spi_imx->dma_rx_completion);
	dma_async_issue_pending(controller->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		/* RX is already pending at this point, stop both channels. */
		dmaengine_terminate_all(controller->dma_tx);
		dmaengine_terminate_all(controller->dma_rx);
		return -EINVAL;
	}

	desc_tx->callback = spi_imx_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_tx);
	reinit_completion(&spi_imx->dma_tx_completion);
	dma_async_issue_pending(controller->dma_tx);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	/* Wait SDMA to finish the data transfer.*/
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						transfer_timeout);
	if (!timeout) {
		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
		dmaengine_terminate_all(controller->dma_tx);
		dmaengine_terminate_all(controller->dma_rx);
		return -ETIMEDOUT;
	}

	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&controller->dev, "I/O Error in DMA RX\n");
		spi_imx->devtype_data->reset(spi_imx);
		dmaengine_terminate_all(controller->dma_rx);
		return -ETIMEDOUT;
	}

	return 0;
/* fallback to pio */
dma_failure_no_start:
	transfer->error |= SPI_TRANS_FAIL_NO_START;
	return ret;
}
/*
 * Interrupt-driven PIO transfer: prime the FIFO, enable the TX-empty
 * interrupt and sleep until the ISR signals completion. On timeout the
 * controller is reset and -ETIMEDOUT returned.
 */
static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
	unsigned long transfer_timeout;
	unsigned long timeout;

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	reinit_completion(&spi_imx->xfer_done);

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&spi->dev, "I/O Error in PIO\n");
		spi_imx->devtype_data->reset(spi_imx);
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Busy-poll a short transfer without interrupts: alternately drain the RX
 * FIFO and refill TX until all words are exchanged. If the deadline passes
 * with RX words still outstanding, fall back to interrupt-mode PIO.
 */
static int spi_imx_poll_transfer(struct spi_device *spi,
				 struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
	unsigned long timeout;

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	/* fill in the fifo before timeout calculations if we are
	 * interrupted here, then the data is getting transferred by
	 * the HW while we are interrupted
	 */
	spi_imx_push(spi_imx);

	timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies;
	while (spi_imx->txfifo) {
		/* RX */
		while (spi_imx->txfifo &&
		       spi_imx->devtype_data->rx_available(spi_imx)) {
			spi_imx->rx(spi_imx);
			spi_imx->txfifo--;
		}

		/* TX */
		if (spi_imx->count) {
			spi_imx_push(spi_imx);
			continue;
		}

		if (spi_imx->txfifo &&
		    time_after(jiffies, timeout)) {

			dev_err_ratelimited(&spi->dev,
					    "timeout period reached: jiffies: %lu- falling back to interrupt mode\n",
					    jiffies - timeout);

			/* fall back to interrupt mode */
			return spi_imx_pio_transfer(spi, transfer);
		}
	}

	return 0;
}
/*
 * Slave-mode PIO transfer: prime the FIFO and wait (interruptibly) for the
 * remote master to clock the data. spi_imx_slave_abort() or a signal ends
 * the wait with -EINTR. i.MX53 transfers are capped by the hardware burst
 * limit.
 */
static int spi_imx_pio_transfer_slave(struct spi_device *spi,
				      struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
	int ret = 0;

	if (is_imx53_ecspi(spi_imx) &&
	    transfer->len > MX53_MAX_TRANSFER_BYTES) {
		dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
			MX53_MAX_TRANSFER_BYTES);
		return -EMSGSIZE;
	}

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	reinit_completion(&spi_imx->xfer_done);
	spi_imx->slave_aborted = false;

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);

	if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
	    spi_imx->slave_aborted) {
		dev_dbg(&spi->dev, "interrupted\n");
		ret = -EINTR;
	}

	/* ecspi has a HW issue when works in Slave mode,
	 * after 64 words written to TXFIFO, even TXFIFO becomes empty,
	 * ECSPI_TXDATA keeps shift out the last word data,
	 * so we have to disable ECSPI when in slave mode after the
	 * transfer completes
	 */
	if (spi_imx->devtype_data->disable)
		spi_imx->devtype_data->disable(spi_imx);

	return ret;
}
  1322. static int spi_imx_transfer_one(struct spi_controller *controller,
  1323. struct spi_device *spi,
  1324. struct spi_transfer *transfer)
  1325. {
  1326. struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
  1327. unsigned long hz_per_byte, byte_limit;
  1328. spi_imx_setupxfer(spi, transfer);
  1329. transfer->effective_speed_hz = spi_imx->spi_bus_clk;
  1330. /* flush rxfifo before transfer */
  1331. while (spi_imx->devtype_data->rx_available(spi_imx))
  1332. readl(spi_imx->base + MXC_CSPIRXDATA);
  1333. if (spi_imx->slave_mode)
  1334. return spi_imx_pio_transfer_slave(spi, transfer);
  1335. /*
  1336. * If we decided in spi_imx_can_dma() that we want to do a DMA
  1337. * transfer, the SPI transfer has already been mapped, so we
  1338. * have to do the DMA transfer here.
  1339. */
  1340. if (spi_imx->usedma)
  1341. return spi_imx_dma_transfer(spi_imx, transfer);
  1342. /*
  1343. * Calculate the estimated time in us the transfer runs. Find
  1344. * the number of Hz per byte per polling limit.
  1345. */
  1346. hz_per_byte = polling_limit_us ? ((8 + 4) * USEC_PER_SEC) / polling_limit_us : 0;
  1347. byte_limit = hz_per_byte ? transfer->effective_speed_hz / hz_per_byte : 1;
  1348. /* run in polling mode for short transfers */
  1349. if (transfer->len < byte_limit)
  1350. return spi_imx_poll_transfer(spi, transfer);
  1351. return spi_imx_pio_transfer(spi, transfer);
  1352. }
/* Per-device setup: nothing to program here, just log the parameters. */
static int spi_imx_setup(struct spi_device *spi)
{
	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

	return 0;
}
/* Nothing allocated in spi_imx_setup(), so nothing to release here. */
static void spi_imx_cleanup(struct spi_device *spi)
{
}
  1362. static int
  1363. spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
  1364. {
  1365. struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
  1366. int ret;
  1367. ret = pm_runtime_resume_and_get(spi_imx->dev);
  1368. if (ret < 0) {
  1369. dev_err(spi_imx->dev, "failed to enable clock\n");
  1370. return ret;
  1371. }
  1372. ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
  1373. if (ret) {
  1374. pm_runtime_mark_last_busy(spi_imx->dev);
  1375. pm_runtime_put_autosuspend(spi_imx->dev);
  1376. }
  1377. return ret;
  1378. }
  1379. static int
  1380. spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg)
  1381. {
  1382. struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
  1383. pm_runtime_mark_last_busy(spi_imx->dev);
  1384. pm_runtime_put_autosuspend(spi_imx->dev);
  1385. return 0;
  1386. }
  1387. static int spi_imx_slave_abort(struct spi_controller *controller)
  1388. {
  1389. struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
  1390. spi_imx->slave_aborted = true;
  1391. complete(&spi_imx->xfer_done);
  1392. return 0;
  1393. }
/*
 * Probe: allocate a master or slave controller based on the "spi-slave"
 * DT property, wire up the per-SoC devtype data and SPI-core hooks, map
 * registers, request the IRQ and clocks, enable runtime PM, optionally set
 * up DMA, reset the hardware and register the controller.
 */
static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct spi_controller *controller;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int ret, irq, spi_drctl;
	const struct spi_imx_devtype_data *devtype_data =
			of_device_get_match_data(&pdev->dev);
	bool slave_mode;
	u32 val;

	/* Slave mode needs both hardware support and a DT opt-in. */
	slave_mode = devtype_data->has_slavemode &&
			of_property_read_bool(np, "spi-slave");
	if (slave_mode)
		controller = spi_alloc_slave(&pdev->dev,
					     sizeof(struct spi_imx_data));
	else
		controller = spi_alloc_master(&pdev->dev,
					      sizeof(struct spi_imx_data));
	if (!controller)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
	if ((ret < 0) || (spi_drctl >= 0x3)) {
		/* '11' is reserved */
		spi_drctl = 0;
	}

	platform_set_drvdata(pdev, controller);

	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	controller->bus_num = np ? -1 : pdev->id;
	controller->use_gpio_descriptors = true;

	spi_imx = spi_controller_get_devdata(controller);
	spi_imx->controller = controller;
	spi_imx->dev = &pdev->dev;
	spi_imx->slave_mode = slave_mode;

	spi_imx->devtype_data = devtype_data;

	/*
	 * Get number of chip selects from device properties. This can be
	 * coming from device tree or boardfiles, if it is not defined,
	 * a default value of 3 chip selects will be used, as all the legacy
	 * board files have <= 3 chip selects.
	 */
	if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
		controller->num_chipselect = val;
	else
		controller->num_chipselect = 3;

	spi_imx->controller->transfer_one = spi_imx_transfer_one;
	spi_imx->controller->setup = spi_imx_setup;
	spi_imx->controller->cleanup = spi_imx_cleanup;
	spi_imx->controller->prepare_message = spi_imx_prepare_message;
	spi_imx->controller->unprepare_message = spi_imx_unprepare_message;
	spi_imx->controller->slave_abort = spi_imx_slave_abort;
	spi_imx->controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS;

	/* Mode bits that only some variants support. */
	if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
	    is_imx53_ecspi(spi_imx))
		spi_imx->controller->mode_bits |= SPI_LOOP | SPI_READY;

	if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
		spi_imx->controller->mode_bits |= SPI_RX_CPHA_FLIP;

	if (is_imx51_ecspi(spi_imx) &&
	    device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
		/*
		 * When using HW-CS implementing SPI_CS_WORD can be done by just
		 * setting the burst length to the word size. This is
		 * considerably faster than manually controlling the CS.
		 */
		spi_imx->controller->mode_bits |= SPI_CS_WORD;

	spi_imx->spi_drctl = spi_drctl;

	init_completion(&spi_imx->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_controller_put;
	}
	spi_imx->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_controller_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_controller_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_controller_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_controller_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_controller_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	/* Start runtime PM active, holding a reference until probe ends. */
	pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(spi_imx->dev);
	pm_runtime_get_noresume(spi_imx->dev);
	pm_runtime_set_active(spi_imx->dev);
	pm_runtime_enable(spi_imx->dev);

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx35 and i.mx6 now, can remove the constraint
	 * if validated on other chips.
	 */
	if (spi_imx->devtype_data->has_dmamode) {
		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller);
		if (ret == -EPROBE_DEFER)
			goto out_runtime_pm_put;

		/* Any other DMA failure just means we run in PIO mode. */
		if (ret < 0)
			dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
				ret);
	}

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	controller->dev.of_node = pdev->dev.of_node;
	ret = spi_register_controller(controller);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "register controller failed\n");
		goto out_register_controller;
	}

	/* Drop the probe-time PM reference; autosuspend takes over. */
	pm_runtime_mark_last_busy(spi_imx->dev);
	pm_runtime_put_autosuspend(spi_imx->dev);

	return ret;

out_register_controller:
	if (spi_imx->devtype_data->has_dmamode)
		spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(spi_imx->dev);

	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_controller_put:
	spi_controller_put(controller);

	return ret;
}
/*
 * Driver unbind handler: unregister the controller, quiesce the SPI
 * hardware, and tear down runtime PM and DMA resources.
 *
 * Return: always 0.
 */
static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_controller *controller = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
	int ret;

	/* Stop new transfers before touching hardware or PM state. */
	spi_unregister_controller(controller);

	/*
	 * Resume the device (enabling its clocks) so the register write
	 * below reaches live hardware; if resume fails, skip the write
	 * rather than access an unclocked block.
	 */
	ret = pm_runtime_get_sync(spi_imx->dev);
	if (ret >= 0)
		/* Clear the control register to disable the controller. */
		writel(0, spi_imx->base + MXC_CSPICTRL);
	else
		dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");

	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_put_sync(spi_imx->dev);
	pm_runtime_disable(spi_imx->dev);

	/*
	 * NOTE(review): called unconditionally even though probe only sets
	 * up DMA when has_dmamode — assumes spi_imx_sdma_exit() tolerates
	 * a no-DMA setup; verify against its implementation.
	 */
	spi_imx_sdma_exit(spi_imx);

	return 0;
}
  1555. static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
  1556. {
  1557. struct spi_controller *controller = dev_get_drvdata(dev);
  1558. struct spi_imx_data *spi_imx;
  1559. int ret;
  1560. spi_imx = spi_controller_get_devdata(controller);
  1561. ret = clk_prepare_enable(spi_imx->clk_per);
  1562. if (ret)
  1563. return ret;
  1564. ret = clk_prepare_enable(spi_imx->clk_ipg);
  1565. if (ret) {
  1566. clk_disable_unprepare(spi_imx->clk_per);
  1567. return ret;
  1568. }
  1569. return 0;
  1570. }
  1571. static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
  1572. {
  1573. struct spi_controller *controller = dev_get_drvdata(dev);
  1574. struct spi_imx_data *spi_imx;
  1575. spi_imx = spi_controller_get_devdata(controller);
  1576. clk_disable_unprepare(spi_imx->clk_per);
  1577. clk_disable_unprepare(spi_imx->clk_ipg);
  1578. return 0;
  1579. }
/*
 * System-sleep suspend callback: move the pins to their "sleep"
 * pinctrl state. The call's return value is intentionally ignored
 * (best effort; not all boards define a sleep state).
 *
 * Return: always 0.
 */
static int __maybe_unused spi_imx_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}
/*
 * System-sleep resume callback: restore the pins to their "default"
 * pinctrl state. The call's return value is intentionally ignored
 * (best effort, mirroring spi_imx_suspend()).
 *
 * Return: always 0.
 */
static int __maybe_unused spi_imx_resume(struct device *dev)
{
	pinctrl_pm_select_default_state(dev);
	return 0;
}
/*
 * PM operations table: runtime PM gates the clocks; system sleep only
 * switches pinctrl states.
 */
static const struct dev_pm_ops imx_spi_pm = {
	SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
				spi_imx_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
};
/* Platform driver glue: matched against DT via spi_imx_dt_ids. */
static struct platform_driver spi_imx_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = spi_imx_dt_ids,
		   .pm = &imx_spi_pm,
	},
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
/* Standard module boilerplate: register the driver and describe the module. */
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("i.MX SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);