spi-mt65xx.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>

#define SPI_CFG0_REG				0x0000
#define SPI_CFG1_REG				0x0004
#define SPI_TX_SRC_REG				0x0008
#define SPI_RX_DST_REG				0x000c
#define SPI_TX_DATA_REG				0x0010
#define SPI_RX_DATA_REG				0x0014
#define SPI_CMD_REG				0x0018
#define SPI_STATUS0_REG				0x001c
#define SPI_PAD_SEL_REG				0x0024
#define SPI_CFG2_REG				0x0028
#define SPI_TX_SRC_REG_64			0x002c
#define SPI_RX_DST_REG_64			0x0030
#define SPI_CFG3_IPM_REG			0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET		0
#define SPI_CFG0_SCK_LOW_OFFSET			8
#define SPI_CFG0_CS_HOLD_OFFSET			16
#define SPI_CFG0_CS_SETUP_OFFSET		24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET		0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET		16

#define SPI_CFG1_CS_IDLE_OFFSET			0
#define SPI_CFG1_PACKET_LOOP_OFFSET		8
#define SPI_CFG1_PACKET_LENGTH_OFFSET		16
#define SPI_CFG1_GET_TICK_DLY_OFFSET		29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1		30

#define SPI_CFG1_GET_TICK_DLY_MASK		0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1		0xc0000000

#define SPI_CFG1_CS_IDLE_MASK			0xff
#define SPI_CFG1_PACKET_LOOP_MASK		0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK		0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK		GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET		0
#define SPI_CFG2_SCK_LOW_OFFSET			16

#define SPI_CMD_ACT				BIT(0)
#define SPI_CMD_RESUME				BIT(1)
#define SPI_CMD_RST				BIT(2)
#define SPI_CMD_PAUSE_EN			BIT(4)
#define SPI_CMD_DEASSERT			BIT(5)
#define SPI_CMD_SAMPLE_SEL			BIT(6)
#define SPI_CMD_CS_POL				BIT(7)
#define SPI_CMD_CPHA				BIT(8)
#define SPI_CMD_CPOL				BIT(9)
#define SPI_CMD_RX_DMA				BIT(10)
#define SPI_CMD_TX_DMA				BIT(11)
#define SPI_CMD_TXMSBF				BIT(12)
#define SPI_CMD_RXMSBF				BIT(13)
#define SPI_CMD_RX_ENDIAN			BIT(14)
#define SPI_CMD_TX_ENDIAN			BIT(15)
#define SPI_CMD_FINISH_IE			BIT(16)
#define SPI_CMD_PAUSE_IE			BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE		BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP			BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET		22

#define SPI_CMD_IPM_GET_TICKDLY_MASK		GENMASK(24, 22)

#define PIN_MODE_CFG(x)				((x) / 2)

#define SPI_CFG3_IPM_HALF_DUPLEX_DIR		BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN		BIT(3)
#define SPI_CFG3_IPM_XMODE_EN			BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG		BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET		8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET	12

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK		GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK		GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK		GENMASK(15, 12)

#define MT8173_SPI_MAX_PAD_SEL			3

#define MTK_SPI_PAUSE_INT_STATUS		0x2

#define MTK_SPI_MAX_FIFO_SIZE			32U
#define MTK_SPI_PACKET_SIZE			1024
#define MTK_SPI_IPM_PACKET_SIZE			SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP			SZ_256

#define MTK_SPI_IDLE				0
#define MTK_SPI_PAUSED				1

#define MTK_SPI_32BITS_MASK			(0xffffffff)

#define DMA_ADDR_EXT_BITS			(36)
#define DMA_ADDR_DEF_BITS			(32)

/**
 * struct mtk_spi_compatible - device data structure
 * @need_pad_sel:	Enable pad (pins) selection in SPI controller
 * @must_tx:		Must explicitly send dummy TX bytes to do RX only transfer
 * @enhance_timing:	Enable adjusting cfg register to enhance time accuracy
 * @dma_ext:		DMA address extension supported
 * @no_need_unprepare:	Don't unprepare the SPI clk during runtime
 * @ipm_design:		Adjust/extend registers to support IPM design IP features
 */
struct mtk_spi_compatible {
	bool need_pad_sel;
	bool must_tx;
	bool enhance_timing;
	bool dma_ext;
	bool no_need_unprepare;
	bool ipm_design;
};

/**
 * struct mtk_spi - SPI driver instance
 * @base:		Start address of the SPI controller registers
 * @state:		SPI controller state
 * @pad_num:		Number of pad_sel entries
 * @pad_sel:		Groups of pins to select
 * @parent_clk:		Parent of sel_clk
 * @sel_clk:		SPI master mux clock
 * @spi_clk:		Peripheral clock
 * @spi_hclk:		AHB bus clock
 * @cur_transfer:	Currently processed SPI transfer
 * @xfer_len:		Number of bytes to transfer
 * @num_xfered:		Number of transferred bytes
 * @tx_sgl:		TX transfer scatterlist
 * @rx_sgl:		RX transfer scatterlist
 * @tx_sgl_len:		Size of TX DMA transfer
 * @rx_sgl_len:		Size of RX DMA transfer
 * @dev_comp:		Device data structure
 * @spi_clk_hz:		Current SPI clock in Hz
 * @spimem_done:	SPI-MEM operation completion
 * @use_spimem:		Enables SPI-MEM
 * @dev:		Device pointer
 * @tx_dma:		DMA start for SPI-MEM TX
 * @rx_dma:		DMA start for SPI-MEM RX
 */
struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
	u32 spi_clk_hz;
	struct completion spimem_done;
	bool use_spimem;
	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mtk_ipm_compat = {
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt6893_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.no_need_unprepare = true,
};

/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
	.tick_delay = 0,
};
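
/*
 * A board or client driver may override this through
 * spi_device.controller_data before spi_setup() runs. A minimal,
 * hypothetical sketch (the names below are illustrative, not part of
 * this file):
 *
 *	static const struct mtk_chip_config my_board_chip_info = {
 *		.sample_sel = 1,
 *		.tick_delay = 2,
 *	};
 *
 *	spi->controller_data = (void *)&my_board_chip_info;
 */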

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,spi-ipm",
		.data = (void *)&mtk_ipm_compat,
	},
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6893-spi",
		.data = (void *)&mt6893_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}
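
/*
 * Convert the CS setup/hold/inactive delays in spi->cs_setup, cs_hold
 * and cs_inactive from nanoseconds into SPI clock ticks, and program
 * them into SPI_CFG0 (setup/hold) and SPI_CFG1 (idle time).
 */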
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
	struct spi_delay *cs_setup = &spi->cs_setup;
	struct spi_delay *cs_hold = &spi->cs_hold;
	struct spi_delay *cs_inactive = &spi->cs_inactive;
	u32 setup, hold, inactive;
	u32 reg_val;
	int delay;

	delay = spi_delay_to_ns(cs_setup, NULL);
	if (delay < 0)
		return delay;
	setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_hold, NULL);
	if (delay < 0)
		return delay;
	hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_inactive, NULL);
	if (delay < 0)
		return delay;
	inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	if (hold || setup) {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		if (mdata->dev_comp->enhance_timing) {
			if (hold) {
				hold = min_t(u32, hold, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
			}
		} else {
			if (hold) {
				hold = min_t(u32, hold, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xff)
					    << SPI_CFG0_CS_SETUP_OFFSET);
			}
		}
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	if (inactive) {
		inactive = min_t(u32, inactive, 0x100);
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
		reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	return 0;
}
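
/*
 * Example (hypothetical client code): request a 100 ns CS setup and a
 * 50 ns CS hold time before calling spi_setup():
 *
 *	spi->cs_setup.value = 100;
 *	spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
 *	spi->cs_hold.value = 50;
 *	spi->cs_hold.unit = SPI_DELAY_UNIT_NSECS;
 */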

static int mtk_spi_hw_init(struct spi_master *master,
			   struct spi_device *spi)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (mdata->dev_comp->ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (spi->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the TX/RX bit order (MSB first unless SPI_LSB_FIRST is set) */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	/* tick delay */
	if (mdata->dev_comp->enhance_timing) {
		if (mdata->dev_comp->ipm_design) {
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	/* set hw cs timing */
	mtk_spi_set_hw_cs_timing(spi);
	return 0;
}

static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	return mtk_spi_hw_init(master, msg->spi);
}
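
/*
 * CS is driven through the controller's pause mode: while CS is
 * active, pause keeps it asserted between transfers; once CS is
 * deasserted, pause is released and the controller is reset to idle.
 */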
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}
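
/*
 * Program the SCK high/low times for the requested speed: the source
 * clock is divided by at least two, half of the divider is spent in
 * each clock phase, and the registers hold the phase length minus one.
 */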
static void mtk_spi_prepare_transfer(struct spi_master *master,
				     u32 speed_hz)
{
	u32 div, sck_time, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (speed_hz < mdata->spi_clk_hz / 2)
		div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff)
			    << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}
}
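
/*
 * A transfer is split into packets of at most MTK_SPI_PACKET_SIZE
 * (1 KiB, or 64 KiB on IPM designs); the packet loop count repeats the
 * packet until xfer_len bytes have been covered.
 */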
static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->dev_comp->ipm_design)
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	if (mdata->dev_comp->ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}
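
/*
 * Return the tail of xfer_len that does not fill a whole packet; this
 * remainder is deferred to a later round of the interrupt handler.
 */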
static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
{
	u32 mult_delta = 0;

	if (mdata->dev_comp->ipm_design) {
		if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
	} else {
		if (xfer_len > MTK_SPI_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	}

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}
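
/*
 * PIO path: push up to MTK_SPI_MAX_FIFO_SIZE bytes into the TX FIFO and
 * start the transfer; the interrupt handler drains/refills the FIFO for
 * the rest. Returning 1 tells the SPI core the transfer is in progress
 * and will be finalized from the interrupt handler.
 */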
static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(master, xfer->speed_hz);
	mtk_spi_setup_packet(master);

	if (xfer->tx_buf) {
		/* only fill up to the FIFO-sized chunk; xfer->len may exceed it */
		cnt = mdata->xfer_len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = mdata->xfer_len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
	}

	mtk_spi_enable_transfer(master);

	return 1;
}
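
/*
 * DMA path: map the first TX/RX scatterlist entries, program the DMA
 * addresses and packet geometry, then kick the transfer; the interrupt
 * handler walks the remaining scatterlist entries.
 */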
static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(master, xfer->speed_hz);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}
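
/*
 * On IPM designs, half-duplex direction is configured first; the
 * transfer is then dispatched to the DMA or FIFO path.
 */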
static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
	u32 reg_val = 0;

	/* prepare xfer direction and duplex mode */
	if (mdata->dev_comp->ipm_design) {
		if (!xfer->tx_buf || !xfer->rx_buf) {
			reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
			if (xfer->rx_buf)
				reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
		}
		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
	}

	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
		/* CS de-asserted, gpiolib will handle inversion */
		gpiod_direction_output(spi->cs_gpiod, 0);

	return 0;
}
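
/*
 * One interrupt fires per FIFO chunk (PIO) or per DMA chunk: the
 * handler drains/refills the FIFO or advances the scatterlists, and
 * finalizes the current transfer once all bytes have been moved.
 */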
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	/* SPI-MEM ops */
	if (mdata->use_spimem) {
		complete(&mdata->spimem_done);
		return IRQ_HANDLED;
	}

	if (!master->can_dma(master, NULL, trans)) {
		if (trans->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(trans->rx_buf +
				       mdata->num_xfered +
				       (cnt * 4),
				       &reg_val,
				       remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == trans->len) {
			spi_finalize_current_transfer(master);
			return IRQ_HANDLED;
		}

		len = trans->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(master);

		cnt = mdata->xfer_len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
			      trans->tx_buf + mdata->num_xfered, cnt);

		remainder = mdata->xfer_len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val,
			       trans->tx_buf + (cnt * 4) + mdata->num_xfered,
			       remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}

		mtk_spi_enable_transfer(master);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}
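
/* spi-mem operations, used only on IPM-capable controllers */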
static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
				      struct spi_mem_op *op)
{
	int opcode_len;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* force data buffer dma-aligned. */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
				       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_transfer_wait(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
	/*
	 * For each byte we wait for 8 cycles of the SPI clock.
	 * Since speed is defined in Hz and we want milliseconds,
	 * the multiplier is 8 * 1000.
	 */
	u64 ms = 8000LL;

	if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32; /* avoid a zero timeout for short transfers */
	else
		ms *= op->data.nbytes;
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000; /* 1s tolerance */

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
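
/*
 * Execute a spi-mem op: the opcode, address and dummy bytes (plus any
 * TX data) are serialized into a DMA bounce buffer and sent by DMA;
 * RX data is DMA'd directly into the caller's buffer, or through a
 * bounce buffer when that buffer is not 4-byte aligned.
 */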
static int mtk_spi_mem_exec_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
	u32 reg_val, nio, tx_size;
	char *tx_tmp_buf, *rx_tmp_buf;
	int ret = 0;

	mdata->use_spimem = true;
	reinit_completion(&mdata->spimem_done);

	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->master, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (op->data.dir == SPI_MEM_NO_DATA) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->master);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;
	else
		nio = 1;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio);

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max_t(u32, tx_size, 32);

	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
	if (!tx_tmp_buf) {
		mdata->use_spimem = false;
		return -ENOMEM;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1,
		       0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out,
		       op->data.nbytes);

	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
				       tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
					     GFP_KERNEL | GFP_DMA);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto unmap_tx_dma;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		mdata->rx_dma = dma_map_single(mdata->dev,
					       rx_tmp_buf,
					       op->data.nbytes,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
			ret = -ENOMEM;
			goto kfree_rx_tmp_buf;
		}
	}

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);

	mtk_spi_enable_transfer(mem->spi->master);

	/* Wait for the interrupt. */
	ret = mtk_spi_transfer_wait(mem, op);
	if (ret)
		goto unmap_rx_dma;

	/* spi disable dma */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

unmap_rx_dma:
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_unmap_single(mdata->dev, mdata->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
	}
kfree_rx_tmp_buf:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
unmap_tx_dma:
	dma_unmap_single(mdata->dev, mdata->tx_dma,
			 tx_size, DMA_TO_DEVICE);
err_exit:
	kfree(tx_tmp_buf);
	mdata->use_spimem = false;

	return ret;
}

static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
	.adjust_op_size = mtk_spi_mem_adjust_op_size,
	.supports_op = mtk_spi_mem_supports_op,
	.exec_op = mtk_spi_mem_exec_op,
};

static int mtk_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct mtk_spi *mdata;
	int i, irq, ret, addr_bits;

	master = devm_spi_alloc_master(dev, sizeof(*mdata));
	if (!master)
		return dev_err_probe(dev, -ENOMEM, "failed to alloc spi master\n");

	master->auto_runtime_pm = true;
	master->dev.of_node = dev->of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;
	master->setup = mtk_spi_setup;
	master->set_cs_timing = mtk_spi_set_hw_cs_timing;
	master->use_gpio_descriptors = true;

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = device_get_match_data(dev);

	if (mdata->dev_comp->enhance_timing)
		master->mode_bits |= SPI_CS_HIGH;

	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;
	if (mdata->dev_comp->ipm_design)
		master->mode_bits |= SPI_LOOP;

	if (mdata->dev_comp->ipm_design) {
		mdata->dev = dev;
		master->mem_ops = &mtk_spi_mem_ops;
		init_completion(&mdata->spimem_done);
	}

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0)
			return dev_err_probe(dev, -EINVAL,
				"No 'mediatek,pad-select' property\n");

		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel)
			return -ENOMEM;

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(dev->of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
				return dev_err_probe(dev, -EINVAL,
						     "wrong pad-sel[%d]: %u\n",
						     i, mdata->pad_sel[i]);
		}
	}

	platform_set_drvdata(pdev, master);
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base))
		return PTR_ERR(mdata->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	if (mdata->dev_comp->ipm_design)
		dma_set_max_seg_size(dev, SZ_16M);
	else
		dma_set_max_seg_size(dev, SZ_256K);

	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
				     "failed to get parent-clk\n");

	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");

	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");

	mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(mdata->spi_hclk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to clk_set_parent\n");

	ret = clk_prepare_enable(mdata->spi_hclk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to enable hclk\n");

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_hclk);
		return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
	}

	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != master->num_chipselect)
			return dev_err_probe(dev, -EINVAL,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, master->num_chipselect);

		if (!master->cs_gpiods && master->num_chipselect > 1)
			return dev_err_probe(dev, -EINVAL,
				"cs_gpios not specified and num_chipselect > 1\n");
	}

	if (mdata->dev_comp->dma_ext)
		addr_bits = DMA_ADDR_EXT_BITS;
	else
		addr_bits = DMA_ADDR_DEF_BITS;
	ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
	if (ret)
		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
			   addr_bits, ret);

	ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(dev), master);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register irq\n");

	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret) {
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "failed to register master\n");
	}

	return 0;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
		complete(&mdata->spimem_done);

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	mtk_spi_reset(mdata);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_unprepare(mdata->spi_clk);
		clk_unprepare(mdata->spi_hclk);
	}

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");