spi-cadence-quadspi.c

// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for Cadence QSPI Controller
//
// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/timer.h>

#define CQSPI_NAME			"cadence-qspi"
#define CQSPI_MAX_CHIPSELECT		16

/* Quirks */
#define CQSPI_NEEDS_WR_DELAY		BIT(0)
#define CQSPI_DISABLE_DAC_MODE		BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA	BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION	BIT(3)
#define CQSPI_SLOW_SRAM			BIT(4)

/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL		BIT(0)

#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
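
/*
 * Note: CQSPI_OP_WIDTH() maps a spi-mem phase buswidth to the 2-bit
 * transfer-type encoding the controller's instruction registers expect:
 * 1 -> 0 (single), 2 -> 1 (dual), 4 -> 2 (quad), 8 -> 3 (octal). A phase
 * with zero bytes is encoded as single I/O.
 */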

struct cqspi_st;

struct cqspi_flash_pdata {
	struct cqspi_st	*cqspi;
	u32		clk_rate;
	u32		read_delay;
	u32		tshsl_ns;
	u32		tsd2d_ns;
	u32		tchsh_ns;
	u32		tslch_ns;
	u8		cs;
};

struct cqspi_st {
	struct platform_device	*pdev;
	struct spi_master	*master;
	struct clk		*clk;
	unsigned int		sclk;

	void __iomem		*iobase;
	void __iomem		*ahb_base;
	resource_size_t		ahb_size;
	struct completion	transfer_complete;

	struct dma_chan		*rx_chan;
	struct completion	rx_dma_complete;
	dma_addr_t		mmap_phys_base;

	int			current_cs;
	unsigned long		master_ref_clk_hz;
	bool			is_decoded_cs;
	u32			fifo_depth;
	u32			fifo_width;
	u32			num_chipselect;
	bool			rclk_en;
	u32			trigger_address;
	u32			wr_delay;
	bool			use_direct_mode;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
	bool			use_dma_read;
	u32			pd_dev_id;
	bool			wr_completion;
	bool			slow_sram;
};

struct cqspi_driver_platdata {
	u32 hwcaps_mask;
	u8 quirks;
	int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
				 u_char *rxbuf, loff_t from_addr, size_t n_rx);
	u32 (*get_dma_status)(struct cqspi_st *cqspi);
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS			500
#define CQSPI_READ_TIMEOUT_MS			10

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_BYTES_MAX			4
#define CQSPI_DUMMY_CLKS_MAX			31

#define CQSPI_STIG_DATA_LEN_MAX			8

/* Register map */
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK		BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL	BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK		BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_DMA_MASK		BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_DTR_PROTO		BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE		BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_READCAPTURE			0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB	0
#define CQSPI_REG_READCAPTURE_DELAY_LSB		1
#define CQSPI_REG_READCAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_DMA				0x20
#define CQSPI_REG_DMA_SINGLE_LSB		0
#define CQSPI_REG_DMA_BURST_LSB			8
#define CQSPI_REG_DMA_SINGLE_MASK		0xFF
#define CQSPI_REG_DMA_BURST_MASK		0xFF

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL		0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL		BIT(14)

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_INDTRIG_ADDRRANGE		0x80

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_POLLING_STATUS		0xB0
#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB	16

#define CQSPI_REG_OP_EXT_LOWER			0xE0
#define CQSPI_REG_OP_EXT_READ_LSB		24
#define CQSPI_REG_OP_EXT_WRITE_LSB		16
#define CQSPI_REG_OP_EXT_STIG_LSB		0

#define CQSPI_REG_VERSAL_DMA_SRC_ADDR		0x1000

#define CQSPI_REG_VERSAL_DMA_DST_ADDR		0x1800
#define CQSPI_REG_VERSAL_DMA_DST_SIZE		0x1804

#define CQSPI_REG_VERSAL_DMA_DST_CTRL		0x180C

#define CQSPI_REG_VERSAL_DMA_DST_I_STS		0x1814
#define CQSPI_REG_VERSAL_DMA_DST_I_EN		0x1818
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS		0x181C
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK	BIT(1)

#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB	0x1828

#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL	0xF43FFA00
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL	0x6

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR			BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW			BIT(1)
#define CQSPI_REG_IRQ_IND_COMP			BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT		BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR		BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR		BIT(5)
#define CQSPI_REG_IRQ_WATERMARK			BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL		BIT(12)

#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_IND_SRAM_FULL	| \
					 CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		| \
					 CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK		0x1FFFF
#define CQSPI_DMA_UNALIGN		0x3

#define CQSPI_REG_VERSAL_DMA_VAL		0x602

static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
	u32 val;

	return readl_relaxed_poll_timeout(reg, val,
					  (((clr ? ~val : val) & mask) == mask),
					  10, CQSPI_TIMEOUT_MS * 1000);
}
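
/*
 * Note: with clr == false, cqspi_wait_for_bit() polls until every bit in
 * @mask is set; with clr == true, until every bit in @mask is cleared
 * (e.g. waiting for CQSPI_REG_CMDCTRL_INPROGRESS_MASK to drop after a
 * STIG command). It polls every 10 us and gives up after CQSPI_TIMEOUT_MS.
 */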

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
}

static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
{
	u32 dma_status;

	dma_status = readl(cqspi->iobase +
			   CQSPI_REG_VERSAL_DMA_DST_I_STS);
	writel(dma_status, cqspi->iobase +
	       CQSPI_REG_VERSAL_DMA_DST_I_STS);

	return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	unsigned int irq_status;
	struct device *device = &cqspi->pdev->dev;
	const struct cqspi_driver_platdata *ddata;

	ddata = of_device_get_match_data(device);

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
		if (ddata->get_dma_status(cqspi)) {
			complete(&cqspi->transfer_complete);
			return IRQ_HANDLED;
		}
	} else if (!cqspi->slow_sram) {
		irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
	} else {
		irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
	}

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}

static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
{
	u32 rdreg = 0;

	rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}

static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
{
	unsigned int dummy_clk;

	if (!op->dummy.nbytes)
		return 0;

	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (op->cmd.dtr)
		dummy_clk /= 2;

	return dummy_clk;
}
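
/*
 * Worked example (values assumed for illustration): one dummy byte on a
 * single I/O line costs 1 * (8 / 1) = 8 dummy clocks, while 8 dummy bytes
 * on 8 I/O lines in DTR mode cost 8 * (8 / 8) / 2 = 4 clocks, since DTR
 * transfers a bit on both clock edges.
 */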

static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, still in busy mode. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}

static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execution. */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	/* Poll for completion. */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Poll QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}

static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
				  const struct spi_mem_op *op,
				  unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/* Opcode extension is the LSB. */
	ext = op->cmd.opcode & 0xff;

	reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}

static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op, unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	int ret;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	/*
	 * We enable dual byte opcode here. The callers have to set up the
	 * extension opcode based on which type of operation it is.
	 */
	if (op->cmd.dtr) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up command opcode extension. */
		ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
		if (ret)
			return ret;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
		reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
	}

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	return cqspi_wait_idle(cqspi);
}

static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
			      const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 *rxbuf = op->data.buf.in;
	u8 opcode;
	size_t n_rx = op->data.nbytes;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int dummy_clk;
	size_t read_len;
	int status;

	status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (status)
		return status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, len %zu rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(op);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	dummy_clk = cqspi_calc_dummy(op);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
			<< CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	return 0;
}

static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
			       const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;
	const u8 *txbuf = op->data.buf.out;
	size_t n_tx = op->data.nbytes;
	unsigned int reg;
	unsigned int data;
	size_t write_len;
	int ret;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (ret)
		return ret;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (n_tx) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
		data = 0;
		write_len = (n_tx > 4) ? 4 : n_tx;
		memcpy(&data, txbuf, write_len);
		txbuf += write_len;
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (n_tx > 4) {
			data = 0;
			write_len = n_tx - 4;
			memcpy(&data, txbuf, write_len);
			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	return cqspi_exec_flash_cmd(cqspi, reg);
}

static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(op);

	/* Setup dummy clock cycles */
	dummy_clk = cqspi_calc_dummy(op);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
			<< CQSPI_REG_RD_INSTR_DUMMY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);

	return 0;
}

static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
				       u8 *rxbuf, loff_t from_addr,
				       const size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int mod_bytes = n_rx % 4;
	unsigned int bytes_to_read = 0;
	u8 *rxbuf_end = rxbuf + n_rx;
	int ret = 0;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/*
	 * On the SoCFPGA platform, reading the SRAM is slow due to a
	 * hardware limitation and causes a read-interrupt storm on the
	 * CPU. So enable only the watermark interrupt here and disable
	 * all read interrupts later, as we want to run the "bytes to
	 * read" loop with all read interrupts disabled for maximum
	 * performance.
	 */
	if (!cqspi->slow_sram)
		writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
	else
		writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		/*
		 * Disable all read interrupts until
		 * we are out of "bytes to read".
		 */
		if (cqspi->slow_sram)
			writel(0x0, reg_base + CQSPI_REG_IRQMASK);

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (ret && bytes_to_read == 0) {
			dev_err(dev, "Indirect read timeout, no bytes\n");
			goto failrd;
		}

		while (bytes_to_read != 0) {
			unsigned int word_remain = round_down(remaining, 4);

			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			bytes_to_read = round_down(bytes_to_read, 4);
			/* Read 4 byte word chunks then single bytes */
			if (bytes_to_read) {
				ioread32_rep(ahb_base, rxbuf,
					     (bytes_to_read / 4));
			} else if (!word_remain && mod_bytes) {
				unsigned int temp = ioread32(ahb_base);

				bytes_to_read = mod_bytes;
				memcpy(rxbuf, &temp, min((unsigned int)
							 (rxbuf_end - rxbuf),
							 bytes_to_read));
			}
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}

		if (remaining > 0) {
			reinit_completion(&cqspi->transfer_complete);
			if (cqspi->slow_sram)
				writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
		}
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
	if (ret) {
		dev_err(dev, "Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read (the RD and WR cancel bits are both BIT(1)) */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}

static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
					  u_char *rxbuf, loff_t from_addr,
					  size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, bytes_to_dma;
	loff_t addr = from_addr;
	void *buf = rxbuf;
	dma_addr_t dma_addr;
	u8 bytes_rem;
	int ret = 0;

	bytes_rem = n_rx % 4;
	bytes_to_dma = (n_rx - bytes_rem);

	if (!bytes_to_dma)
		goto nondmard;

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
	if (ret)
		return ret;

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
	writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
	       reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/* Enable DMA done interrupt */
	writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);

	/* Default DMA periph configuration */
	writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);

	/* Configure DMA Dst address */
	writel(lower_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
	writel(upper_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);

	/* Configure DMA Src address */
	writel(cqspi->trigger_address, reg_base +
	       CQSPI_REG_VERSAL_DMA_SRC_ADDR);

	/* Set DMA destination size */
	writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);

	/* Set DMA destination control */
	writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);

	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	reinit_completion(&cqspi->transfer_complete);

	if (!wait_for_completion_timeout(&cqspi->transfer_complete,
					 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
		ret = -ETIMEDOUT;
		goto failrd;
	}

	/* Disable DMA interrupt */
	writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
	       cqspi->iobase + CQSPI_REG_INDIRECTRD);
	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
					PM_OSPI_MUX_SEL_LINEAR);
	if (ret)
		return ret;

nondmard:
	if (bytes_rem) {
		addr += bytes_to_dma;
		buf += bytes_to_dma;
		ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
						  bytes_rem);
		if (ret)
			return ret;
	}

	return 0;

failrd:
	/* Disable DMA interrupt */
	writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);

	return ret;
}

static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
			     const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	/* Set opcode. */
	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/*
	 * SPI NAND flashes require the address of the status register to be
	 * passed in the Read SR command. Also, some SPI NOR flashes like the
	 * Cypress Semper flash expect a 4-byte dummy address in the Read SR
	 * command in DTR mode.
	 *
	 * But this controller does not support an address phase in the Read
	 * SR command when doing auto-HW polling. So, disable write completion
	 * polling on the controller's side. spinand and spi-nor will take
	 * care of polling the status register.
	 */
	if (cqspi->wr_completion) {
		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
	}

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);

	return 0;
}

static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
					loff_t to_addr, const u8 *txbuf,
					const size_t n_tx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	/*
	 * As per the 66AK2G02 TRM (SPRUHY8F), section 11.15.5.3 "Indirect
	 * Access Controller programming sequence", a couple of QSPI_REF_CLK
	 * cycles of delay are required for the above bit to be internally
	 * synchronized by the QSPI module. Provide 5 cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		write_bytes = remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
	if (ret) {
		dev_err(dev, "Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}

static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/*
		 * Convert CS to the one-hot-low encoding used when no
		 * external decoder is present:
		 * CS0 -> 4b'1110
		 * CS1 -> 4b'1101
		 * CS2 -> 4b'1011
		 * CS3 -> 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

	ticks = ref_clk_hz / 1000;	/* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}
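
/*
 * Worked example (illustrative numbers): with a 125 MHz reference clock,
 * a 60 ns delay needs DIV_ROUND_UP(125000 kHz * 60, 1000000) = 8 ticks.
 * The intermediate kHz step keeps the multiplication from overflowing
 * 32 bits for realistic clock rates and delay values.
 */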

static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	       << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}

static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/* Recalculate the baudrate divisor based on QSPI specification. */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}
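
/*
 * Worked example (illustrative numbers): the BAUD field encodes a divider
 * of 2 * (field + 1). With ref_clk_hz = 500 MHz and a requested sclk of
 * 100 MHz, div = DIV_ROUND_UP(500, 2 * 100) - 1 = 2, giving an actual
 * SCLK of 500 / (2 * (2 + 1)) = ~83.3 MHz, i.e. the fastest rate that
 * does not exceed the request.
 */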

static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const bool bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
			    unsigned long sclk)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_chipselect(f_pdata);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(f_pdata);
		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
				       f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}

static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
			   const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	loff_t to = op->addr.val;
	size_t len = op->data.nbytes;
	const u_char *buf = op->data.buf.out;
	int ret;

	ret = cqspi_write_setup(f_pdata, op);
	if (ret)
		return ret;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes
	 * to the flash when it is polling the write completion register in
	 * DTR mode. So, we cannot use direct mode when in DTR mode for
	 * writing data.
	 */
	if (!op->cmd.dtr && cqspi->use_direct_mode &&
	    ((to + len) <= cqspi->ahb_size)) {
		memcpy_toio(cqspi->ahb_base + to, buf, len);
		return cqspi_wait_idle(cqspi);
	}

	return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}

static void cqspi_rx_dma_callback(void *param)
{
	struct cqspi_st *cqspi = param;

	complete(&cqspi->rx_dma_complete);
}

static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
				     u_char *buf, loff_t from, size_t len)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_dst;
	struct device *ddev;

	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return 0;
	}

	ddev = cqspi->rx_chan->device->dev;
	dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ddev, dma_dst)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}
	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
				       len, flags);
	if (!tx) {
		dev_err(dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto err_unmap;
	}

	tx->callback = cqspi_rx_dma_callback;
	tx->callback_param = cqspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&cqspi->rx_dma_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	dma_async_issue_pending(cqspi->rx_chan);
	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
					 msecs_to_jiffies(max_t(size_t, len, 500)))) {
		dmaengine_terminate_sync(cqspi->rx_chan);
		dev_err(dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

err_unmap:
	dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);

	return ret;
}

static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
			  const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	const struct cqspi_driver_platdata *ddata;
	loff_t from = op->addr.val;
	size_t len = op->data.nbytes;
	u_char *buf = op->data.buf.in;
	u64 dma_align = (u64)(uintptr_t)buf;
	int ret;

	ddata = of_device_get_match_data(dev);

	ret = cqspi_read_setup(f_pdata, op);
	if (ret)
		return ret;

	if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
		return cqspi_direct_read_execute(f_pdata, buf, from, len);

	if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
	    virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
		return ddata->indirect_read_dma(f_pdata, buf, from, len);

	return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}
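
/*
 * Read path selection above, in order of preference: memory-mapped
 * (direct) access when the AHB window covers the whole range, the
 * SoC-specific indirect DMA hook when the buffer is DMA-able and 32-bit
 * aligned, and finally PIO through the SRAM FIFO as the fallback.
 */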

static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
	struct cqspi_flash_pdata *f_pdata;

	f_pdata = &cqspi->f_pdata[mem->spi->chip_select];
	cqspi_configure(f_pdata, mem->spi->max_speed_hz);

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
		if (!op->addr.nbytes)
			return cqspi_command_read(f_pdata, op);

		return cqspi_read(f_pdata, op);
	}

	if (!op->addr.nbytes || !op->data.buf.out)
		return cqspi_command_write(f_pdata, op);

	return cqspi_write(f_pdata, op);
}

static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;

	ret = cqspi_mem_process(mem, op);
	if (ret)
		dev_err(&mem->spi->dev, "operation failed with %d\n", ret);

	return ret;
}

static bool cqspi_supports_mem_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	bool all_true, all_false;

	/*
	 * op->dummy.dtr is required for converting nbytes into ncycles.
	 * Also, don't check the dtr field of the op phase having zero nbytes.
	 */
	all_true = op->cmd.dtr &&
		   (!op->addr.nbytes || op->addr.dtr) &&
		   (!op->dummy.nbytes || op->dummy.dtr) &&
		   (!op->data.nbytes || op->data.dtr);

	all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
		    !op->data.dtr;

	if (all_true) {
		/* Right now we only support 8-8-8 DTR mode. */
		if (op->cmd.nbytes && op->cmd.buswidth != 8)
			return false;
		if (op->addr.nbytes && op->addr.buswidth != 8)
			return false;
		if (op->data.nbytes && op->data.buswidth != 8)
			return false;
	} else if (!all_false) {
		/* Mixed DTR modes are not supported. */
		return false;
	}

	return spi_mem_default_supports_op(mem, op);
}
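
/*
 * Example (hypothetical op, for illustration only; opcode and lengths are
 * assumed): an 8D-8D-8D octal DTR read built with the spi-mem helpers
 * passes the all_true check above, while mixing DTR and STR phases in one
 * op is rejected:
 *
 *	struct spi_mem_op op = SPI_MEM_OP(
 *		SPI_MEM_OP_CMD(0xee11, 8),
 *		SPI_MEM_OP_ADDR(4, 0, 8),
 *		SPI_MEM_OP_DUMMY(16, 8),
 *		SPI_MEM_OP_DATA_IN(len, buf, 8));
 *	op.cmd.nbytes = 2;
 *	op.cmd.dtr = op.addr.dtr = op.dummy.dtr = op.data.dtr = true;
 */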

static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}
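
/*
 * Illustrative devicetree fragment for one flash child node (property
 * values are examples only; see the cdns,qspi-nor binding for details):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <50000000>;
 *		cdns,read-delay = <2>;
 *		cdns,tshsl-ns = <60>;
 *		cdns,tsd2d-ns = <60>;
 *		cdns,tchsh-ns = <60>;
 *		cdns,tslch-ns = <60>;
 *	};
 */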

static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	struct device_node *np = dev->of_node;
	u32 id[2];

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		dev_err(dev, "couldn't determine fifo-depth\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
		cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;

	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");

	if (!of_property_read_u32_array(np, "power-domains", id,
					ARRAY_SIZE(id)))
		cqspi->pd_dev_id = id[1];

	return 0;
}
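
/*
 * The controller node itself must provide cdns,fifo-depth, cdns,fifo-width
 * and cdns,trigger-address; num-cs, cdns,is-decoded-cs, cdns,rclk-en and
 * power-domains are optional. Illustrative fragment (values are examples
 * only):
 *
 *	spi@ff705000 {
 *		compatible = "cdns,qspi-nor";
 *		cdns,fifo-depth = <128>;
 *		cdns,fifo-width = <4>;
 *		cdns,trigger-address = <0x00000000>;
 *		num-cs = <4>;
 *	};
 */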

static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	u32 reg;

	cqspi_controller_enable(cqspi, 0);

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1. */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	/* Disable direct access controller */
	if (!cqspi->use_direct_mode) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	/* Enable DMA interface */
	if (cqspi->use_dma_read) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg |= CQSPI_REG_CONFIG_DMA_MASK;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	cqspi_controller_enable(cqspi, 1);
}

static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		int ret = PTR_ERR(cqspi->rx_chan);

		cqspi->rx_chan = NULL;
		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
	}
	init_completion(&cqspi->rx_dma_complete);

	return 0;
}

static const char *cqspi_get_name(struct spi_mem *mem)
{
	struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
	struct device *dev = &cqspi->pdev->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
			      mem->spi->chip_select);
}

static const struct spi_controller_mem_ops cqspi_mem_ops = {
	.exec_op = cqspi_exec_mem_op,
	.get_name = cqspi_get_name,
	.supports_op = cqspi_supports_mem_op,
};

static const struct spi_controller_mem_caps cqspi_mem_caps = {
	.dtr = true,
};

static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
	struct platform_device *pdev = cqspi->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct cqspi_flash_pdata *f_pdata;
	unsigned int cs;
	int ret;

	/* Get flash device data */
	for_each_available_child_of_node(dev->of_node, np) {
		ret = of_property_read_u32(np, "reg", &cs);
		if (ret) {
			dev_err(dev, "Couldn't determine chip select.\n");
			of_node_put(np);
			return ret;
		}

		if (cs >= CQSPI_MAX_CHIPSELECT) {
			dev_err(dev, "Chip select %d out of range.\n", cs);
			of_node_put(np);
			return -EINVAL;
		}

		f_pdata = &cqspi->f_pdata[cs];
		f_pdata->cqspi = cqspi;
		f_pdata->cs = cs;

		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
		if (ret) {
			of_node_put(np);
			return ret;
		}
	}

	return 0;
}
static int cqspi_probe(struct platform_device *pdev)
{
	const struct cqspi_driver_platdata *ddata;
	struct reset_control *rstc, *rstc_ocp;
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct resource *res_ahb;
	struct cqspi_st *cqspi;
	struct resource *res;
	int ret;
	int irq;

	master = devm_spi_alloc_master(&pdev->dev, sizeof(*cqspi));
	if (!master) {
		dev_err(&pdev->dev, "spi_alloc_master failed\n");
		return -ENOMEM;
	}
	master->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
	master->mem_ops = &cqspi_mem_ops;
	master->mem_caps = &cqspi_mem_caps;
	master->dev.of_node = pdev->dev.of_node;

	cqspi = spi_master_get_devdata(master);

	cqspi->pdev = pdev;
	cqspi->master = master;
	platform_set_drvdata(pdev, cqspi);

	/* Obtain configuration from OF. */
	ret = cqspi_of_get_pdata(cqspi);
	if (ret) {
		dev_err(dev, "Cannot get mandatory OF data.\n");
		return -ENODEV;
	}

	/* Obtain QSPI clock. */
	cqspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cqspi->clk)) {
		dev_err(dev, "Cannot claim QSPI clock.\n");
		ret = PTR_ERR(cqspi->clk);
		return ret;
	}

	/* Obtain and remap controller address. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cqspi->iobase = devm_ioremap_resource(dev, res);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		ret = PTR_ERR(cqspi->iobase);
		return ret;
	}

	/* Obtain and remap AHB address. */
	res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		ret = PTR_ERR(cqspi->ahb_base);
		return ret;
	}
	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
	cqspi->ahb_size = resource_size(res_ahb);

	init_completion(&cqspi->transfer_complete);

	/* Obtain IRQ line. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto probe_pm_failed;

	ret = clk_prepare_enable(cqspi->clk);
	if (ret) {
		dev_err(dev, "Cannot enable QSPI clock.\n");
		goto probe_clk_failed;
	}

	/* Obtain QSPI reset control */
	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		dev_err(dev, "Cannot get QSPI reset.\n");
		goto probe_reset_failed;
	}

	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
	if (IS_ERR(rstc_ocp)) {
		ret = PTR_ERR(rstc_ocp);
		dev_err(dev, "Cannot get QSPI OCP reset.\n");
		goto probe_reset_failed;
	}

	/* Cycle both (optional) resets so the IP starts from a known state. */
	reset_control_assert(rstc);
	reset_control_deassert(rstc);

	reset_control_assert(rstc_ocp);
	reset_control_deassert(rstc_ocp);

	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
	master->max_speed_hz = cqspi->master_ref_clk_hz;

	/* write completion is supported by default */
	cqspi->wr_completion = true;

	ddata = of_device_get_match_data(dev);
	if (ddata) {
		if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
			cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
						cqspi->master_ref_clk_hz);
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
			master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
		if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
			cqspi->use_direct_mode = true;
		if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
			cqspi->use_dma_read = true;
		if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
			cqspi->wr_completion = false;
		if (ddata->quirks & CQSPI_SLOW_SRAM)
			cqspi->slow_sram = true;

		if (of_device_is_compatible(pdev->dev.of_node,
					    "xlnx,versal-ospi-1.0")) {
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
			if (ret)
				goto probe_reset_failed;
		}
	}

	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
		goto probe_reset_failed;
	}

	cqspi_wait_idle(cqspi);
	cqspi_controller_init(cqspi);
	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	master->num_chipselect = cqspi->num_chipselect;

	ret = cqspi_setup_flash(cqspi);
	if (ret) {
		dev_err(dev, "failed to setup flash parameters %d\n", ret);
		goto probe_setup_failed;
	}

	if (cqspi->use_direct_mode) {
		ret = cqspi_request_mmap_dma(cqspi);
		if (ret == -EPROBE_DEFER)
			goto probe_setup_failed;
	}

	ret = spi_register_master(master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
		goto probe_setup_failed;
	}

	return 0;
probe_setup_failed:
	cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
	clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
	pm_runtime_put_sync(dev);
probe_pm_failed:
	pm_runtime_disable(dev);
	return ret;
}

static int cqspi_remove(struct platform_device *pdev)
{
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);

	spi_unregister_master(cqspi->master);
	cqspi_controller_enable(cqspi, 0);

	if (cqspi->rx_chan)
		dma_release_channel(cqspi->rx_chan);

	clk_disable_unprepare(cqspi->clk);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

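/*
 * System sleep: suspend quiesces the SPI core, disables the controller
 * and gates its clock; resume re-enables the clock and re-runs the full
 * controller init, so no register context needs to be saved across
 * sleep.
 */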
static int cqspi_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	struct spi_master *master = cqspi->master;
	int ret;

	ret = spi_master_suspend(master);
	cqspi_controller_enable(cqspi, 0);

	clk_disable_unprepare(cqspi->clk);

	return ret;
}

static int cqspi_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	struct spi_master *master = cqspi->master;
	int ret;

	ret = clk_prepare_enable(cqspi->clk);
	if (ret)
		return ret;

	cqspi_wait_idle(cqspi);
	cqspi_controller_init(cqspi);

	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	return spi_master_resume(master);
}

static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume);

static const struct cqspi_driver_platdata cdns_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata k2g_qspi = {
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata am654_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata intel_lgm_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata socfpga_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE
			| CQSPI_NO_SUPPORT_WR_COMPLETION
			| CQSPI_SLOW_SRAM,
};

static const struct cqspi_driver_platdata versal_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};

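/*
 * Match table for all supported integrations; the per-compatible quirk
 * data above is consumed in cqspi_probe() via of_device_get_match_data().
 * A minimal controller node (addresses and interrupt specifiers are
 * illustrative only) might look like:
 *
 *	spi@ff809000 {
 *		compatible = "cdns,qspi-nor";
 *		reg = <0xff809000 0x100>,	// controller registers
 *		      <0xffa00000 0x1000>;	// AHB data window
 *		interrupts = <0 100 4>;
 *		clocks = <&qspi_clk>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */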
static const struct of_device_id cqspi_dt_ids[] = {
	{
		.compatible = "cdns,qspi-nor",
		.data = &cdns_qspi,
	},
	{
		.compatible = "ti,k2g-qspi",
		.data = &k2g_qspi,
	},
	{
		.compatible = "ti,am654-ospi",
		.data = &am654_ospi,
	},
	{
		.compatible = "intel,lgm-qspi",
		.data = &intel_lgm_qspi,
	},
	{
		.compatible = "xlnx,versal-ospi-1.0",
		.data = &versal_ospi,
	},
	{
		.compatible = "intel,socfpga-qspi",
		.data = &socfpga_qspi,
	},
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, cqspi_dt_ids);

static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = &cqspi_dev_pm_ops,
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <[email protected]>");
MODULE_AUTHOR("Graham Moore <[email protected]>");
MODULE_AUTHOR("Vadivel Murugan R <[email protected]>");
MODULE_AUTHOR("Vignesh Raghavendra <[email protected]>");
MODULE_AUTHOR("Pratyush Yadav <[email protected]>");