spi-bcm-qspi.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
  4. *
  5. * Copyright 2016 Broadcom
  6. */
  7. #include <linux/clk.h>
  8. #include <linux/delay.h>
  9. #include <linux/device.h>
  10. #include <linux/init.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/io.h>
  13. #include <linux/ioport.h>
  14. #include <linux/kernel.h>
  15. #include <linux/module.h>
  16. #include <linux/of.h>
  17. #include <linux/of_irq.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/slab.h>
  20. #include <linux/spi/spi.h>
  21. #include <linux/spi/spi-mem.h>
  22. #include <linux/sysfs.h>
  23. #include <linux/types.h>
  24. #include "spi-bcm-qspi.h"
#define DRIVER_NAME "bcm_qspi"

/* BSPI register offsets */
#define BSPI_REVISION_ID 0x000
#define BSPI_SCRATCH 0x004
#define BSPI_MAST_N_BOOT_CTRL 0x008
#define BSPI_BUSY_STATUS 0x00c
#define BSPI_INTR_STATUS 0x010
#define BSPI_B0_STATUS 0x014
#define BSPI_B0_CTRL 0x018
#define BSPI_B1_STATUS 0x01c
#define BSPI_B1_CTRL 0x020
#define BSPI_STRAP_OVERRIDE_CTRL 0x024
#define BSPI_FLEX_MODE_ENABLE 0x028
#define BSPI_BITS_PER_CYCLE 0x02c
#define BSPI_BITS_PER_PHASE 0x030
#define BSPI_CMD_AND_MODE_BYTE 0x034
#define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
#define BSPI_BSPI_XOR_VALUE 0x03c
#define BSPI_BSPI_XOR_ENABLE 0x040
#define BSPI_BSPI_PIO_MODE_ENABLE 0x044
#define BSPI_BSPI_PIO_IODIR 0x048
#define BSPI_BSPI_PIO_DATA 0x04c

/* RAF (Read-Ahead FIFO) register offsets */
#define BSPI_RAF_START_ADDR 0x100
#define BSPI_RAF_NUM_WORDS 0x104
#define BSPI_RAF_CTRL 0x108
#define BSPI_RAF_FULLNESS 0x10c
#define BSPI_RAF_WATERMARK 0x110
#define BSPI_RAF_STATUS 0x114
#define BSPI_RAF_READ_DATA 0x118
#define BSPI_RAF_WORD_CNT 0x11c
#define BSPI_RAF_CURR_ADDR 0x120

/* Override mode masks */
#define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
#define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
#define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
#define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
#define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)

#define BSPI_ADDRLEN_3BYTES 3
#define BSPI_ADDRLEN_4BYTES 4

#define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)

#define BSPI_RAF_CTRL_START_MASK BIT(0)
#define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)

#define BSPI_BPP_MODE_SELECT_MASK BIT(8)
#define BSPI_BPP_ADDR_SELECT_MASK BIT(16)

#define BSPI_READ_LENGTH 256

/* MSPI register offsets */
#define MSPI_SPCR0_LSB 0x000
#define MSPI_SPCR0_MSB 0x004
#define MSPI_SPCR0_MSB_CPHA BIT(0)
#define MSPI_SPCR0_MSB_CPOL BIT(1)
#define MSPI_SPCR0_MSB_BITS_SHIFT 0x2
#define MSPI_SPCR1_LSB 0x008
#define MSPI_SPCR1_MSB 0x00c
#define MSPI_NEWQP 0x010
#define MSPI_ENDQP 0x014
#define MSPI_SPCR2 0x018
#define MSPI_MSPI_STATUS 0x020
#define MSPI_CPTQP 0x024
#define MSPI_SPCR3 0x028
#define MSPI_REV 0x02c
#define MSPI_TXRAM 0x040
#define MSPI_RXRAM 0x0c0
#define MSPI_CDRAM 0x140
#define MSPI_WRITE_LOCK 0x180

#define MSPI_MASTER_BIT BIT(7)

#define MSPI_NUM_CDRAM 16
#define MSPI_CDRAM_OUTP BIT(8)
#define MSPI_CDRAM_CONT_BIT BIT(7)
#define MSPI_CDRAM_BITSE_BIT BIT(6)
#define MSPI_CDRAM_DT_BIT BIT(5)
#define MSPI_CDRAM_PCS 0xf

#define MSPI_SPCR2_SPE BIT(6)
#define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)

#define MSPI_SPCR3_FASTBR BIT(0)
#define MSPI_SPCR3_FASTDT BIT(1)
#define MSPI_SPCR3_SYSCLKSEL_MASK GENMASK(11, 10)
#define MSPI_SPCR3_SYSCLKSEL_27 (MSPI_SPCR3_SYSCLKSEL_MASK & \
~(BIT(10) | BIT(11)))
#define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \
BIT(11))
#define MSPI_SPCR3_TXRXDAM_MASK GENMASK(4, 2)
#define MSPI_SPCR3_DAM_8BYTE 0
#define MSPI_SPCR3_DAM_16BYTE (BIT(2) | BIT(4))
#define MSPI_SPCR3_DAM_32BYTE (BIT(3) | BIT(5))
#define MSPI_SPCR3_HALFDUPLEX BIT(6)
#define MSPI_SPCR3_HDOUTTYPE BIT(7)
#define MSPI_SPCR3_DATA_REG_SZ BIT(8)
#define MSPI_SPCR3_CPHARX BIT(9)

#define MSPI_MSPI_STATUS_SPIF BIT(0)

#define INTR_BASE_BIT_SHIFT 0x02
#define INTR_COUNT 0x07

#define NUM_CHIPSELECT 4
#define QSPI_SPBR_MAX 255U
#define MSPI_BASE_FREQ 27000000UL

/* Flash opcodes for dual/quad I/O reads (3- and 4-byte address) */
#define OPCODE_DIOR 0xBB
#define OPCODE_QIOR 0xEB
#define OPCODE_DIOR_4B 0xBC
#define OPCODE_QIOR_4B 0xEC
#define MAX_CMD_SIZE 6

#define ADDR_4MB_MASK GENMASK(22, 0)

/* stop at end of transfer, no other reason */
#define TRANS_STATUS_BREAK_NONE 0
/* stop at end of spi_message */
#define TRANS_STATUS_BREAK_EOM 1
/* stop at end of spi_transfer if delay */
#define TRANS_STATUS_BREAK_DELAY 2
/* stop at end of spi_transfer if cs_change */
#define TRANS_STATUS_BREAK_CS_CHANGE 4
/* stop if we run out of bytes */
#define TRANS_STATUS_BREAK_NO_BYTES 8
/* events that make us stop filling TX slots */
#define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
TRANS_STATUS_BREAK_DELAY | \
TRANS_STATUS_BREAK_CS_CHANGE)
/* events that make us deassert CS */
#define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
TRANS_STATUS_BREAK_CS_CHANGE)

/*
 * Used for writing and reading data in the right order
 * to TXRAM and RXRAM when used as 32-bit registers respectively
 * (reverses the four bytes of a 32-bit value)
 */
#define swap4bytes(__val) \
((((__val) >> 24) & 0x000000FF) | (((__val) >> 8) & 0x0000FF00) | \
(((__val) << 8) & 0x00FF0000) | (((__val) << 24) & 0xFF000000))
/* Per-device SPI parameters cached to avoid redundant register writes */
struct bcm_qspi_parms {
	u32 speed_hz;
	u8 mode;		/* SPI mode bits (CPHA/CPOL, 3-wire, ...) */
	u8 bits_per_word;
};

/* Currently-programmed BSPI transfer mode (see bcm_qspi_bspi_set_mode) */
struct bcm_xfer_mode {
	bool flex_mode;		/* flex-mode registers vs strap override */
	unsigned int width;	/* data lane width (1/2/4) */
	unsigned int addrlen;	/* address length in bytes (3 or 4) */
	unsigned int hp;	/* "high performance" addr/mode lanes */
};

/* Indexes into bcm_qspi::base[] for the mapped register blocks */
enum base_type {
	MSPI,
	BSPI,
	CHIP_SELECT,
	BASEMAX,
};

/* How the IRQ line(s) are wired on this SoC */
enum irq_source {
	SINGLE_L2,
	MUXED_L1,
};

struct bcm_qspi_irq {
	const char *irq_name;
	const irq_handler_t irq_handler;
	int irq_source;
	u32 mask;
};

/* Binds an IRQ descriptor to its driver instance for the handler */
struct bcm_qspi_dev_id {
	const struct bcm_qspi_irq *irqp;
	void *dev;
};

/* Progress cursor within the current spi_message */
struct qspi_trans {
	struct spi_transfer *trans;
	int byte;			/* bytes consumed within trans */
	bool mspi_last_trans;
};
/* Driver-private state for one QSPI controller instance */
struct bcm_qspi {
	struct platform_device *pdev;
	struct spi_master *master;
	struct clk *clk;
	u32 base_clk;			/* current MSPI system clock (Hz) */
	u32 max_speed_hz;
	void __iomem *base[BASEMAX];	/* mapped register blocks, by base_type */

	/* Some SoCs provide custom interrupt status register(s) */
	struct bcm_qspi_soc_intc *soc_intc;

	struct bcm_qspi_parms last_parms;	/* last programmed parameters */
	struct qspi_trans trans_pos;		/* cursor into current message */
	int curr_cs;
	int bspi_maj_rev;
	int bspi_min_rev;
	int bspi_enabled;
	const struct spi_mem_op *bspi_rf_op;	/* in-flight BSPI RAF read */
	u32 bspi_rf_op_idx;			/* word index into rx buffer */
	u32 bspi_rf_op_len;			/* bytes remaining */
	u32 bspi_rf_op_status;
	struct bcm_xfer_mode xfer_mode;
	u32 s3_strap_override_ctrl;		/* saved across S3 suspend */
	bool bspi_mode;				/* controller has a BSPI block */
	bool big_endian;
	int num_irqs;
	struct bcm_qspi_dev_id *dev_ids;
	struct completion mspi_done;
	struct completion bspi_done;
	u8 mspi_maj_rev;
	u8 mspi_min_rev;
	bool mspi_spcr3_sysclk;
};
/* True when this controller instance has a BSPI (boot SPI) block */
static inline bool has_bspi(struct bcm_qspi *qspi)
{
	return qspi->bspi_mode;
}
  221. /* hardware supports spcr3 and fast baud-rate */
  222. static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi)
  223. {
  224. if (!has_bspi(qspi) &&
  225. ((qspi->mspi_maj_rev >= 1) &&
  226. (qspi->mspi_min_rev >= 5)))
  227. return true;
  228. return false;
  229. }
  230. /* hardware supports sys clk 108Mhz */
  231. static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
  232. {
  233. if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk ||
  234. ((qspi->mspi_maj_rev >= 1) &&
  235. (qspi->mspi_min_rev >= 6))))
  236. return true;
  237. return false;
  238. }
  239. static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
  240. {
  241. if (bcm_qspi_has_fastbr(qspi))
  242. return (bcm_qspi_has_sysclk_108(qspi) ? 4 : 1);
  243. else
  244. return 8;
  245. }
  246. static u32 bcm_qspi_calc_spbr(u32 clk_speed_hz,
  247. const struct bcm_qspi_parms *xp)
  248. {
  249. u32 spbr = 0;
  250. /* SPBR = System Clock/(2 * SCK Baud Rate) */
  251. if (xp->speed_hz)
  252. spbr = clk_speed_hz / (xp->speed_hz * 2);
  253. return spbr;
  254. }
/* Read qspi controller register; endian handling is in bcm_qspi_readl() */
static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
				unsigned int offset)
{
	return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
}
/* Write qspi controller register; endian handling is in bcm_qspi_writel() */
static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
				  unsigned int offset, unsigned int data)
{
	bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
}
  267. /* BSPI helpers */
  268. static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
  269. {
  270. int i;
  271. /* this should normally finish within 10us */
  272. for (i = 0; i < 1000; i++) {
  273. if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
  274. return 0;
  275. udelay(1);
  276. }
  277. dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
  278. return -EIO;
  279. }
  280. static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
  281. {
  282. if (qspi->bspi_maj_rev < 4)
  283. return true;
  284. return false;
  285. }
/*
 * Flush the two BSPI prefetch buffers (B0/B1).  The 'flush' field is
 * edge-triggered, so write 1 then 0 to each buffer's control register.
 */
static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	/* Force rising edge for the b0/b1 'flush' field */
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
  295. static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
  296. {
  297. return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
  298. BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
  299. }
/* Pop one 32-bit word from the RAF read FIFO, in host byte order */
static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
{
	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);

	/* BSPI v3 LR is LE only, convert data to host endianness */
	if (bcm_qspi_bspi_ver_three(qspi))
		data = le32_to_cpu(data);

	return data;
}
/* Kick off a RAF (linear read) session once the BSPI block is idle */
static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_START_MASK);
}
/* Abort/clear the current RAF session and flush the prefetch buffers */
static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
{
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_CLEAR_MASK);
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
}
/*
 * Drain the RAF read FIFO into the current spi-mem read buffer
 * (qspi->bspi_rf_op).  Whole 32-bit words are stored directly while the
 * destination stays 4-byte aligned and >= 4 bytes remain; the tail (or
 * an unaligned buffer) is copied out byte by byte, LSB first.
 */
static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
{
	u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
	u32 data = 0;

	dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
		qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);

	while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
		data = bcm_qspi_bspi_lr_read_fifo(qspi);
		if (likely(qspi->bspi_rf_op_len >= 4) &&
		    IS_ALIGNED((uintptr_t)buf, 4)) {
			buf[qspi->bspi_rf_op_idx++] = data;
			qspi->bspi_rf_op_len -= 4;
		} else {
			/* Read out remaining bytes, make sure*/
			u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];

			/* force LE order so bytes come out LSB first */
			data = cpu_to_le32(data);
			while (qspi->bspi_rf_op_len) {
				*cbuf++ = (u8)data;
				data >>= 8;
				qspi->bspi_rf_op_len--;
			}
		}
	}
}
/*
 * Latch a new BSPI flex-mode configuration.  Flex mode is disabled
 * first and re-enabled last so the bpc/bpp/cmd values take effect
 * atomically.
 */
static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
					  int bpp, int bpc, int flex_mode)
{
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
	bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
}
  353. static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
  354. const struct spi_mem_op *op, int hp)
  355. {
  356. int bpc = 0, bpp = 0;
  357. u8 command = op->cmd.opcode;
  358. int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
  359. int addrlen = op->addr.nbytes;
  360. int flex_mode = 1;
  361. dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
  362. width, addrlen, hp);
  363. if (addrlen == BSPI_ADDRLEN_4BYTES)
  364. bpp = BSPI_BPP_ADDR_SELECT_MASK;
  365. if (op->dummy.nbytes)
  366. bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
  367. switch (width) {
  368. case SPI_NBITS_SINGLE:
  369. if (addrlen == BSPI_ADDRLEN_3BYTES)
  370. /* default mode, does not need flex_cmd */
  371. flex_mode = 0;
  372. break;
  373. case SPI_NBITS_DUAL:
  374. bpc = 0x00000001;
  375. if (hp) {
  376. bpc |= 0x00010100; /* address and mode are 2-bit */
  377. bpp = BSPI_BPP_MODE_SELECT_MASK;
  378. }
  379. break;
  380. case SPI_NBITS_QUAD:
  381. bpc = 0x00000002;
  382. if (hp) {
  383. bpc |= 0x00020200; /* address and mode are 4-bit */
  384. bpp |= BSPI_BPP_MODE_SELECT_MASK;
  385. }
  386. break;
  387. default:
  388. return -EINVAL;
  389. }
  390. bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
  391. return 0;
  392. }
/*
 * Configure the read mode via the strap override control register
 * (used on BSPI >= v4 when the override bit is already in effect,
 * instead of flex mode).  Sets the lane-width and 4-byte-address
 * override bits, then latches the opcode with flex mode disabled.
 *
 * Returns 0 on success, -EINVAL for an unsupported data bus width.
 */
static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
				      const struct spi_mem_op *op, int hp)
{
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;
	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);

	dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
		width, addrlen, hp);

	switch (width) {
	case SPI_NBITS_SINGLE:
		/* clear quad/dual mode */
		data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
			  BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
		break;
	case SPI_NBITS_QUAD:
		/* clear dual mode and set quad mode */
		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
		break;
	case SPI_NBITS_DUAL:
		/* clear quad mode set dual mode */
		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
		break;
	default:
		return -EINVAL;
	}

	if (addrlen == BSPI_ADDRLEN_4BYTES)
		/* set 4byte mode*/
		data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
	else
		/* clear 4 byte mode */
		data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;

	/* set the override mode */
	data |= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
	bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
	bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);

	return 0;
}
/*
 * Select and program the BSPI read mode for @op: flex mode by default,
 * or the strap-override path on >= v4 parts where the override bit is
 * already active (in hardware or in the saved S3 state).  Caches the
 * resulting width/addrlen/hp in qspi->xfer_mode on success.
 *
 * Returns 0 on success, or the error from the chosen setup helper.
 */
static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
				  const struct spi_mem_op *op, int hp)
{
	int error = 0;
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;

	/* default mode */
	qspi->xfer_mode.flex_mode = true;

	if (!bcm_qspi_bspi_ver_three(qspi)) {
		u32 val, mask;

		val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
		mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
		if (val & mask || qspi->s3_strap_override_ctrl & mask) {
			/* override already in use: stay on that path */
			qspi->xfer_mode.flex_mode = false;
			bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
			error = bcm_qspi_bspi_set_override(qspi, op, hp);
		}
	}

	if (qspi->xfer_mode.flex_mode)
		error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);

	if (error) {
		dev_warn(&qspi->pdev->dev,
			 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
			 width, addrlen, hp);
	} else if (qspi->xfer_mode.width != width ||
		   qspi->xfer_mode.addrlen != addrlen ||
		   qspi->xfer_mode.hp != hp) {
		/* remember the new mode and log the change */
		qspi->xfer_mode.width = width;
		qspi->xfer_mode.addrlen = addrlen;
		qspi->xfer_mode.hp = hp;
		dev_dbg(&qspi->pdev->dev,
			"cs:%d %d-lane output, %d-byte address%s\n",
			qspi->curr_cs,
			qspi->xfer_mode.width,
			qspi->xfer_mode.addrlen,
			qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
	}

	return error;
}
/*
 * Hand the flash bus back to the BSPI block (MAST_N_BOOT_CTRL = 0).
 * No-op if the controller has no BSPI or BSPI is already the master;
 * flushes the prefetch buffers before switching.
 */
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 1;
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
		return;

	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
	udelay(1);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
	udelay(1);
}
/*
 * Give the flash bus to MSPI (MAST_N_BOOT_CTRL = 1).  No-op if the
 * controller has no BSPI or MSPI is already the master; waits for the
 * BSPI block to go idle before switching.
 */
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 0;
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
		return;

	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
	udelay(1);
}
/*
 * Route the controller to chip select @cs via the external CS mux
 * register (one-hot in the low byte), when one is mapped.  Skips the
 * write if the mux already selects @cs; always records curr_cs.
 */
static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
{
	u32 rd = 0;
	u32 wr = 0;

	if (cs >= 0 && qspi->base[CHIP_SELECT]) {
		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
		wr = (rd & ~0xff) | (1 << cs);
		if (rd == wr)
			return;
		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
		/* let the mux settle before traffic starts */
		usleep_range(10, 20);
	}

	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
	qspi->curr_cs = cs;
}
  509. static bool bcmspi_parms_did_change(const struct bcm_qspi_parms * const cur,
  510. const struct bcm_qspi_parms * const prev)
  511. {
  512. return (cur->speed_hz != prev->speed_hz) ||
  513. (cur->mode != prev->mode) ||
  514. (cur->bits_per_word != prev->bits_per_word);
  515. }
/* MSPI helpers */
/*
 * Program the MSPI clock/format registers from @xp: SPCR0_MSB
 * (bits-per-word, CPHA/CPOL), SPCR3 on controllers that have it
 * (fast baud, half duplex, sysclk select, data register size / RAM
 * access mode), and SPCR0_LSB (SPBR baud divisor).  Returns early
 * when nothing changed since the last call; caches @xp on exit.
 */
static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
				  const struct bcm_qspi_parms *xp)
{
	u32 spcr, spbr = 0;

	if (!bcmspi_parms_did_change(xp, &qspi->last_parms))
		return;

	if (!qspi->mspi_maj_rev)
		/* legacy controller */
		spcr = MSPI_MASTER_BIT;
	else
		spcr = 0;

	/*
	 * Bits per transfer.  BITS determines the number of data bits
	 * transferred if the command control bit (BITSE of a
	 * CDRAM Register) is equal to 1.
	 * If CDRAM BITSE is equal to 0, 8 data bits are transferred
	 * regardless.  (16 and 64 encode as BITS field 0.)
	 */
	if (xp->bits_per_word != 16 && xp->bits_per_word != 64)
		spcr |= xp->bits_per_word << MSPI_SPCR0_MSB_BITS_SHIFT;

	spcr |= xp->mode & (MSPI_SPCR0_MSB_CPHA | MSPI_SPCR0_MSB_CPOL);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);

	if (bcm_qspi_has_fastbr(qspi)) {
		spcr = 0;

		/* enable fastbr */
		spcr |= MSPI_SPCR3_FASTBR;

		if (xp->mode & SPI_3WIRE)
			spcr |= MSPI_SPCR3_HALFDUPLEX | MSPI_SPCR3_HDOUTTYPE;

		if (bcm_qspi_has_sysclk_108(qspi)) {
			/* check requested baud rate before moving to 108Mhz */
			spbr = bcm_qspi_calc_spbr(MSPI_BASE_FREQ * 4, xp);
			if (spbr > QSPI_SPBR_MAX) {
				/* use SYSCLK_27Mhz for slower baud rates */
				spcr &= ~MSPI_SPCR3_SYSCLKSEL_MASK;
				qspi->base_clk = MSPI_BASE_FREQ;
			} else {
				/* SYSCLK_108Mhz */
				spcr |= MSPI_SPCR3_SYSCLKSEL_108;
				qspi->base_clk = MSPI_BASE_FREQ * 4;
			}
		}

		if (xp->bits_per_word > 16) {
			/* data_reg_size 1 (64bit) */
			spcr |= MSPI_SPCR3_DATA_REG_SZ;
			/* TxRx RAM data access mode 2 for 32B and set fastdt */
			spcr |= MSPI_SPCR3_DAM_32BYTE | MSPI_SPCR3_FASTDT;
			/*
			 * Set length of delay after transfer
			 * DTL from 0(256) to 1
			 */
			bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 1);
		} else {
			/* data_reg_size[8] = 0 */
			spcr &= ~(MSPI_SPCR3_DATA_REG_SZ);
			/*
			 * TxRx RAM access mode 8B
			 * and disable fastdt
			 */
			spcr &= ~(MSPI_SPCR3_DAM_32BYTE);
		}
		bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
	}

	/* SCK Baud Rate = System Clock/(2 * SPBR) */
	qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
	spbr = bcm_qspi_calc_spbr(qspi->base_clk, xp);
	spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr);

	qspi->last_parms = *xp;
}
  586. static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
  587. struct spi_device *spi,
  588. struct spi_transfer *trans)
  589. {
  590. struct bcm_qspi_parms xp;
  591. xp.speed_hz = trans->speed_hz;
  592. xp.bits_per_word = trans->bits_per_word;
  593. xp.mode = spi->mode;
  594. bcm_qspi_hw_set_parms(qspi, &xp);
  595. }
  596. static int bcm_qspi_setup(struct spi_device *spi)
  597. {
  598. struct bcm_qspi_parms *xp;
  599. if (spi->bits_per_word > 64)
  600. return -EINVAL;
  601. xp = spi_get_ctldata(spi);
  602. if (!xp) {
  603. xp = kzalloc(sizeof(*xp), GFP_KERNEL);
  604. if (!xp)
  605. return -ENOMEM;
  606. spi_set_ctldata(spi, xp);
  607. }
  608. xp->speed_hz = spi->max_speed_hz;
  609. xp->mode = spi->mode;
  610. if (spi->bits_per_word)
  611. xp->bits_per_word = spi->bits_per_word;
  612. else
  613. xp->bits_per_word = 8;
  614. return 0;
  615. }
  616. static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
  617. struct qspi_trans *qt)
  618. {
  619. if (qt->mspi_last_trans &&
  620. spi_transfer_is_last(qspi->master, qt->trans))
  621. return true;
  622. else
  623. return false;
  624. }
/*
 * Account for one just-completed word (1/2/4/8 bytes depending on
 * bits_per_word) in @qt.  When that exhausts the spi_transfer, report
 * why the caller should stop via TRANS_STATUS_BREAK_* bits, filtered
 * by @flags for the delay/cs_change reasons, and clear qt->trans.
 */
static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
					struct qspi_trans *qt, int flags)
{
	int ret = TRANS_STATUS_BREAK_NONE;

	/* count the last transferred bytes */
	if (qt->trans->bits_per_word <= 8)
		qt->byte++;
	else if (qt->trans->bits_per_word <= 16)
		qt->byte += 2;
	else if (qt->trans->bits_per_word <= 32)
		qt->byte += 4;
	else if (qt->trans->bits_per_word <= 64)
		qt->byte += 8;

	if (qt->byte >= qt->trans->len) {
		/* we're at the end of the spi_transfer */
		/* in TX mode, need to pause for a delay or CS change */
		if (qt->trans->delay.value &&
		    (flags & TRANS_STATUS_BREAK_DELAY))
			ret |= TRANS_STATUS_BREAK_DELAY;
		if (qt->trans->cs_change &&
		    (flags & TRANS_STATUS_BREAK_CS_CHANGE))
			ret |= TRANS_STATUS_BREAK_CS_CHANGE;

		if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
			ret |= TRANS_STATUS_BREAK_EOM;
		else
			ret |= TRANS_STATUS_BREAK_NO_BYTES;

		qt->trans = NULL;
	}

	dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
		qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
	return ret;
}
  657. static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
  658. {
  659. u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
  660. /* mask out reserved bits */
  661. return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
  662. }
  663. static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
  664. {
  665. u32 reg_offset = MSPI_RXRAM;
  666. u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
  667. u32 msb_offset = reg_offset + (slot << 3);
  668. return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
  669. ((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
  670. }
  671. static inline u32 read_rxram_slot_u32(struct bcm_qspi *qspi, int slot)
  672. {
  673. u32 reg_offset = MSPI_RXRAM;
  674. u32 offset = reg_offset + (slot << 3);
  675. u32 val;
  676. val = bcm_qspi_read(qspi, MSPI, offset);
  677. val = swap4bytes(val);
  678. return val;
  679. }
  680. static inline u64 read_rxram_slot_u64(struct bcm_qspi *qspi, int slot)
  681. {
  682. u32 reg_offset = MSPI_RXRAM;
  683. u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
  684. u32 msb_offset = reg_offset + (slot << 3);
  685. u32 msb, lsb;
  686. msb = bcm_qspi_read(qspi, MSPI, msb_offset);
  687. msb = swap4bytes(msb);
  688. lsb = bcm_qspi_read(qspi, MSPI, lsb_offset);
  689. lsb = swap4bytes(lsb);
  690. return ((u64)msb << 32 | lsb);
  691. }
/*
 * Copy @slots completed RX slots from MSPI RXRAM into the current
 * transfer's rx_buf (when one is set), advancing qspi->trans_pos as
 * bytes are consumed.  Per-slot word size follows bits_per_word.
 */
static void read_from_hw(struct bcm_qspi *qspi, int slots)
{
	struct qspi_trans tp;
	int slot;

	bcm_qspi_disable_bspi(qspi);

	if (slots > MSPI_NUM_CDRAM) {
		/* should never happen */
		dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
		return;
	}

	tp = qspi->trans_pos;

	for (slot = 0; slot < slots; slot++) {
		if (tp.trans->bits_per_word <= 8) {
			u8 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
			dev_dbg(&qspi->pdev->dev, "RD %02x\n",
				buf ? buf[tp.byte] : 0x0);
		} else if (tp.trans->bits_per_word <= 16) {
			u16 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
								       slot);
			dev_dbg(&qspi->pdev->dev, "RD %04x\n",
				buf ? buf[tp.byte / 2] : 0x0);
		} else if (tp.trans->bits_per_word <= 32) {
			u32 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 4] = read_rxram_slot_u32(qspi,
								       slot);
			dev_dbg(&qspi->pdev->dev, "RD %08x\n",
				buf ? buf[tp.byte / 4] : 0x0);
		} else if (tp.trans->bits_per_word <= 64) {
			u64 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 8] = read_rxram_slot_u64(qspi,
								       slot);
			dev_dbg(&qspi->pdev->dev, "RD %llx\n",
				buf ? buf[tp.byte / 8] : 0x0);
		}

		/* advance the byte cursor; no break reasons on RX side */
		update_qspi_trans_byte_count(qspi, &tp,
					     TRANS_STATUS_BREAK_NONE);
	}

	qspi->trans_pos = tp;
}
  737. static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
  738. u8 val)
  739. {
  740. u32 reg_offset = MSPI_TXRAM + (slot << 3);
  741. /* mask out reserved bits */
  742. bcm_qspi_write(qspi, MSPI, reg_offset, val);
  743. }
  744. static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
  745. u16 val)
  746. {
  747. u32 reg_offset = MSPI_TXRAM;
  748. u32 msb_offset = reg_offset + (slot << 3);
  749. u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
  750. bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
  751. bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
  752. }
  753. static inline void write_txram_slot_u32(struct bcm_qspi *qspi, int slot,
  754. u32 val)
  755. {
  756. u32 reg_offset = MSPI_TXRAM;
  757. u32 msb_offset = reg_offset + (slot << 3);
  758. bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(val));
  759. }
  760. static inline void write_txram_slot_u64(struct bcm_qspi *qspi, int slot,
  761. u64 val)
  762. {
  763. u32 reg_offset = MSPI_TXRAM;
  764. u32 msb_offset = reg_offset + (slot << 3);
  765. u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
  766. u32 msb = upper_32_bits(val);
  767. u32 lsb = lower_32_bits(val);
  768. bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(msb));
  769. bcm_qspi_write(qspi, MSPI, lsb_offset, swap4bytes(lsb));
  770. }
  771. static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
  772. {
  773. return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
  774. }
  775. static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
  776. {
  777. bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
  778. }
/*
 * Fill the MSPI TX/command RAM from the current transfer position and
 * start the queue.  Completion is signalled later via the MSPI ISR.
 *
 * Return number of slots written (0 means there was nothing to send).
 */
static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
{
	struct qspi_trans tp;
	int slot = 0, tstatus = 0;
	u32 mspi_cdram = 0;

	bcm_qspi_disable_bspi(qspi);
	tp = qspi->trans_pos;
	bcm_qspi_update_parms(qspi, spi, tp.trans);

	/* Run until end of transfer or reached the max data */
	while (!tstatus && slot < MSPI_NUM_CDRAM) {
		/* keep CS asserted between words by default */
		mspi_cdram = MSPI_CDRAM_CONT_BIT;
		if (tp.trans->bits_per_word <= 8) {
			const u8 *buf = tp.trans->tx_buf;
			/* tx_buf may be NULL for rx-only transfers */
			u8 val = buf ? buf[tp.byte] : 0x00;

			write_txram_slot_u8(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
		} else if (tp.trans->bits_per_word <= 16) {
			const u16 *buf = tp.trans->tx_buf;
			u16 val = buf ? buf[tp.byte / 2] : 0x0000;

			write_txram_slot_u16(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
		} else if (tp.trans->bits_per_word <= 32) {
			const u32 *buf = tp.trans->tx_buf;
			u32 val = buf ? buf[tp.byte/4] : 0x0;

			write_txram_slot_u32(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %08x\n", val);
		} else if (tp.trans->bits_per_word <= 64) {
			const u64 *buf = tp.trans->tx_buf;
			u64 val = (buf ? buf[tp.byte/8] : 0x0);

			/* use the length of delay from SPCR1_LSB */
			if (bcm_qspi_has_fastbr(qspi))
				mspi_cdram |= MSPI_CDRAM_DT_BIT;

			write_txram_slot_u64(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %llx\n", val);
		}

		/* BITSE is set for any word size wider than 8 bits */
		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
			       MSPI_CDRAM_BITSE_BIT);

		/* set 3-wire half-duplex mode, data from master to slave */
		if ((spi->mode & SPI_3WIRE) && tp.trans->tx_buf)
			mspi_cdram |= MSPI_CDRAM_OUTP;

		if (has_bspi(qspi))
			mspi_cdram &= ~1;
		else
			mspi_cdram |= (~(1 << spi->chip_select) &
				       MSPI_CDRAM_PCS);

		write_cdram_slot(qspi, slot, mspi_cdram);

		tstatus = update_qspi_trans_byte_count(qspi, &tp,
						       TRANS_STATUS_BREAK_TX);
		slot++;
	}

	if (!slot) {
		dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
		goto done;
	}

	dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);

	/*
	 * case 1) EOM =1, cs_change =0: SSb inactive
	 * case 2) EOM =1, cs_change =1: SSb stay active
	 * case 3) EOM =0, cs_change =0: SSb stay active
	 * case 4) EOM =0, cs_change =1: SSb inactive
	 */
	if (((tstatus & TRANS_STATUS_BREAK_DESELECT)
	     == TRANS_STATUS_BREAK_CS_CHANGE) ||
	    ((tstatus & TRANS_STATUS_BREAK_DESELECT)
	     == TRANS_STATUS_BREAK_EOM)) {
		/* drop CONT on the last slot so CS deasserts after it */
		mspi_cdram = read_cdram_slot(qspi, slot - 1) &
			~MSPI_CDRAM_CONT_BIT;
		write_cdram_slot(qspi, slot - 1, mspi_cdram);
	}

	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);

	/* Must flush previous writes before starting MSPI operation */
	mb();
	/* Set cont | spe | spifie */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);

done:
	return slot;
}
/*
 * Execute a flash read through the BSPI RAF (read-ahead FIFO) engine.
 *
 * The read is broken into BSPI_READ_LENGTH sized RAF sessions; each
 * session is started and then waited on (100 ms timeout) for the
 * session-done interrupt, which fills op->data.buf via the BSPI ISR.
 *
 * Return: 0 on success, -EIO for unsupported addressing on BSPI v3,
 * -ETIMEDOUT if a session does not complete.
 */
static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
				     const struct spi_mem_op *op)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	u32 addr = 0, len, rdlen, len_words, from = 0;
	int ret = 0;
	unsigned long timeo = msecs_to_jiffies(100);
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	/* BSPI v3 cannot do 4-byte addressing */
	if (bcm_qspi_bspi_ver_three(qspi))
		if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
			return -EIO;

	from = op->addr.val;
	if (!spi->cs_gpiod)
		bcm_qspi_chip_select(qspi, spi->chip_select);
	bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/*
	 * when using flex mode we need to send
	 * the upper address byte to bspi
	 */
	if (!bcm_qspi_bspi_ver_three(qspi)) {
		addr = from & 0xff000000;
		bcm_qspi_write(qspi, BSPI,
			       BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
	}

	if (!qspi->xfer_mode.flex_mode)
		addr = from;
	else
		addr = from & 0x00ffffff;

	/* remap the raw flash offset into the BSPI v3 window */
	if (bcm_qspi_bspi_ver_three(qspi) == true)
		addr = (addr + 0xc00000) & 0xffffff;

	/*
	 * read into the entire buffer by breaking the reads
	 * into RAF buffer read lengths
	 */
	len = op->data.nbytes;
	qspi->bspi_rf_op_idx = 0;

	do {
		if (len > BSPI_READ_LENGTH)
			rdlen = BSPI_READ_LENGTH;
		else
			rdlen = len;

		reinit_completion(&qspi->bspi_done);
		bcm_qspi_enable_bspi(qspi);
		/* round up to whole 32-bit words for the RAF engine */
		len_words = (rdlen + 3) >> 2;
		qspi->bspi_rf_op = op;
		qspi->bspi_rf_op_status = 0;
		qspi->bspi_rf_op_len = rdlen;
		dev_dbg(&qspi->pdev->dev,
			"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
		if (qspi->soc_intc) {
			/*
			 * clear soc MSPI and BSPI interrupts and enable
			 * BSPI interrupts.
			 */
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
			soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
		}

		/* Must flush previous writes before starting BSPI operation */
		mb();
		bcm_qspi_bspi_lr_start(qspi);
		if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
			ret = -ETIMEDOUT;
			break;
		}

		/* set msg return length */
		addr += rdlen;
		len -= rdlen;
	} while (len);

	return ret;
}
/*
 * spi_master->transfer_one() implementation: pump one spi_transfer
 * through the MSPI engine in MSPI_NUM_CDRAM-slot chunks, waiting up to
 * 100 ms for the MSPI-done interrupt between chunks.
 *
 * Return: 0 on success, -ETIMEDOUT if a chunk never completes.
 */
static int bcm_qspi_transfer_one(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *trans)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(master);
	int slots;
	unsigned long timeo = msecs_to_jiffies(100);

	if (!spi->cs_gpiod)
		bcm_qspi_chip_select(qspi, spi->chip_select);

	qspi->trans_pos.trans = trans;
	qspi->trans_pos.byte = 0;

	while (qspi->trans_pos.byte < trans->len) {
		reinit_completion(&qspi->mspi_done);

		slots = write_to_hw(qspi, spi);
		if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
			return -ETIMEDOUT;
		}

		read_from_hw(qspi, slots);
	}

	/* restore the BSPI flash-read path after the MSPI transfer */
	bcm_qspi_enable_bspi(qspi);

	return 0;
}
  957. static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
  958. const struct spi_mem_op *op)
  959. {
  960. struct spi_master *master = spi->master;
  961. struct bcm_qspi *qspi = spi_master_get_devdata(master);
  962. struct spi_transfer t[2];
  963. u8 cmd[6] = { };
  964. int ret, i;
  965. memset(cmd, 0, sizeof(cmd));
  966. memset(t, 0, sizeof(t));
  967. /* tx */
  968. /* opcode is in cmd[0] */
  969. cmd[0] = op->cmd.opcode;
  970. for (i = 0; i < op->addr.nbytes; i++)
  971. cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
  972. t[0].tx_buf = cmd;
  973. t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
  974. t[0].bits_per_word = spi->bits_per_word;
  975. t[0].tx_nbits = op->cmd.buswidth;
  976. /* lets mspi know that this is not last transfer */
  977. qspi->trans_pos.mspi_last_trans = false;
  978. ret = bcm_qspi_transfer_one(master, spi, &t[0]);
  979. /* rx */
  980. qspi->trans_pos.mspi_last_trans = true;
  981. if (!ret) {
  982. /* rx */
  983. t[1].rx_buf = op->data.buf.in;
  984. t[1].len = op->data.nbytes;
  985. t[1].rx_nbits = op->data.buswidth;
  986. t[1].bits_per_word = spi->bits_per_word;
  987. ret = bcm_qspi_transfer_one(master, spi, &t[1]);
  988. }
  989. return ret;
  990. }
/*
 * spi-mem exec_op entry point.  Only flash data reads are supported;
 * the BSPI fast-read path is used when present and the access is
 * 4-byte aligned, otherwise the request falls back to MSPI.
 */
static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	struct spi_device *spi = mem->spi;
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	int ret = 0;
	bool mspi_read = false;
	u32 addr = 0, len;
	u_char *buf;

	/* only data-in ops with a 1..4 byte address are supported */
	if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -ENOTSUPP;

	buf = op->data.buf.in;
	addr = op->addr.val;
	len = op->data.nbytes;

	if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
		/*
		 * The address coming into this function is a raw flash offset.
		 * But for BSPI <= V3, we need to convert it to a remapped BSPI
		 * address. If it crosses a 4MB boundary, just revert back to
		 * using MSPI.
		 */
		addr = (addr + 0xc00000) & 0xffffff;
		if ((~ADDR_4MB_MASK & addr) ^
		    (~ADDR_4MB_MASK & (addr + len - 1)))
			mspi_read = true;
	}

	/* non-aligned and very short transfers are handled by MSPI */
	if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
	    len < 4)
		mspi_read = true;

	if (!has_bspi(qspi) || mspi_read)
		return bcm_qspi_mspi_exec_mem_op(spi, op);

	ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
	if (!ret)
		ret = bcm_qspi_bspi_exec_mem_op(spi, op);

	return ret;
}
  1029. static void bcm_qspi_cleanup(struct spi_device *spi)
  1030. {
  1031. struct bcm_qspi_parms *xp = spi_get_ctldata(spi);
  1032. kfree(xp);
  1033. }
/*
 * MSPI level-2 interrupt handler: on SPIF (transfer finished), clears
 * the status bit, acks the SoC-level interrupt if present, and wakes
 * the waiter on qspi->mspi_done.
 */
static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);

	if (status & MSPI_MSPI_STATUS_SPIF) {
		struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
		/* clear interrupt */
		status &= ~MSPI_MSPI_STATUS_SPIF;
		bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
		if (qspi->soc_intc)
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);

		complete(&qspi->mspi_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
/*
 * BSPI linear-read level-2 interrupt handler: drains the RAF FIFO into
 * the pending spi_mem_op buffer and, once the whole session has been
 * read (bspi_rf_op_len hits 0), completes qspi->bspi_done.
 */
static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
	/* without a SoC intc, status comes from the per-IRQ mask */
	u32 status = qspi_dev_id->irqp->mask;

	if (qspi->bspi_enabled && qspi->bspi_rf_op) {
		bcm_qspi_bspi_lr_data_read(qspi);
		if (qspi->bspi_rf_op_len == 0) {
			qspi->bspi_rf_op = NULL;
			if (qspi->soc_intc) {
				/* disable soc BSPI interrupt */
				soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
							   false);
				/* indicate done */
				status = INTR_BSPI_LR_SESSION_DONE_MASK;
			}

			/* on error, abort the session; else flush prefetch */
			if (qspi->bspi_rf_op_status)
				bcm_qspi_bspi_lr_clear(qspi);
			else
				bcm_qspi_bspi_flush_prefetch_buffers(qspi);
		}

		if (qspi->soc_intc)
			/* clear soc BSPI interrupt */
			soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
	}

	/* only session-done wakes the waiter, and only when fully drained */
	status &= INTR_BSPI_LR_SESSION_DONE_MASK;
	if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
		complete(&qspi->bspi_done);

	return IRQ_HANDLED;
}
/*
 * BSPI linear-read error interrupt handler: records -EIO so the
 * waiter sees a failed session, acks the SoC interrupt, and completes
 * qspi->bspi_done to unblock the read path.
 */
static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	dev_err(&qspi->pdev->dev, "BSPI INT error\n");
	qspi->bspi_rf_op_status = -EIO;
	if (qspi->soc_intc)
		/* clear soc interrupt */
		soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);

	complete(&qspi->bspi_done);
	return IRQ_HANDLED;
}
  1095. static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
  1096. {
  1097. struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
  1098. struct bcm_qspi *qspi = qspi_dev_id->dev;
  1099. struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
  1100. irqreturn_t ret = IRQ_NONE;
  1101. if (soc_intc) {
  1102. u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
  1103. if (status & MSPI_DONE)
  1104. ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
  1105. else if (status & BSPI_DONE)
  1106. ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
  1107. else if (status & BSPI_ERR)
  1108. ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
  1109. }
  1110. return ret;
  1111. }
/*
 * Interrupt descriptor table: per-source L2 handlers plus one muxed L1
 * entry.  bcm_qspi_probe() walks this table and requests whichever
 * interrupts the platform actually provides.
 */
static const struct bcm_qspi_irq qspi_irq_tab[] = {
	{
		.irq_name = "spi_lr_fullness_reached",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
	},
	{
		.irq_name = "spi_lr_session_aborted",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
	},
	{
		.irq_name = "spi_lr_impatient",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_IMPATIENT_MASK,
	},
	{
		.irq_name = "spi_lr_session_done",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_DONE_MASK,
	},
#ifdef QSPI_INT_DEBUG
	/* this interrupt is for debug purposes only, dont request irq */
	{
		.irq_name = "spi_lr_overread",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_OVERREAD_MASK,
	},
#endif
	{
		.irq_name = "mspi_done",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_DONE_MASK,
	},
	{
		.irq_name = "mspi_halted",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_HALTED_MASK,
	},
	{
		/* single muxed L1 interrupt source */
		.irq_name = "spi_l1_intr",
		.irq_handler = bcm_qspi_l1_isr,
		.irq_source = MUXED_L1,
		.mask = QSPI_INTERRUPTS_ALL,
	},
};
/*
 * Read the BSPI revision and bring the BSPI block into a known idle
 * state (address XOR mapping forced on pre-v3, both banks cleared).
 */
static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
{
	u32 val = 0;

	val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
	qspi->bspi_maj_rev = (val >> 8) & 0xff;
	qspi->bspi_min_rev = val & 0xff;
	if (!(bcm_qspi_bspi_ver_three(qspi))) {
		/* Force mapping of BSPI address -> flash offset */
		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
	}
	/* mark enabled first so the disable call actually touches the hw */
	qspi->bspi_enabled = 1;
	bcm_qspi_disable_bspi(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
/*
 * Reset the MSPI block to driver defaults (SPI mode 3, 8 bits per
 * word, max supported speed) and, when present, initialize BSPI.
 */
static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
{
	struct bcm_qspi_parms parms;

	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
	/* 0x20 = spifie (per the "cont | spe | spifie" = 0xe0 encoding) */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);

	parms.mode = SPI_MODE_3;
	parms.bits_per_word = 8;
	parms.speed_hz = qspi->max_speed_hz;
	bcm_qspi_hw_set_parms(qspi, &parms);

	if (has_bspi(qspi))
		bcm_qspi_bspi_init(qspi);
}
/*
 * Quiesce the MSPI block: disable SPCR2, drop the BSPI write lock if
 * present, and clear any latched status so no stale interrupt fires.
 */
static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
{
	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);

	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/* clear interrupt */
	bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status & ~1);
}
/* spi-mem hooks: only exec_op is implemented. */
static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
	.exec_op = bcm_qspi_exec_mem_op,
};

/* Per-compatible feature flags selected via the of_match table. */
struct bcm_qspi_data {
	bool has_mspi_rev;	/* controller exposes an MSPI_REV register */
	bool has_spcr3_sysclk;	/* SPCR3 sysclk support — copied into
				 * qspi->mspi_spcr3_sysclk; exact semantics
				 * live in bcm_qspi_spbr_min() and friends */
};

static const struct bcm_qspi_data bcm_qspi_no_rev_data = {
	.has_mspi_rev = false,
	.has_spcr3_sysclk = false,
};

static const struct bcm_qspi_data bcm_qspi_rev_data = {
	.has_mspi_rev = true,
	.has_spcr3_sysclk = false,
};

static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
	.has_mspi_rev = true,
	.has_spcr3_sysclk = true,
};

static const struct of_device_id bcm_qspi_of_match[] = {
	{
		.compatible = "brcm,spi-bcm7445-qspi",
		.data = &bcm_qspi_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm-qspi",
		.data = &bcm_qspi_no_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm7216-qspi",
		.data = &bcm_qspi_spcr3_data,
	},
	{
		.compatible = "brcm,spi-bcm7278-qspi",
		.data = &bcm_qspi_spcr3_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
/**
 * bcm_qspi_probe() - common probe, called by SoC-specific drivers
 * @pdev:	platform device
 * @soc_intc:	optional SoC interrupt-controller glue (may be NULL)
 *
 * Maps the MSPI (and optional BSPI / chip-select) register windows,
 * enables the clock, requests either the per-source L2 interrupts or
 * the single muxed L1 interrupt, initializes the hardware and
 * registers the SPI master.
 *
 * Return: 0 on success, negative errno on failure.
 */
int bcm_qspi_probe(struct platform_device *pdev,
		   struct bcm_qspi_soc_intc *soc_intc)
{
	const struct of_device_id *of_id = NULL;
	const struct bcm_qspi_data *data;
	struct device *dev = &pdev->dev;
	struct bcm_qspi *qspi;
	struct spi_master *master;
	struct resource *res;
	int irq, ret = 0, num_ints = 0;
	u32 val;
	u32 rev = 0;
	const char *name = NULL;
	int num_irqs = ARRAY_SIZE(qspi_irq_tab);

	/* We only support device-tree instantiation */
	if (!dev->of_node)
		return -ENODEV;

	of_id = of_match_node(bcm_qspi_of_match, dev->of_node);
	if (!of_id)
		return -ENODEV;

	data = of_id->data;

	master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
	if (!master) {
		dev_err(dev, "error allocating spi_master\n");
		return -ENOMEM;
	}

	qspi = spi_master_get_devdata(master);

	qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(qspi->clk))
		return PTR_ERR(qspi->clk);

	qspi->pdev = pdev;
	qspi->trans_pos.trans = NULL;
	qspi->trans_pos.byte = 0;
	qspi->trans_pos.mspi_last_trans = true;
	qspi->master = master;

	master->bus_num = -1;
	master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD |
				SPI_3WIRE;
	master->setup = bcm_qspi_setup;
	master->transfer_one = bcm_qspi_transfer_one;
	master->mem_ops = &bcm_qspi_mem_ops;
	master->cleanup = bcm_qspi_cleanup;
	master->dev.of_node = dev->of_node;
	master->num_chipselect = NUM_CHIPSELECT;
	master->use_gpio_descriptors = true;

	qspi->big_endian = of_device_is_big_endian(dev->of_node);

	/* DT may narrow the default chip-select count */
	if (!of_property_read_u32(dev->of_node, "num-cs", &val))
		master->num_chipselect = val;

	/* "hif_mspi" is tried first, "mspi" is the fallback resource name */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
	if (!res)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "mspi");

	qspi->base[MSPI] = devm_ioremap_resource(dev, res);
	if (IS_ERR(qspi->base[MSPI]))
		return PTR_ERR(qspi->base[MSPI]);

	/* BSPI window is optional; its absence selects mspi-only mode */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
	if (res) {
		qspi->base[BSPI] = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[BSPI]))
			return PTR_ERR(qspi->base[BSPI]);
		qspi->bspi_mode = true;
	} else {
		qspi->bspi_mode = false;
	}

	dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
	if (res) {
		qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[CHIP_SELECT]))
			return PTR_ERR(qspi->base[CHIP_SELECT]);
	}

	qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
				GFP_KERNEL);
	if (!qspi->dev_ids)
		return -ENOMEM;

	/*
	 * Some SoCs integrate spi controller (e.g., its interrupt bits)
	 * in specific ways
	 */
	if (soc_intc) {
		qspi->soc_intc = soc_intc;
		soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
	} else {
		qspi->soc_intc = NULL;
	}

	if (qspi->clk) {
		ret = clk_prepare_enable(qspi->clk);
		if (ret) {
			dev_err(dev, "failed to prepare clock\n");
			goto qspi_probe_err;
		}
		qspi->base_clk = clk_get_rate(qspi->clk);
	} else {
		qspi->base_clk = MSPI_BASE_FREQ;
	}

	if (data->has_mspi_rev) {
		rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
		/* some older revs do not have a MSPI_REV register */
		if ((rev & 0xff) == 0xff)
			rev = 0;
	}

	qspi->mspi_maj_rev = (rev >> 4) & 0xf;
	qspi->mspi_min_rev = rev & 0xf;
	qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;

	qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);

	/*
	 * On SW resets it is possible to have the mask still enabled
	 * Need to disable the mask and clear the status while we init
	 */
	bcm_qspi_hw_uninit(qspi);

	for (val = 0; val < num_irqs; val++) {
		irq = -1;
		name = qspi_irq_tab[val].irq_name;
		if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
			/* get the l2 interrupts */
			irq = platform_get_irq_byname_optional(pdev, name);
		} else if (!num_ints && soc_intc) {
			/* all mspi, bspi intrs muxed to one L1 intr */
			irq = platform_get_irq(pdev, 0);
		}

		if (irq >= 0) {
			ret = devm_request_irq(&pdev->dev, irq,
					       qspi_irq_tab[val].irq_handler, 0,
					       name,
					       &qspi->dev_ids[val]);
			if (ret < 0) {
				dev_err(&pdev->dev, "IRQ %s not found\n", name);
				goto qspi_unprepare_err;
			}

			qspi->dev_ids[val].dev = qspi;
			qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
			num_ints++;
			dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
				qspi_irq_tab[val].irq_name,
				irq);
		}
	}

	if (!num_ints) {
		dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
		ret = -EINVAL;
		goto qspi_unprepare_err;
	}

	bcm_qspi_hw_init(qspi);
	init_completion(&qspi->mspi_done);
	init_completion(&qspi->bspi_done);
	qspi->curr_cs = -1;

	platform_set_drvdata(pdev, qspi);

	/* -1 forces the first transfer to program the xfer mode */
	qspi->xfer_mode.width = -1;
	qspi->xfer_mode.addrlen = -1;
	qspi->xfer_mode.hp = -1;

	ret = spi_register_master(master);
	if (ret < 0) {
		dev_err(dev, "can't register master\n");
		goto qspi_reg_err;
	}

	return 0;

qspi_reg_err:
	bcm_qspi_hw_uninit(qspi);
qspi_unprepare_err:
	clk_disable_unprepare(qspi->clk);
qspi_probe_err:
	kfree(qspi->dev_ids);
	return ret;
}
/* probe function to be called by SoC specific platform driver probe */
EXPORT_SYMBOL_GPL(bcm_qspi_probe);
/* Teardown counterpart of bcm_qspi_probe(); always returns 0. */
int bcm_qspi_remove(struct platform_device *pdev)
{
	struct bcm_qspi *qspi = platform_get_drvdata(pdev);

	/* unregister first so no new transfers race with hw teardown */
	spi_unregister_master(qspi->master);
	bcm_qspi_hw_uninit(qspi);
	clk_disable_unprepare(qspi->clk);
	kfree(qspi->dev_ids);

	return 0;
}
/* function to be called by SoC specific platform driver remove() */
EXPORT_SYMBOL_GPL(bcm_qspi_remove);
  1415. static int __maybe_unused bcm_qspi_suspend(struct device *dev)
  1416. {
  1417. struct bcm_qspi *qspi = dev_get_drvdata(dev);
  1418. /* store the override strap value */
  1419. if (!bcm_qspi_bspi_ver_three(qspi))
  1420. qspi->s3_strap_override_ctrl =
  1421. bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
  1422. spi_master_suspend(qspi->master);
  1423. clk_disable_unprepare(qspi->clk);
  1424. bcm_qspi_hw_uninit(qspi);
  1425. return 0;
  1426. };
/*
 * System resume: re-initialize the hardware, restore the active chip
 * select and SoC-level MSPI interrupt, then re-enable the clock and
 * resume the SPI core.
 */
static int __maybe_unused bcm_qspi_resume(struct device *dev)
{
	struct bcm_qspi *qspi = dev_get_drvdata(dev);
	int ret = 0;

	/*
	 * NOTE(review): registers are touched before clk_prepare_enable()
	 * below — confirm the block is accessible at this point.
	 */
	bcm_qspi_hw_init(qspi);
	bcm_qspi_chip_select(qspi, qspi->curr_cs);
	if (qspi->soc_intc)
		/* enable MSPI interrupt */
		qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
						 true);

	ret = clk_prepare_enable(qspi->clk);
	if (!ret)
		spi_master_resume(qspi->master);

	return ret;
}
SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);

/* pm_ops to be called by SoC specific platform driver */
EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);

MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("Broadcom QSPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);