spi-tegra210-quad.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. //
  3. // Copyright (C) 2020 NVIDIA CORPORATION.
  4. #include <linux/clk.h>
  5. #include <linux/completion.h>
  6. #include <linux/delay.h>
  7. #include <linux/dmaengine.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/dmapool.h>
  10. #include <linux/err.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/io.h>
  13. #include <linux/iopoll.h>
  14. #include <linux/kernel.h>
  15. #include <linux/kthread.h>
  16. #include <linux/module.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/of.h>
  20. #include <linux/of_device.h>
  21. #include <linux/reset.h>
  22. #include <linux/spi/spi.h>
  23. #include <linux/acpi.h>
  24. #include <linux/property.h>
  25. #define QSPI_COMMAND1 0x000
  26. #define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
  27. #define QSPI_PACKED BIT(5)
  28. #define QSPI_INTERFACE_WIDTH_MASK (0x03 << 7)
  29. #define QSPI_INTERFACE_WIDTH(x) (((x) & 0x03) << 7)
  30. #define QSPI_INTERFACE_WIDTH_SINGLE QSPI_INTERFACE_WIDTH(0)
  31. #define QSPI_INTERFACE_WIDTH_DUAL QSPI_INTERFACE_WIDTH(1)
  32. #define QSPI_INTERFACE_WIDTH_QUAD QSPI_INTERFACE_WIDTH(2)
  33. #define QSPI_SDR_DDR_SEL BIT(9)
  34. #define QSPI_TX_EN BIT(11)
  35. #define QSPI_RX_EN BIT(12)
  36. #define QSPI_CS_SW_VAL BIT(20)
  37. #define QSPI_CS_SW_HW BIT(21)
  38. #define QSPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
  39. #define QSPI_CS_POL_INACTIVE_MASK (0xF << 22)
  40. #define QSPI_CS_SEL_0 (0 << 26)
  41. #define QSPI_CS_SEL_1 (1 << 26)
  42. #define QSPI_CS_SEL_2 (2 << 26)
  43. #define QSPI_CS_SEL_3 (3 << 26)
  44. #define QSPI_CS_SEL_MASK (3 << 26)
  45. #define QSPI_CS_SEL(x) (((x) & 0x3) << 26)
  46. #define QSPI_CONTROL_MODE_0 (0 << 28)
  47. #define QSPI_CONTROL_MODE_3 (3 << 28)
  48. #define QSPI_CONTROL_MODE_MASK (3 << 28)
  49. #define QSPI_M_S BIT(30)
  50. #define QSPI_PIO BIT(31)
  51. #define QSPI_COMMAND2 0x004
  52. #define QSPI_TX_TAP_DELAY(x) (((x) & 0x3f) << 10)
  53. #define QSPI_RX_TAP_DELAY(x) (((x) & 0xff) << 0)
  54. #define QSPI_CS_TIMING1 0x008
  55. #define QSPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
  56. #define QSPI_CS_TIMING2 0x00c
  57. #define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1f) << 0)
  58. #define CS_ACTIVE_BETWEEN_PACKETS_0 BIT(5)
  59. #define QSPI_TRANS_STATUS 0x010
  60. #define QSPI_BLK_CNT(val) (((val) >> 0) & 0xffff)
  61. #define QSPI_RDY BIT(30)
  62. #define QSPI_FIFO_STATUS 0x014
  63. #define QSPI_RX_FIFO_EMPTY BIT(0)
  64. #define QSPI_RX_FIFO_FULL BIT(1)
  65. #define QSPI_TX_FIFO_EMPTY BIT(2)
  66. #define QSPI_TX_FIFO_FULL BIT(3)
  67. #define QSPI_RX_FIFO_UNF BIT(4)
  68. #define QSPI_RX_FIFO_OVF BIT(5)
  69. #define QSPI_TX_FIFO_UNF BIT(6)
  70. #define QSPI_TX_FIFO_OVF BIT(7)
  71. #define QSPI_ERR BIT(8)
  72. #define QSPI_TX_FIFO_FLUSH BIT(14)
  73. #define QSPI_RX_FIFO_FLUSH BIT(15)
  74. #define QSPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7f)
  75. #define QSPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7f)
  76. #define QSPI_FIFO_ERROR (QSPI_RX_FIFO_UNF | \
  77. QSPI_RX_FIFO_OVF | \
  78. QSPI_TX_FIFO_UNF | \
  79. QSPI_TX_FIFO_OVF)
  80. #define QSPI_FIFO_EMPTY (QSPI_RX_FIFO_EMPTY | \
  81. QSPI_TX_FIFO_EMPTY)
  82. #define QSPI_TX_DATA 0x018
  83. #define QSPI_RX_DATA 0x01c
  84. #define QSPI_DMA_CTL 0x020
  85. #define QSPI_TX_TRIG(n) (((n) & 0x3) << 15)
  86. #define QSPI_TX_TRIG_1 QSPI_TX_TRIG(0)
  87. #define QSPI_TX_TRIG_4 QSPI_TX_TRIG(1)
  88. #define QSPI_TX_TRIG_8 QSPI_TX_TRIG(2)
  89. #define QSPI_TX_TRIG_16 QSPI_TX_TRIG(3)
  90. #define QSPI_RX_TRIG(n) (((n) & 0x3) << 19)
  91. #define QSPI_RX_TRIG_1 QSPI_RX_TRIG(0)
  92. #define QSPI_RX_TRIG_4 QSPI_RX_TRIG(1)
  93. #define QSPI_RX_TRIG_8 QSPI_RX_TRIG(2)
  94. #define QSPI_RX_TRIG_16 QSPI_RX_TRIG(3)
  95. #define QSPI_DMA_EN BIT(31)
  96. #define QSPI_DMA_BLK 0x024
  97. #define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)
  98. #define QSPI_TX_FIFO 0x108
  99. #define QSPI_RX_FIFO 0x188
  100. #define QSPI_FIFO_DEPTH 64
  101. #define QSPI_INTR_MASK 0x18c
  102. #define QSPI_INTR_RX_FIFO_UNF_MASK BIT(25)
  103. #define QSPI_INTR_RX_FIFO_OVF_MASK BIT(26)
  104. #define QSPI_INTR_TX_FIFO_UNF_MASK BIT(27)
  105. #define QSPI_INTR_TX_FIFO_OVF_MASK BIT(28)
  106. #define QSPI_INTR_RDY_MASK BIT(29)
  107. #define QSPI_INTR_RX_TX_FIFO_ERR (QSPI_INTR_RX_FIFO_UNF_MASK | \
  108. QSPI_INTR_RX_FIFO_OVF_MASK | \
  109. QSPI_INTR_TX_FIFO_UNF_MASK | \
  110. QSPI_INTR_TX_FIFO_OVF_MASK)
  111. #define QSPI_MISC_REG 0x194
  112. #define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0)
  113. #define QSPI_DUMMY_CYCLES_MAX 0xff
  114. #define QSPI_CMB_SEQ_CMD 0x19c
  115. #define QSPI_COMMAND_VALUE_SET(x) (((x) & 0xFF) << 0)
  116. #define QSPI_CMB_SEQ_CMD_CFG 0x1a0
  117. #define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13)
  118. #define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13)
  119. #define QSPI_COMMAND_SDR_DDR BIT(12)
  120. #define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0)
  121. #define QSPI_GLOBAL_CONFIG 0x1a4
  122. #define QSPI_CMB_SEQ_EN BIT(0)
  123. #define QSPI_CMB_SEQ_ADDR 0x1a8
  124. #define QSPI_ADDRESS_VALUE_SET(x) (((x) & 0xFFFF) << 0)
  125. #define QSPI_CMB_SEQ_ADDR_CFG 0x1ac
  126. #define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13)
  127. #define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13)
  128. #define QSPI_ADDRESS_SDR_DDR BIT(12)
  129. #define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0)
  130. #define DATA_DIR_TX BIT(0)
  131. #define DATA_DIR_RX BIT(1)
  132. #define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
  133. #define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)
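/* Phases of a combined sequence transfer: command, then address, then data */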
  134. #define CMD_TRANSFER 0
  135. #define ADDR_TRANSFER 1
  136. #define DATA_TRANSFER 2
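/*
 * Per-SoC capabilities: whether DMA can be used, whether combined sequence
 * transfers are supported, and how many chip selects are available.
 */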
  137. struct tegra_qspi_soc_data {
  138. bool has_dma;
  139. bool cmb_xfer_capable;
  140. unsigned int cs_count;
  141. };
  142. struct tegra_qspi_client_data {
  143. int tx_clk_tap_delay;
  144. int rx_clk_tap_delay;
  145. };
  146. struct tegra_qspi {
  147. struct device *dev;
  148. struct spi_master *master;
  149. /* lock to protect data accessed by irq */
  150. spinlock_t lock;
  151. struct clk *clk;
  152. void __iomem *base;
  153. phys_addr_t phys;
  154. unsigned int irq;
  155. u32 cur_speed;
  156. unsigned int cur_pos;
  157. unsigned int words_per_32bit;
  158. unsigned int bytes_per_word;
  159. unsigned int curr_dma_words;
  160. unsigned int cur_direction;
  161. unsigned int cur_rx_pos;
  162. unsigned int cur_tx_pos;
  163. unsigned int dma_buf_size;
  164. unsigned int max_buf_size;
  165. bool is_curr_dma_xfer;
  166. struct completion rx_dma_complete;
  167. struct completion tx_dma_complete;
  168. u32 tx_status;
  169. u32 rx_status;
  170. u32 status_reg;
  171. bool is_packed;
  172. bool use_dma;
  173. u32 command1_reg;
  174. u32 dma_control_reg;
  175. u32 def_command1_reg;
  176. u32 def_command2_reg;
  177. u32 spi_cs_timing1;
  178. u32 spi_cs_timing2;
  179. u8 dummy_cycles;
  180. struct completion xfer_completion;
  181. struct spi_transfer *curr_xfer;
  182. struct dma_chan *rx_dma_chan;
  183. u32 *rx_dma_buf;
  184. dma_addr_t rx_dma_phys;
  185. struct dma_async_tx_descriptor *rx_dma_desc;
  186. struct dma_chan *tx_dma_chan;
  187. u32 *tx_dma_buf;
  188. dma_addr_t tx_dma_phys;
  189. struct dma_async_tx_descriptor *tx_dma_desc;
  190. const struct tegra_qspi_soc_data *soc_data;
  191. };
  192. static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
  193. {
  194. return readl(tqspi->base + offset);
  195. }
  196. static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
  197. {
  198. writel(value, tqspi->base + offset);
  199. /* read back register to make sure that register writes completed */
  200. if (offset != QSPI_TX_FIFO)
  201. readl(tqspi->base + QSPI_COMMAND1);
  202. }
  203. static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
  204. {
  205. u32 value;
  206. /* write 1 to clear status register */
  207. value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
  208. tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);
  209. value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
  210. if (!(value & QSPI_INTR_RDY_MASK)) {
  211. value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
  212. tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
  213. }
  214. /* clear fifo status error if any */
  215. value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
  216. if (value & QSPI_ERR)
  217. tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
  218. }
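/*
 * Decide between packed and unpacked mode for the current transfer and
 * return the number of FIFO words needed for the next chunk.
 */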
  219. static unsigned int
  220. tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
  221. {
  222. unsigned int max_word, max_len, total_fifo_words;
  223. unsigned int remain_len = t->len - tqspi->cur_pos;
  224. unsigned int bits_per_word = t->bits_per_word;
  225. tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
  226. /*
  227. * Tegra QSPI controller supports packed or unpacked mode transfers.
  228. * Packed mode is used for transfers of 8, 16, or 32 bits per word with a
  229. * total length of at least 4 bytes; all other transfers use unpacked
  230. * mode.
  231. */
  232. if ((bits_per_word == 8 || bits_per_word == 16 ||
  233. bits_per_word == 32) && t->len > 3) {
  234. tqspi->is_packed = true;
  235. tqspi->words_per_32bit = 32 / bits_per_word;
  236. } else {
  237. tqspi->is_packed = false;
  238. tqspi->words_per_32bit = 1;
  239. }
  240. if (tqspi->is_packed) {
  241. max_len = min(remain_len, tqspi->max_buf_size);
  242. tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
  243. total_fifo_words = (max_len + 3) / 4;
  244. } else {
  245. max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
  246. max_word = min(max_word, tqspi->max_buf_size / 4);
  247. tqspi->curr_dma_words = max_word;
  248. total_fifo_words = max_word;
  249. }
  250. return total_fifo_words;
  251. }
  252. static unsigned int
  253. tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
  254. {
  255. unsigned int written_words, fifo_words_left, count;
  256. unsigned int len, tx_empty_count, max_n_32bit, i;
  257. u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
  258. u32 fifo_status;
  259. fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
  260. tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);
  261. if (tqspi->is_packed) {
  262. fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
  263. written_words = min(fifo_words_left, tqspi->curr_dma_words);
  264. len = written_words * tqspi->bytes_per_word;
  265. max_n_32bit = DIV_ROUND_UP(len, 4);
  266. for (count = 0; count < max_n_32bit; count++) {
  267. u32 x = 0;
  268. for (i = 0; (i < 4) && len; i++, len--)
  269. x |= (u32)(*tx_buf++) << (i * 8);
  270. tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
  271. }
  272. tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
  273. } else {
  274. unsigned int write_bytes;
  275. u8 bytes_per_word = tqspi->bytes_per_word;
  276. max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
  277. written_words = max_n_32bit;
  278. len = written_words * tqspi->bytes_per_word;
  279. if (len > t->len - tqspi->cur_pos)
  280. len = t->len - tqspi->cur_pos;
  281. write_bytes = len;
  282. for (count = 0; count < max_n_32bit; count++) {
  283. u32 x = 0;
  284. for (i = 0; len && (i < bytes_per_word); i++, len--)
  285. x |= (u32)(*tx_buf++) << (i * 8);
  286. tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
  287. }
  288. tqspi->cur_tx_pos += write_bytes;
  289. }
  290. return written_words;
  291. }
  292. static unsigned int
  293. tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
  294. {
  295. u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
  296. unsigned int len, rx_full_count, count, i;
  297. unsigned int read_words = 0;
  298. u32 fifo_status, x;
  299. fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
  300. rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
  301. if (tqspi->is_packed) {
  302. len = tqspi->curr_dma_words * tqspi->bytes_per_word;
  303. for (count = 0; count < rx_full_count; count++) {
  304. x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);
  305. for (i = 0; len && (i < 4); i++, len--)
  306. *rx_buf++ = (x >> i * 8) & 0xff;
  307. }
  308. read_words += tqspi->curr_dma_words;
  309. tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
  310. } else {
  311. u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
  312. u8 bytes_per_word = tqspi->bytes_per_word;
  313. unsigned int read_bytes;
  314. len = rx_full_count * bytes_per_word;
  315. if (len > t->len - tqspi->cur_pos)
  316. len = t->len - tqspi->cur_pos;
  317. read_bytes = len;
  318. for (count = 0; count < rx_full_count; count++) {
  319. x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;
  320. for (i = 0; len && (i < bytes_per_word); i++, len--)
  321. *rx_buf++ = (x >> (i * 8)) & 0xff;
  322. }
  323. read_words += rx_full_count;
  324. tqspi->cur_rx_pos += read_bytes;
  325. }
  326. return read_words;
  327. }
  328. static void
  329. tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
  330. {
  331. dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
  332. tqspi->dma_buf_size, DMA_TO_DEVICE);
  333. /*
  334. * In packed mode, each word in FIFO may contain multiple packets
  335. * based on bits per word. So all bytes in each FIFO word are valid.
  336. *
  337. * In unpacked mode, each word in FIFO contains single packet and
  338. * based on bits per word any remaining bits in FIFO word will be
  339. * ignored by the hardware and are invalid bits.
  340. */
  341. if (tqspi->is_packed) {
  342. tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
  343. } else {
  344. u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
  345. unsigned int i, count, consume, write_bytes;
  346. /*
  347. * Fill tx_dma_buf to contain single packet in each word based
  348. * on bits per word from SPI core tx_buf.
  349. */
  350. consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
  351. if (consume > t->len - tqspi->cur_pos)
  352. consume = t->len - tqspi->cur_pos;
  353. write_bytes = consume;
  354. for (count = 0; count < tqspi->curr_dma_words; count++) {
  355. u32 x = 0;
  356. for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
  357. x |= (u32)(*tx_buf++) << (i * 8);
  358. tqspi->tx_dma_buf[count] = x;
  359. }
  360. tqspi->cur_tx_pos += write_bytes;
  361. }
  362. dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
  363. tqspi->dma_buf_size, DMA_TO_DEVICE);
  364. }
  365. static void
  366. tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
  367. {
  368. dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
  369. tqspi->dma_buf_size, DMA_FROM_DEVICE);
  370. if (tqspi->is_packed) {
  371. tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
  372. } else {
  373. unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
  374. u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
  375. unsigned int i, count, consume, read_bytes;
  376. /*
  377. * Each FIFO word contains single data packet.
  378. * Skip invalid bits in each FIFO word based on bits per word
  379. * and align bytes while filling in SPI core rx_buf.
  380. */
  381. consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
  382. if (consume > t->len - tqspi->cur_pos)
  383. consume = t->len - tqspi->cur_pos;
  384. read_bytes = consume;
  385. for (count = 0; count < tqspi->curr_dma_words; count++) {
  386. u32 x = tqspi->rx_dma_buf[count] & rx_mask;
  387. for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
  388. *rx_buf++ = (x >> (i * 8)) & 0xff;
  389. }
  390. tqspi->cur_rx_pos += read_bytes;
  391. }
  392. dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
  393. tqspi->dma_buf_size, DMA_FROM_DEVICE);
  394. }
  395. static void tegra_qspi_dma_complete(void *args)
  396. {
  397. struct completion *dma_complete = args;
  398. complete(dma_complete);
  399. }
  400. static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
  401. {
  402. dma_addr_t tx_dma_phys;
  403. reinit_completion(&tqspi->tx_dma_complete);
  404. if (tqspi->is_packed)
  405. tx_dma_phys = t->tx_dma;
  406. else
  407. tx_dma_phys = tqspi->tx_dma_phys;
  408. tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
  409. len, DMA_MEM_TO_DEV,
  410. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  411. if (!tqspi->tx_dma_desc) {
  412. dev_err(tqspi->dev, "Unable to get TX descriptor\n");
  413. return -EIO;
  414. }
  415. tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
  416. tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
  417. dmaengine_submit(tqspi->tx_dma_desc);
  418. dma_async_issue_pending(tqspi->tx_dma_chan);
  419. return 0;
  420. }
  421. static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
  422. {
  423. dma_addr_t rx_dma_phys;
  424. reinit_completion(&tqspi->rx_dma_complete);
  425. if (tqspi->is_packed)
  426. rx_dma_phys = t->rx_dma;
  427. else
  428. rx_dma_phys = tqspi->rx_dma_phys;
  429. tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
  430. len, DMA_DEV_TO_MEM,
  431. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  432. if (!tqspi->rx_dma_desc) {
  433. dev_err(tqspi->dev, "Unable to get RX descriptor\n");
  434. return -EIO;
  435. }
  436. tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
  437. tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
  438. dmaengine_submit(tqspi->rx_dma_desc);
  439. dma_async_issue_pending(tqspi->rx_dma_chan);
  440. return 0;
  441. }
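/* Flush both FIFOs and poll (atomically or not) until they report empty */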
  442. static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
  443. {
  444. void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
  445. u32 val;
  446. val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
  447. if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
  448. return 0;
  449. val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
  450. tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);
  451. if (!atomic)
  452. return readl_relaxed_poll_timeout(addr, val,
  453. (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
  454. 1000, 1000000);
  455. return readl_relaxed_poll_timeout_atomic(addr, val,
  456. (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
  457. 1000, 1000000);
  458. }
  459. static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
  460. {
  461. u32 intr_mask;
  462. intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
  463. intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
  464. tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
  465. }
  466. static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
  467. {
  468. u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
  469. u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
  470. unsigned int len;
  471. len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
  472. if (t->tx_buf) {
  473. t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
  474. if (dma_mapping_error(tqspi->dev, t->tx_dma))
  475. return -ENOMEM;
  476. }
  477. if (t->rx_buf) {
  478. t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
  479. if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
  480. dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
  481. return -ENOMEM;
  482. }
  483. }
  484. return 0;
  485. }
  486. static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
  487. {
  488. unsigned int len;
  489. len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
  490. dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
  491. dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
  492. }
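/*
 * Program the DMA block count and FIFO trigger levels, configure the DMA
 * channel(s) for the current direction(s) and start a DMA based transfer.
 */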
  493. static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
  494. {
  495. struct dma_slave_config dma_sconfig = { 0 };
  496. unsigned int len;
  497. u8 dma_burst;
  498. int ret = 0;
  499. u32 val;
  500. if (tqspi->is_packed) {
  501. ret = tegra_qspi_dma_map_xfer(tqspi, t);
  502. if (ret < 0)
  503. return ret;
  504. }
  505. val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
  506. tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
  507. tegra_qspi_unmask_irq(tqspi);
  508. if (tqspi->is_packed)
  509. len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
  510. else
  511. len = tqspi->curr_dma_words * 4;
  512. /* set FIFO trigger levels and DMA burst size based on the transfer length */
  513. val = 0;
  514. if (len & 0xf) {
  515. val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
  516. dma_burst = 1;
  517. } else if (((len) >> 4) & 0x1) {
  518. val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
  519. dma_burst = 4;
  520. } else {
  521. val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
  522. dma_burst = 8;
  523. }
  524. tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
  525. tqspi->dma_control_reg = val;
  526. dma_sconfig.device_fc = true;
  527. if (tqspi->cur_direction & DATA_DIR_TX) {
  528. dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
  529. dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  530. dma_sconfig.dst_maxburst = dma_burst;
  531. ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
  532. if (ret < 0) {
  533. dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
  534. return ret;
  535. }
  536. tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
  537. ret = tegra_qspi_start_tx_dma(tqspi, t, len);
  538. if (ret < 0) {
  539. dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
  540. return ret;
  541. }
  542. }
  543. if (tqspi->cur_direction & DATA_DIR_RX) {
  544. dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
  545. dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  546. dma_sconfig.src_maxburst = dma_burst;
  547. ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
  548. if (ret < 0) {
  549. dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
  550. return ret;
  551. }
  552. dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
  553. tqspi->dma_buf_size,
  554. DMA_FROM_DEVICE);
  555. ret = tegra_qspi_start_rx_dma(tqspi, t, len);
  556. if (ret < 0) {
  557. dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
  558. if (tqspi->cur_direction & DATA_DIR_TX)
  559. dmaengine_terminate_all(tqspi->tx_dma_chan);
  560. return ret;
  561. }
  562. }
  563. tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
  564. tqspi->is_curr_dma_xfer = true;
  565. tqspi->dma_control_reg = val;
  566. val |= QSPI_DMA_EN;
  567. tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
  568. return ret;
  569. }
  570. static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
  571. {
  572. u32 val;
  573. unsigned int cur_words;
  574. if (qspi->cur_direction & DATA_DIR_TX)
  575. cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
  576. else
  577. cur_words = qspi->curr_dma_words;
  578. val = QSPI_DMA_BLK_SET(cur_words - 1);
  579. tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);
  580. tegra_qspi_unmask_irq(qspi);
  581. qspi->is_curr_dma_xfer = false;
  582. val = qspi->command1_reg;
  583. val |= QSPI_PIO;
  584. tegra_qspi_writel(qspi, val, QSPI_COMMAND1);
  585. return 0;
  586. }
  587. static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
  588. {
  589. if (!tqspi->soc_data->has_dma)
  590. return;
  591. if (tqspi->tx_dma_buf) {
  592. dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
  593. tqspi->tx_dma_buf, tqspi->tx_dma_phys);
  594. tqspi->tx_dma_buf = NULL;
  595. }
  596. if (tqspi->tx_dma_chan) {
  597. dma_release_channel(tqspi->tx_dma_chan);
  598. tqspi->tx_dma_chan = NULL;
  599. }
  600. if (tqspi->rx_dma_buf) {
  601. dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
  602. tqspi->rx_dma_buf, tqspi->rx_dma_phys);
  603. tqspi->rx_dma_buf = NULL;
  604. }
  605. if (tqspi->rx_dma_chan) {
  606. dma_release_channel(tqspi->rx_dma_chan);
  607. tqspi->rx_dma_chan = NULL;
  608. }
  609. }
  610. static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
  611. {
  612. struct dma_chan *dma_chan;
  613. dma_addr_t dma_phys;
  614. u32 *dma_buf;
  615. int err;
  616. if (!tqspi->soc_data->has_dma)
  617. return 0;
  618. dma_chan = dma_request_chan(tqspi->dev, "rx");
  619. if (IS_ERR(dma_chan)) {
  620. err = PTR_ERR(dma_chan);
  621. goto err_out;
  622. }
  623. tqspi->rx_dma_chan = dma_chan;
  624. dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
  625. if (!dma_buf) {
  626. err = -ENOMEM;
  627. goto err_out;
  628. }
  629. tqspi->rx_dma_buf = dma_buf;
  630. tqspi->rx_dma_phys = dma_phys;
  631. dma_chan = dma_request_chan(tqspi->dev, "tx");
  632. if (IS_ERR(dma_chan)) {
  633. err = PTR_ERR(dma_chan);
  634. goto err_out;
  635. }
  636. tqspi->tx_dma_chan = dma_chan;
  637. dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
  638. if (!dma_buf) {
  639. err = -ENOMEM;
  640. goto err_out;
  641. }
  642. tqspi->tx_dma_buf = dma_buf;
  643. tqspi->tx_dma_phys = dma_phys;
  644. tqspi->use_dma = true;
  645. return 0;
  646. err_out:
  647. tegra_qspi_deinit_dma(tqspi);
  648. if (err != -EPROBE_DEFER) {
  649. dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
  650. dev_err(tqspi->dev, "falling back to PIO\n");
  651. return 0;
  652. }
  653. return err;
  654. }
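/*
 * Set the interface clock rate and build the COMMAND1 value for this
 * transfer (chip select, bits per word, SPI mode); tap delays are written
 * to COMMAND2 on the first transfer of a message.
 */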
  655. static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
  656. bool is_first_of_msg)
  657. {
  658. struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
  659. struct tegra_qspi_client_data *cdata = spi->controller_data;
  660. u32 command1, command2, speed = t->speed_hz;
  661. u8 bits_per_word = t->bits_per_word;
  662. u32 tx_tap = 0, rx_tap = 0;
  663. int req_mode;
  664. if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
  665. clk_set_rate(tqspi->clk, speed);
  666. tqspi->cur_speed = speed;
  667. }
  668. tqspi->cur_pos = 0;
  669. tqspi->cur_rx_pos = 0;
  670. tqspi->cur_tx_pos = 0;
  671. tqspi->curr_xfer = t;
  672. if (is_first_of_msg) {
  673. tegra_qspi_mask_clear_irq(tqspi);
  674. command1 = tqspi->def_command1_reg;
  675. command1 |= QSPI_CS_SEL(spi->chip_select);
  676. command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
  677. command1 &= ~QSPI_CONTROL_MODE_MASK;
  678. req_mode = spi->mode & 0x3;
  679. if (req_mode == SPI_MODE_3)
  680. command1 |= QSPI_CONTROL_MODE_3;
  681. else
  682. command1 |= QSPI_CONTROL_MODE_0;
  683. if (spi->mode & SPI_CS_HIGH)
  684. command1 |= QSPI_CS_SW_VAL;
  685. else
  686. command1 &= ~QSPI_CS_SW_VAL;
  687. tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
  688. if (cdata && cdata->tx_clk_tap_delay)
  689. tx_tap = cdata->tx_clk_tap_delay;
  690. if (cdata && cdata->rx_clk_tap_delay)
  691. rx_tap = cdata->rx_clk_tap_delay;
  692. command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
  693. if (command2 != tqspi->def_command2_reg)
  694. tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);
  695. } else {
  696. command1 = tqspi->command1_reg;
  697. command1 &= ~QSPI_BIT_LENGTH(~0);
  698. command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
  699. }
  700. command1 &= ~QSPI_SDR_DDR_SEL;
  701. return command1;
  702. }
  703. static int tegra_qspi_start_transfer_one(struct spi_device *spi,
  704. struct spi_transfer *t, u32 command1)
  705. {
  706. struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
  707. unsigned int total_fifo_words;
  708. u8 bus_width = 0;
  709. int ret;
  710. total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
  711. command1 &= ~QSPI_PACKED;
  712. if (tqspi->is_packed)
  713. command1 |= QSPI_PACKED;
  714. tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
  715. tqspi->cur_direction = 0;
  716. command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
  717. if (t->rx_buf) {
  718. command1 |= QSPI_RX_EN;
  719. tqspi->cur_direction |= DATA_DIR_RX;
  720. bus_width = t->rx_nbits;
  721. }
  722. if (t->tx_buf) {
  723. command1 |= QSPI_TX_EN;
  724. tqspi->cur_direction |= DATA_DIR_TX;
  725. bus_width = t->tx_nbits;
  726. }
  727. command1 &= ~QSPI_INTERFACE_WIDTH_MASK;
  728. if (bus_width == SPI_NBITS_QUAD)
  729. command1 |= QSPI_INTERFACE_WIDTH_QUAD;
  730. else if (bus_width == SPI_NBITS_DUAL)
  731. command1 |= QSPI_INTERFACE_WIDTH_DUAL;
  732. else
  733. command1 |= QSPI_INTERFACE_WIDTH_SINGLE;
  734. tqspi->command1_reg = command1;
  735. tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);
  736. ret = tegra_qspi_flush_fifos(tqspi, false);
  737. if (ret < 0)
  738. return ret;
  739. if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
  740. ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
  741. else
  742. ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);
  743. return ret;
  744. }
  745. static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
  746. {
  747. struct tegra_qspi_client_data *cdata;
  748. struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
  749. cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
  750. if (!cdata)
  751. return NULL;
  752. device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
  753. &cdata->tx_clk_tap_delay);
  754. device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
  755. &cdata->rx_clk_tap_delay);
  756. return cdata;
  757. }
  758. static int tegra_qspi_setup(struct spi_device *spi)
  759. {
  760. struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
  761. struct tegra_qspi_client_data *cdata = spi->controller_data;
  762. unsigned long flags;
  763. u32 val;
  764. int ret;
  765. ret = pm_runtime_resume_and_get(tqspi->dev);
  766. if (ret < 0) {
  767. dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
  768. return ret;
  769. }
  770. if (!cdata) {
  771. cdata = tegra_qspi_parse_cdata_dt(spi);
  772. spi->controller_data = cdata;
  773. }
  774. spin_lock_irqsave(&tqspi->lock, flags);
  775. /* keep the default CS state inactive */
  776. val = tqspi->def_command1_reg;
  777. val |= QSPI_CS_SEL(spi->chip_select);
  778. if (spi->mode & SPI_CS_HIGH)
  779. val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select);
  780. else
  781. val |= QSPI_CS_POL_INACTIVE(spi->chip_select);
  782. tqspi->def_command1_reg = val;
  783. tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
  784. spin_unlock_irqrestore(&tqspi->lock, flags);
  785. pm_runtime_put(tqspi->dev);
  786. return 0;
  787. }
  788. static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
  789. {
  790. dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
  791. dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
  792. tegra_qspi_readl(tqspi, QSPI_COMMAND1),
  793. tegra_qspi_readl(tqspi, QSPI_COMMAND2));
  794. dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
  795. tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
  796. tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
  797. dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n",
  798. tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
  799. tegra_qspi_readl(tqspi, QSPI_MISC_REG));
  800. dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
  801. tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
  802. tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
  803. }
  804. static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
  805. {
  806. dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
  807. tegra_qspi_dump_regs(tqspi);
  808. tegra_qspi_flush_fifos(tqspi, true);
  809. if (device_reset(tqspi->dev) < 0)
  810. dev_warn_once(tqspi->dev, "device reset failed\n");
  811. }
  812. static void tegra_qspi_transfer_end(struct spi_device *spi)
  813. {
  814. struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
  815. int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
  816. if (cs_val)
  817. tqspi->command1_reg |= QSPI_CS_SW_VAL;
  818. else
  819. tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
  820. tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
  821. tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
  822. }
  823. static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
  824. {
  825. u32 cmd_config = 0;
  826. /* Build the command phase configuration (SDR/DDR, bus width, length) */
  827. if (is_ddr)
  828. cmd_config |= QSPI_COMMAND_SDR_DDR;
  829. else
  830. cmd_config &= ~QSPI_COMMAND_SDR_DDR;
  831. cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
  832. cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
  833. return cmd_config;
  834. }
  835. static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
  836. {
  837. u32 addr_config = 0;
  838. /* Build the address phase configuration (only X1 SDR mode is supported) */
  839. is_ddr = 0; /* only SDR mode supported */
  840. bus_width = 0; /* X1 mode */
  841. if (is_ddr)
  842. addr_config |= QSPI_ADDRESS_SDR_DDR;
  843. else
  844. addr_config &= ~QSPI_ADDRESS_SDR_DDR;
  845. addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
  846. addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
  847. return addr_config;
  848. }
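/*
 * Combined sequence transfer: the message carries exactly three transfers
 * (command, address, data). The command and address values and their
 * configurations are latched into the CMB_SEQ registers, and the controller
 * issues all three phases as a single sequence once the data phase is
 * started.
 */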
  849. static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
  850. struct spi_message *msg)
  851. {
  852. bool is_first_msg = true;
  853. struct spi_transfer *xfer;
  854. struct spi_device *spi = msg->spi;
  855. u8 transfer_phase = 0;
  856. u32 cmd1 = 0, dma_ctl = 0;
  857. int ret = 0;
  858. u32 address_value = 0;
  859. u32 cmd_config = 0, addr_config = 0;
  860. u8 cmd_value = 0, val = 0;
  861. /* Enable Combined sequence mode */
  862. val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
  863. val |= QSPI_CMB_SEQ_EN;
  864. tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
  865. /* Process individual transfer list */
  866. list_for_each_entry(xfer, &msg->transfers, transfer_list) {
  867. switch (transfer_phase) {
  868. case CMD_TRANSFER:
  869. /* X1 SDR mode */
  870. cmd_config = tegra_qspi_cmd_config(false, 0,
  871. xfer->len);
  872. cmd_value = *((const u8 *)(xfer->tx_buf));
  873. break;
  874. case ADDR_TRANSFER:
  875. /* X1 SDR mode */
  876. addr_config = tegra_qspi_addr_config(false, 0,
  877. xfer->len);
  878. address_value = *((const u32 *)(xfer->tx_buf));
  879. break;
  880. case DATA_TRANSFER:
  881. /* Program Command, Address value in register */
  882. tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
  883. tegra_qspi_writel(tqspi, address_value,
  884. QSPI_CMB_SEQ_ADDR);
  885. /* Program Command and Address config in register */
  886. tegra_qspi_writel(tqspi, cmd_config,
  887. QSPI_CMB_SEQ_CMD_CFG);
  888. tegra_qspi_writel(tqspi, addr_config,
  889. QSPI_CMB_SEQ_ADDR_CFG);
  890. reinit_completion(&tqspi->xfer_completion);
  891. cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
  892. is_first_msg);
  893. ret = tegra_qspi_start_transfer_one(spi, xfer,
  894. cmd1);
  895. if (ret < 0) {
  896. dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
  897. ret);
  898. return ret;
  899. }
  900. is_first_msg = false;
  901. ret = wait_for_completion_timeout
  902. (&tqspi->xfer_completion,
  903. QSPI_DMA_TIMEOUT);
  904. if (WARN_ON(ret == 0)) {
  905. dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
  906. ret);
  907. if (tqspi->is_curr_dma_xfer &&
  908. (tqspi->cur_direction & DATA_DIR_TX))
  909. dmaengine_terminate_all
  910. (tqspi->tx_dma_chan);
  911. if (tqspi->is_curr_dma_xfer &&
  912. (tqspi->cur_direction & DATA_DIR_RX))
  913. dmaengine_terminate_all
  914. (tqspi->rx_dma_chan);
  915. /* Abort transfer by resetting pio/dma bit */
  916. if (!tqspi->is_curr_dma_xfer) {
  917. cmd1 = tegra_qspi_readl
  918. (tqspi,
  919. QSPI_COMMAND1);
  920. cmd1 &= ~QSPI_PIO;
  921. tegra_qspi_writel
  922. (tqspi, cmd1,
  923. QSPI_COMMAND1);
  924. } else {
  925. dma_ctl = tegra_qspi_readl
  926. (tqspi,
  927. QSPI_DMA_CTL);
  928. dma_ctl &= ~QSPI_DMA_EN;
  929. tegra_qspi_writel(tqspi, dma_ctl,
  930. QSPI_DMA_CTL);
  931. }
  932. /* Reset controller if timeout happens */
  933. if (device_reset(tqspi->dev) < 0)
  934. dev_warn_once(tqspi->dev,
  935. "device reset failed\n");
  936. ret = -EIO;
  937. goto exit;
  938. }
  939. if (tqspi->tx_status || tqspi->rx_status) {
  940. dev_err(tqspi->dev, "QSPI Transfer failed\n");
  941. tqspi->tx_status = 0;
  942. tqspi->rx_status = 0;
  943. ret = -EIO;
  944. goto exit;
  945. }
  946. if (!xfer->cs_change) {
  947. tegra_qspi_transfer_end(spi);
  948. spi_transfer_delay_exec(xfer);
  949. }
  950. break;
  951. default:
  952. ret = -EINVAL;
  953. goto exit;
  954. }
  955. msg->actual_length += xfer->len;
  956. transfer_phase++;
  957. }
  958. ret = 0;
  959. exit:
  960. msg->status = ret;
  961. if (ret < 0) {
  962. tegra_qspi_transfer_end(spi);
  963. spi_transfer_delay_exec(xfer);
  964. }
  965. return ret;
  966. }
  967. static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
  968. struct spi_message *msg)
  969. {
  970. struct spi_device *spi = msg->spi;
  971. struct spi_transfer *transfer;
  972. bool is_first_msg = true;
  973. int ret = 0, val = 0;
  974. msg->status = 0;
  975. msg->actual_length = 0;
  976. tqspi->tx_status = 0;
  977. tqspi->rx_status = 0;
  978. /* Disable Combined sequence mode */
  979. val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
  980. val &= ~QSPI_CMB_SEQ_EN;
  981. tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
  982. list_for_each_entry(transfer, &msg->transfers, transfer_list) {
  983. struct spi_transfer *xfer = transfer;
  984. u8 dummy_bytes = 0;
  985. u32 cmd1;
  986. tqspi->dummy_cycles = 0;
  987. /*
  988. * Tegra QSPI hardware supports dummy bytes transfer after actual transfer
  989. * bytes based on programmed dummy clock cycles in the QSPI_MISC register.
  990. * So, check whether the next transfer is a dummy data transfer; if it is,
  991. * program its dummy clock cycles along with the current transfer and skip it.
  992. */
  993. if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
  994. struct spi_transfer *next_xfer;
  995. next_xfer = list_next_entry(xfer, transfer_list);
  996. if (next_xfer->dummy_data) {
  997. u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;
  998. if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
  999. tqspi->dummy_cycles = dummy_cycles;
  1000. dummy_bytes = next_xfer->len;
  1001. transfer = next_xfer;
  1002. }
  1003. }
  1004. }
  1005. reinit_completion(&tqspi->xfer_completion);
  1006. cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
  1007. ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
  1008. if (ret < 0) {
  1009. dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
  1010. goto complete_xfer;
  1011. }
  1012. ret = wait_for_completion_timeout(&tqspi->xfer_completion,
  1013. QSPI_DMA_TIMEOUT);
  1014. if (WARN_ON(ret == 0)) {
  1015. dev_err(tqspi->dev, "transfer timeout\n");
  1016. if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
  1017. dmaengine_terminate_all(tqspi->tx_dma_chan);
  1018. if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
  1019. dmaengine_terminate_all(tqspi->rx_dma_chan);
  1020. tegra_qspi_handle_error(tqspi);
  1021. ret = -EIO;
  1022. goto complete_xfer;
  1023. }
  1024. if (tqspi->tx_status || tqspi->rx_status) {
  1025. tegra_qspi_handle_error(tqspi);
  1026. ret = -EIO;
  1027. goto complete_xfer;
  1028. }
  1029. msg->actual_length += xfer->len + dummy_bytes;
  1030. complete_xfer:
  1031. if (ret < 0) {
  1032. tegra_qspi_transfer_end(spi);
  1033. spi_transfer_delay_exec(xfer);
  1034. goto exit;
  1035. }
  1036. if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
  1037. /* de-activate CS after last transfer only when cs_change is not set */
  1038. if (!xfer->cs_change) {
  1039. tegra_qspi_transfer_end(spi);
  1040. spi_transfer_delay_exec(xfer);
  1041. }
  1042. } else if (xfer->cs_change) {
  1043. /* de-activate CS between the transfers only when cs_change is set */
  1044. tegra_qspi_transfer_end(spi);
  1045. spi_transfer_delay_exec(xfer);
  1046. }
  1047. }
  1048. ret = 0;
  1049. exit:
  1050. msg->status = ret;
  1051. return ret;
  1052. }
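/*
 * A message is eligible for combined sequence mode only when the SoC
 * supports it and the message has exactly three transfers: a command of at
 * most 2 bytes, an address of 3 or 4 bytes, and a data transfer that fits
 * in the FIFO when DMA is not available.
 */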
  1053. static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
  1054. struct spi_message *msg)
  1055. {
  1056. int transfer_count = 0;
  1057. struct spi_transfer *xfer;
  1058. list_for_each_entry(xfer, &msg->transfers, transfer_list) {
  1059. transfer_count++;
  1060. }
  1061. if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
  1062. return false;
  1063. xfer = list_first_entry(&msg->transfers, typeof(*xfer),
  1064. transfer_list);
  1065. if (xfer->len > 2)
  1066. return false;
  1067. xfer = list_next_entry(xfer, transfer_list);
  1068. if (xfer->len > 4 || xfer->len < 3)
  1069. return false;
  1070. xfer = list_next_entry(xfer, transfer_list);
  1071. if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
  1072. return false;
  1073. return true;
  1074. }
  1075. static int tegra_qspi_transfer_one_message(struct spi_master *master,
  1076. struct spi_message *msg)
  1077. {
  1078. struct tegra_qspi *tqspi = spi_master_get_devdata(master);
  1079. int ret;
  1080. if (tegra_qspi_validate_cmb_seq(tqspi, msg))
  1081. ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
  1082. else
  1083. ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
  1084. spi_finalize_current_message(master);
  1085. return ret;
  1086. }
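/*
 * Interrupt path for PIO transfers: report errors, drain the RX FIFO, and
 * either complete the transfer or start the next chunk.
 */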
  1087. static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
  1088. {
  1089. struct spi_transfer *t = tqspi->curr_xfer;
  1090. unsigned long flags;
  1091. spin_lock_irqsave(&tqspi->lock, flags);
  1092. if (tqspi->tx_status || tqspi->rx_status) {
  1093. tegra_qspi_handle_error(tqspi);
  1094. complete(&tqspi->xfer_completion);
  1095. goto exit;
  1096. }
  1097. if (tqspi->cur_direction & DATA_DIR_RX)
  1098. tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
  1099. if (tqspi->cur_direction & DATA_DIR_TX)
  1100. tqspi->cur_pos = tqspi->cur_tx_pos;
  1101. else
  1102. tqspi->cur_pos = tqspi->cur_rx_pos;
  1103. if (tqspi->cur_pos == t->len) {
  1104. complete(&tqspi->xfer_completion);
  1105. goto exit;
  1106. }
  1107. tegra_qspi_calculate_curr_xfer_param(tqspi, t);
  1108. tegra_qspi_start_cpu_based_transfer(tqspi, t);
  1109. exit:
  1110. spin_unlock_irqrestore(&tqspi->lock, flags);
  1111. return IRQ_HANDLED;
  1112. }
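/*
 * Interrupt path for DMA transfers: wait for the DMA completions (or clean
 * up on error), copy unpacked RX data back to the client buffer, and start
 * the next chunk if the transfer is not yet finished.
 */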
  1113. static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
  1114. {
  1115. struct spi_transfer *t = tqspi->curr_xfer;
  1116. unsigned int total_fifo_words;
  1117. unsigned long flags;
  1118. long wait_status;
  1119. int err = 0;
  1120. if (tqspi->cur_direction & DATA_DIR_TX) {
  1121. if (tqspi->tx_status) {
  1122. dmaengine_terminate_all(tqspi->tx_dma_chan);
  1123. err += 1;
  1124. } else {
  1125. wait_status = wait_for_completion_interruptible_timeout(
  1126. &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
  1127. if (wait_status <= 0) {
  1128. dmaengine_terminate_all(tqspi->tx_dma_chan);
  1129. dev_err(tqspi->dev, "failed TX DMA transfer\n");
  1130. err += 1;
  1131. }
  1132. }
  1133. }
  1134. if (tqspi->cur_direction & DATA_DIR_RX) {
  1135. if (tqspi->rx_status) {
  1136. dmaengine_terminate_all(tqspi->rx_dma_chan);
  1137. err += 2;
  1138. } else {
  1139. wait_status = wait_for_completion_interruptible_timeout(
  1140. &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
  1141. if (wait_status <= 0) {
  1142. dmaengine_terminate_all(tqspi->rx_dma_chan);
  1143. dev_err(tqspi->dev, "failed RX DMA transfer\n");
  1144. err += 2;
  1145. }
  1146. }
  1147. }
  1148. spin_lock_irqsave(&tqspi->lock, flags);
  1149. if (err) {
  1150. tegra_qspi_dma_unmap_xfer(tqspi, t);
  1151. tegra_qspi_handle_error(tqspi);
  1152. complete(&tqspi->xfer_completion);
  1153. goto exit;
  1154. }
  1155. if (tqspi->cur_direction & DATA_DIR_RX)
  1156. tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
  1157. if (tqspi->cur_direction & DATA_DIR_TX)
  1158. tqspi->cur_pos = tqspi->cur_tx_pos;
  1159. else
  1160. tqspi->cur_pos = tqspi->cur_rx_pos;
  1161. if (tqspi->cur_pos == t->len) {
  1162. tegra_qspi_dma_unmap_xfer(tqspi, t);
  1163. complete(&tqspi->xfer_completion);
  1164. goto exit;
  1165. }
  1166. tegra_qspi_dma_unmap_xfer(tqspi, t);
  1167. /* continue transfer in current message */
  1168. total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
  1169. if (total_fifo_words > QSPI_FIFO_DEPTH)
  1170. err = tegra_qspi_start_dma_based_transfer(tqspi, t);
  1171. else
  1172. err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
  1173. exit:
  1174. spin_unlock_irqrestore(&tqspi->lock, flags);
  1175. return IRQ_HANDLED;
  1176. }
  1177. static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
  1178. {
  1179. struct tegra_qspi *tqspi = context_data;
  1180. tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
  1181. if (tqspi->cur_direction & DATA_DIR_TX)
  1182. tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
  1183. if (tqspi->cur_direction & DATA_DIR_RX)
  1184. tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
  1185. tegra_qspi_mask_clear_irq(tqspi);
  1186. if (!tqspi->is_curr_dma_xfer)
  1187. return handle_cpu_based_xfer(tqspi);
  1188. return handle_dma_based_xfer(tqspi);
  1189. }
  1190. static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
  1191. .has_dma = true,
  1192. .cmb_xfer_capable = false,
  1193. .cs_count = 1,
  1194. };
  1195. static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
  1196. .has_dma = true,
  1197. .cmb_xfer_capable = true,
  1198. .cs_count = 1,
  1199. };
  1200. static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
  1201. .has_dma = false,
  1202. .cmb_xfer_capable = true,
  1203. .cs_count = 1,
  1204. };
  1205. static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
  1206. .has_dma = false,
  1207. .cmb_xfer_capable = true,
  1208. .cs_count = 4,
  1209. };
  1210. static const struct of_device_id tegra_qspi_of_match[] = {
  1211. {
  1212. .compatible = "nvidia,tegra210-qspi",
  1213. .data = &tegra210_qspi_soc_data,
  1214. }, {
  1215. .compatible = "nvidia,tegra186-qspi",
  1216. .data = &tegra186_qspi_soc_data,
  1217. }, {
  1218. .compatible = "nvidia,tegra194-qspi",
  1219. .data = &tegra186_qspi_soc_data,
  1220. }, {
  1221. .compatible = "nvidia,tegra234-qspi",
  1222. .data = &tegra234_qspi_soc_data,
  1223. }, {
  1224. .compatible = "nvidia,tegra241-qspi",
  1225. .data = &tegra241_qspi_soc_data,
  1226. },
  1227. {}
  1228. };
  1229. MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
  1230. #ifdef CONFIG_ACPI
  1231. static const struct acpi_device_id tegra_qspi_acpi_match[] = {
  1232. {
  1233. .id = "NVDA1213",
  1234. .driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
  1235. }, {
  1236. .id = "NVDA1313",
  1237. .driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
  1238. }, {
  1239. .id = "NVDA1413",
  1240. .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
  1241. }, {
  1242. .id = "NVDA1513",
  1243. .driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
  1244. },
  1245. {}
  1246. };
  1247. MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
  1248. #endif
  1249. static int tegra_qspi_probe(struct platform_device *pdev)
  1250. {
  1251. struct spi_master *master;
  1252. struct tegra_qspi *tqspi;
  1253. struct resource *r;
  1254. int ret, qspi_irq;
  1255. int bus_num;
  1256. master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
  1257. if (!master)
  1258. return -ENOMEM;
  1259. platform_set_drvdata(pdev, master);
  1260. tqspi = spi_master_get_devdata(master);
  1261. master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
  1262. SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
  1263. master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
  1264. master->setup = tegra_qspi_setup;
  1265. master->transfer_one_message = tegra_qspi_transfer_one_message;
  1266. master->num_chipselect = 1;
  1267. master->auto_runtime_pm = true;
  1268. bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
  1269. if (bus_num >= 0)
  1270. master->bus_num = bus_num;
  1271. tqspi->master = master;
  1272. tqspi->dev = &pdev->dev;
  1273. spin_lock_init(&tqspi->lock);
  1274. tqspi->soc_data = device_get_match_data(&pdev->dev);
  1275. master->num_chipselect = tqspi->soc_data->cs_count;
  1276. r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1277. tqspi->base = devm_ioremap_resource(&pdev->dev, r);
  1278. if (IS_ERR(tqspi->base))
  1279. return PTR_ERR(tqspi->base);
  1280. tqspi->phys = r->start;
  1281. qspi_irq = platform_get_irq(pdev, 0);
  1282. if (qspi_irq < 0)
  1283. return qspi_irq;
  1284. tqspi->irq = qspi_irq;
  1285. if (!has_acpi_companion(tqspi->dev)) {
  1286. tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
  1287. if (IS_ERR(tqspi->clk)) {
  1288. ret = PTR_ERR(tqspi->clk);
  1289. dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
  1290. return ret;
  1291. }
  1292. }
  1293. tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
  1294. tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
  1295. ret = tegra_qspi_init_dma(tqspi);
  1296. if (ret < 0)
  1297. return ret;
  1298. if (tqspi->use_dma)
  1299. tqspi->max_buf_size = tqspi->dma_buf_size;
  1300. init_completion(&tqspi->tx_dma_complete);
  1301. init_completion(&tqspi->rx_dma_complete);
  1302. init_completion(&tqspi->xfer_completion);
  1303. pm_runtime_enable(&pdev->dev);
  1304. ret = pm_runtime_resume_and_get(&pdev->dev);
  1305. if (ret < 0) {
  1306. dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
  1307. goto exit_pm_disable;
  1308. }
  1309. if (device_reset(tqspi->dev) < 0)
  1310. dev_warn_once(tqspi->dev, "device reset failed\n");
  1311. tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
  1312. tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
  1313. tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
  1314. tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
  1315. tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
  1316. pm_runtime_put(&pdev->dev);
  1317. ret = request_threaded_irq(tqspi->irq, NULL,
  1318. tegra_qspi_isr_thread, IRQF_ONESHOT,
  1319. dev_name(&pdev->dev), tqspi);
  1320. if (ret < 0) {
  1321. dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
  1322. goto exit_pm_disable;
  1323. }
  1324. master->dev.of_node = pdev->dev.of_node;
  1325. ret = spi_register_master(master);
  1326. if (ret < 0) {
  1327. dev_err(&pdev->dev, "failed to register master: %d\n", ret);
  1328. goto exit_free_irq;
  1329. }
  1330. return 0;
  1331. exit_free_irq:
  1332. free_irq(qspi_irq, tqspi);
  1333. exit_pm_disable:
  1334. pm_runtime_force_suspend(&pdev->dev);
  1335. tegra_qspi_deinit_dma(tqspi);
  1336. return ret;
  1337. }
  1338. static int tegra_qspi_remove(struct platform_device *pdev)
  1339. {
  1340. struct spi_master *master = platform_get_drvdata(pdev);
  1341. struct tegra_qspi *tqspi = spi_master_get_devdata(master);
  1342. spi_unregister_master(master);
  1343. free_irq(tqspi->irq, tqspi);
  1344. pm_runtime_force_suspend(&pdev->dev);
  1345. tegra_qspi_deinit_dma(tqspi);
  1346. return 0;
  1347. }
  1348. static int __maybe_unused tegra_qspi_suspend(struct device *dev)
  1349. {
  1350. struct spi_master *master = dev_get_drvdata(dev);
  1351. return spi_master_suspend(master);
  1352. }
  1353. static int __maybe_unused tegra_qspi_resume(struct device *dev)
  1354. {
  1355. struct spi_master *master = dev_get_drvdata(dev);
  1356. struct tegra_qspi *tqspi = spi_master_get_devdata(master);
  1357. int ret;
  1358. ret = pm_runtime_resume_and_get(dev);
  1359. if (ret < 0) {
  1360. dev_err(dev, "failed to get runtime PM: %d\n", ret);
  1361. return ret;
  1362. }
  1363. tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
  1364. tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
  1365. pm_runtime_put(dev);
  1366. return spi_master_resume(master);
  1367. }
  1368. static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
  1369. {
  1370. struct spi_master *master = dev_get_drvdata(dev);
  1371. struct tegra_qspi *tqspi = spi_master_get_devdata(master);
  1372. /* Runtime pm disabled with ACPI */
  1373. if (has_acpi_companion(tqspi->dev))
  1374. return 0;
  1375. /* flush all writes that are in the PPSB queue by reading back */
  1376. tegra_qspi_readl(tqspi, QSPI_COMMAND1);
  1377. clk_disable_unprepare(tqspi->clk);
  1378. return 0;
  1379. }
  1380. static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
  1381. {
  1382. struct spi_master *master = dev_get_drvdata(dev);
  1383. struct tegra_qspi *tqspi = spi_master_get_devdata(master);
  1384. int ret;
  1385. /* Runtime pm disabled with ACPI */
  1386. if (has_acpi_companion(tqspi->dev))
  1387. return 0;
  1388. ret = clk_prepare_enable(tqspi->clk);
  1389. if (ret < 0)
  1390. dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
  1391. return ret;
  1392. }
  1393. static const struct dev_pm_ops tegra_qspi_pm_ops = {
  1394. SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
  1395. SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
  1396. };
  1397. static struct platform_driver tegra_qspi_driver = {
  1398. .driver = {
  1399. .name = "tegra-qspi",
  1400. .pm = &tegra_qspi_pm_ops,
  1401. .of_match_table = tegra_qspi_of_match,
  1402. .acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
  1403. },
  1404. .probe = tegra_qspi_probe,
  1405. .remove = tegra_qspi_remove,
  1406. };
  1407. module_platform_driver(tegra_qspi_driver);
  1408. MODULE_ALIAS("platform:qspi-tegra");
  1409. MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
  1410. MODULE_AUTHOR("Sowjanya Komatineni <[email protected]>");
  1411. MODULE_LICENSE("GPL v2");