spi-msm-geni.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/ipc_logging.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/qcom-geni-se-common.h>
#include <linux/msm_gpi.h>
#include <linux/spi/spi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/suspend.h>

#define SPI_NUM_CHIPSELECT (4)
#define SPI_XFER_TIMEOUT_MS (250)
#define SPI_AUTO_SUSPEND_DELAY (250)
#define SPI_XFER_TIMEOUT_OFFSET (250)
#define SPI_SLAVE_SYNC_XFER_TIMEOUT_OFFSET (50)

/* SPI SE specific registers */
#define SE_SPI_CPHA (0x224)
#define SE_SPI_LOOPBACK (0x22C)
#define SE_SPI_CPOL (0x230)
#define SE_SPI_DEMUX_OUTPUT_INV (0x24C)
#define SE_SPI_DEMUX_SEL (0x250)
#define SE_SPI_TRANS_CFG (0x25C)
#define SE_SPI_WORD_LEN (0x268)
#define SE_SPI_TX_TRANS_LEN (0x26C)
#define SE_SPI_RX_TRANS_LEN (0x270)
#define SE_SPI_PRE_POST_CMD_DLY (0x274)
#define SE_SPI_DELAY_COUNTERS (0x278)
#define SE_SPI_SLAVE_EN (0x2BC)
#define SPI_SLAVE_EN BIT(0)

/* SE_SPI_CPHA register fields */
#define CPHA (BIT(0))

/* SE_SPI_LOOPBACK register fields */
#define LOOPBACK_ENABLE (0x1)
#define NORMAL_MODE (0x0)
#define LOOPBACK_MSK (GENMASK(1, 0))

/* SE_SPI_CPOL register fields */
#define CPOL (BIT(2))

/* SE_SPI_DEMUX_OUTPUT_INV register fields */
#define CS_DEMUX_OUTPUT_INV_MSK (GENMASK(3, 0))

/* SE_SPI_DEMUX_SEL register fields */
#define CS_DEMUX_OUTPUT_SEL (GENMASK(3, 0))

/* SE_SPI_TX_TRANS_CFG register fields */
#define CS_TOGGLE (BIT(0))

/* SE_SPI_WORD_LEN register fields */
#define WORD_LEN_MSK (GENMASK(9, 0))
#define MIN_WORD_LEN (4)

/* SPI_TX/SPI_RX_TRANS_LEN fields */
#define TRANS_LEN_MSK (GENMASK(23, 0))

/* SE_SPI_DELAY_COUNTERS */
#define SPI_INTER_WORDS_DELAY_MSK (GENMASK(9, 0))
#define SPI_CS_CLK_DELAY_MSK (GENMASK(19, 10))
#define SPI_CS_CLK_DELAY_SHFT (10)

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY (1)
#define SPI_RX_ONLY (2)
#define SPI_FULL_DUPLEX (3)
#define SPI_TX_RX (7)
#define SPI_CS_ASSERT (8)
#define SPI_CS_DEASSERT (9)
#define SPI_SCK_ONLY (10)

/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY BIT(0)
#define TIMESTAMP_BEFORE BIT(1)
#define FRAGMENTATION BIT(2)
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)

/* GSI CONFIG0 TRE Params */
/* Flags bit fields */
#define GSI_LOOPBACK_EN (BIT(0))
#define GSI_CS_TOGGLE (BIT(3))
#define GSI_CPHA (BIT(4))
#define GSI_CPOL (BIT(5))

#define MAX_TX_SG (3)
#define NUM_SPI_XFER (8)

/* SPI sampling registers */
#define SE_GENI_CGC_CTRL (0x28)
#define SE_GENI_CFG_SEQ_START (0x84)
#define SE_GENI_CFG_REG108 (0x2B0)
#define SE_GENI_CFG_REG109 (0x2B4)
#define CPOL_CTRL_SHFT 1
#define RX_IO_POS_FF_EN_SEL_SHFT 4
#define RX_IO_EN2CORE_EN_DELAY_SHFT 8
#define RX_SI_EN2IO_DELAY_SHFT 12

#define PINCTRL_DEFAULT "default"
#define PINCTRL_ACTIVE "active"
#define PINCTRL_SLEEP "sleep"
#define SPI_LOG_DBG(log_ctx, print, dev, x...) do { \
	GENI_SE_DBG(log_ctx, print, dev, x); \
	if (dev) \
		spi_trace_log(dev, x); \
} while (0)

#define SPI_LOG_ERR(log_ctx, print, dev, x...) do { \
	GENI_SE_ERR(log_ctx, print, dev, x); \
	if (dev) \
		spi_trace_log(dev, x); \
} while (0)

#define CREATE_TRACE_POINTS
#include "spi-qup-trace.h"
/* FTRACE Logging */
void spi_trace_log(struct device *dev, const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;

	va_start(args, fmt);
	vaf.va = &args;
	trace_spi_log_info(dev_name(dev), &vaf);
	va_end(args);
}
struct gsi_desc_cb {
	struct spi_master *spi;
	struct spi_transfer *xfer;
};

struct spi_geni_qcom_ctrl_data {
	u32 spi_cs_clk_delay;
	u32 spi_inter_words_delay;
};
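
/*
 * Illustrative client usage (an assumption, not code from this driver):
 * a device can tune the CS-to-clock and inter-word delays by pointing
 * spi_device.controller_data at a spi_geni_qcom_ctrl_data instance:
 *
 *	static struct spi_geni_qcom_ctrl_data spi_delays = {
 *		.spi_cs_clk_delay = 2,
 *		.spi_inter_words_delay = 0,
 *	};
 *	spi->controller_data = &spi_delays;
 *
 * setup_fifo_params() and setup_gsi_xfer() below pick these up when
 * programming SE_SPI_DELAY_COUNTERS or the CONFIG0 TRE.
 */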
struct spi_geni_gsi {
	struct msm_gpi_tre lock_t;
	struct msm_gpi_tre unlock_t;
	struct msm_gpi_tre config0_tre;
	struct msm_gpi_tre go_tre;
	struct msm_gpi_tre tx_dma_tre;
	struct msm_gpi_tre rx_dma_tre;
	struct scatterlist tx_sg[MAX_TX_SG];
	struct scatterlist rx_sg;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	struct msm_gpi_dma_async_tx_cb_param tx_cb_param;
	struct msm_gpi_dma_async_tx_cb_param rx_cb_param;
	struct dma_async_tx_descriptor *tx_desc;
	struct dma_async_tx_descriptor *rx_desc;
	struct gsi_desc_cb desc_cb;
};
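
/*
 * The TREs held in struct spi_geni_gsi describe one GSI transfer as a
 * chain submitted on the TX channel: an optional config0 TRE (clock,
 * packing, mode), a go TRE (command, chip select, RX length) and a TX
 * DMA buffer TRE, plus a separate RX DMA TRE on the RX channel when the
 * transfer has an RX leg; MAX_TX_SG covers the three TX-side TREs.
 */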
struct spi_geni_master {
	struct geni_se spi_rsc;
	struct clk *m_ahb_clk;
	struct clk *s_ahb_clk;
	struct pinctrl *geni_pinctrl;
	struct pinctrl_state *geni_gpio_active;
	struct pinctrl_state *geni_gpio_sleep;
	resource_size_t phys_addr;
	resource_size_t size;
	void __iomem *base;
	int irq;
	struct device *dev;
	int rx_fifo_depth;
	int tx_fifo_depth;
	int tx_fifo_width;
	int tx_wm;
	bool setup;
	u32 cur_speed_hz;
	int cur_word_len;
	unsigned int tx_rem_bytes;
	unsigned int rx_rem_bytes;
	struct spi_transfer *cur_xfer;
	struct completion xfer_done;
	struct device *wrapper_dev;
	int oversampling;
	struct spi_geni_gsi *gsi, *gsi_lock_unlock;
	struct dma_chan *tx;
	struct dma_chan *rx;
	struct msm_gpi_ctrl tx_event;
	struct msm_gpi_ctrl rx_event;
	struct completion tx_cb;
	struct completion rx_cb;
	bool qn_err;
	int cur_xfer_mode;
	int num_tx_eot;
	int num_rx_eot;
	int num_xfers;
	void *ipc;
	void *ipc_log_kpi;
	int spi_kpi;
	bool gsi_mode; /* GSI Mode */
	bool shared_ee; /* Dual EE use case */
	bool shared_se; /* True Multi EE use case */
	bool is_le_vm; /* LE VM use case */
	bool is_la_vm; /* LA VM property */
	bool dis_autosuspend;
	bool cmd_done;
	bool set_miso_sampling;
	u32 miso_sampling_ctrl_val;
	bool le_gpi_reset_done;
	bool disable_dma;
	bool slave_setup;
	bool slave_state;
	bool slave_cross_connected;
	bool master_cross_connect;
	bool is_xfer_in_progress;
	u32 xfer_timeout_offset;
	bool is_deep_sleep; /* For deep sleep, restore the config as done at probe. */
};
/**
 * geni_spi_se_dump_dbg_regs() - Print relevant registers that capture most
 *				 accurately the state of an SE.
 * @se: Pointer to the concerned serial engine.
 * @base: Base address of the SE's register space.
 * @ipc: IPC log context handle.
 *
 * This function is used to print out all the registers that capture the state
 * of an SE to help debug any errors.
 *
 * Return: None
 */
void geni_spi_se_dump_dbg_regs(struct geni_se *se, void __iomem *base,
			       void *ipc)
{
	u32 m_cmd0 = 0;
	u32 m_irq_status = 0;
	u32 s_cmd0 = 0;
	u32 s_irq_status = 0;
	u32 geni_status = 0;
	u32 geni_ios = 0;
	u32 dma_rx_irq = 0;
	u32 dma_tx_irq = 0;
	u32 rx_fifo_status = 0;
	u32 tx_fifo_status = 0;
	u32 se_dma_dbg = 0;
	u32 m_cmd_ctrl = 0;
	u32 se_dma_rx_len = 0;
	u32 se_dma_rx_len_in = 0;
	u32 se_dma_tx_len = 0;
	u32 se_dma_tx_len_in = 0;
	u32 geni_m_irq_en = 0;
	u32 geni_s_irq_en = 0;
	u32 geni_dma_tx_irq_en = 0;
	u32 geni_dma_rx_irq_en = 0;

	m_cmd0 = geni_read_reg(base, SE_GENI_M_CMD0);
	m_irq_status = geni_read_reg(base, SE_GENI_M_IRQ_STATUS);
	s_cmd0 = geni_read_reg(base, SE_GENI_S_CMD0);
	s_irq_status = geni_read_reg(base, SE_GENI_S_IRQ_STATUS);
	geni_status = geni_read_reg(base, SE_GENI_STATUS);
	geni_ios = geni_read_reg(base, SE_GENI_IOS);
	dma_tx_irq = geni_read_reg(base, SE_DMA_TX_IRQ_STAT);
	dma_rx_irq = geni_read_reg(base, SE_DMA_RX_IRQ_STAT);
	rx_fifo_status = geni_read_reg(base, SE_GENI_RX_FIFO_STATUS);
	tx_fifo_status = geni_read_reg(base, SE_GENI_TX_FIFO_STATUS);
	se_dma_dbg = geni_read_reg(base, SE_DMA_DEBUG_REG0);
	m_cmd_ctrl = geni_read_reg(base, SE_GENI_M_CMD_CTRL_REG);
	se_dma_rx_len = geni_read_reg(base, SE_DMA_RX_LEN);
	se_dma_rx_len_in = geni_read_reg(base, SE_DMA_RX_LEN_IN);
	se_dma_tx_len = geni_read_reg(base, SE_DMA_TX_LEN);
	se_dma_tx_len_in = geni_read_reg(base, SE_DMA_TX_LEN_IN);
	geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
	geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
	geni_dma_tx_irq_en = geni_read_reg(base, SE_DMA_TX_IRQ_EN);
	geni_dma_rx_irq_en = geni_read_reg(base, SE_DMA_RX_IRQ_EN);

	SPI_LOG_DBG(ipc, false, se->dev,
		    "%s: m_cmd0:0x%x, m_irq_status:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
		    __func__, m_cmd0, m_irq_status, geni_status, geni_ios);
	SPI_LOG_DBG(ipc, false, se->dev,
		    "dma_rx_irq:0x%x, dma_tx_irq:0x%x, rx_fifo_sts:0x%x, tx_fifo_sts:0x%x\n",
		    dma_rx_irq, dma_tx_irq, rx_fifo_status, tx_fifo_status);
	SPI_LOG_DBG(ipc, false, se->dev,
		    "se_dma_dbg:0x%x, m_cmd_ctrl:0x%x, dma_rxlen:0x%x, dma_rxlen_in:0x%x\n",
		    se_dma_dbg, m_cmd_ctrl, se_dma_rx_len, se_dma_rx_len_in);
	SPI_LOG_DBG(ipc, false, se->dev,
		    "dma_txlen:0x%x, dma_txlen_in:0x%x s_irq_status:0x%x\n",
		    se_dma_tx_len, se_dma_tx_len_in, s_irq_status);
	SPI_LOG_DBG(ipc, false, se->dev,
		    "dma_txirq_en:0x%x, dma_rxirq_en:0x%x geni_m_irq_en:0x%x geni_s_irq_en:0x%x\n",
		    geni_dma_tx_irq_en, geni_dma_rx_irq_en, geni_m_irq_en,
		    geni_s_irq_en);
}
static void spi_slv_setup(struct spi_geni_master *mas);
static void spi_master_setup(struct spi_geni_master *mas);

static ssize_t spi_slave_state_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
	struct spi_master *spi = platform_get_drvdata(pdev);
	struct spi_geni_master *geni_mas;

	geni_mas = spi_master_get_devdata(spi);
	if (geni_mas)
		ret = scnprintf(buf, PAGE_SIZE, "%d\n", geni_mas->slave_state);
	return ret;
}

static ssize_t spi_slave_state_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	/* slave_state is read-only; consume and ignore the write. */
	return count;
}

static DEVICE_ATTR_RW(spi_slave_state);
/*
 * capture_kpi_show() - Prints the value stored in the capture_kpi sysfs entry
 *
 * @dev: pointer to device
 * @attr: device attributes
 * @buf: buffer to store the capture_kpi value
 *
 * Return: number of bytes written to @buf or an error value
 */
static ssize_t capture_kpi_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spi_master *spi = platform_get_drvdata(pdev);
	struct spi_geni_master *geni_mas;

	geni_mas = spi_master_get_devdata(spi);
	if (!geni_mas)
		return -EINVAL;
	return scnprintf(buf, PAGE_SIZE, "%d\n", geni_mas->spi_kpi);
}

/*
 * capture_kpi_store() - Stores the capture_kpi sysfs value
 *
 * @dev: pointer to device
 * @attr: device attributes
 * @buf: buffer holding the user-supplied capture_kpi value
 * @size: size of the user-supplied buffer
 *
 * Return: size consumed from the buffer or an error value
 */
static ssize_t capture_kpi_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t size)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spi_master *spi = platform_get_drvdata(pdev);
	struct spi_geni_master *geni_mas;
	char name[36];

	geni_mas = spi_master_get_devdata(spi);
	if (!geni_mas)
		return -EINVAL;
	if (kstrtoint(buf, 0, &geni_mas->spi_kpi)) {
		dev_err(dev, "Invalid input\n");
		return -EINVAL;
	}
	if (geni_mas->spi_kpi && !geni_mas->ipc_log_kpi) {
		memset(name, 0, sizeof(name));
		scnprintf(name, sizeof(name), "%s%s", dev_name(geni_mas->dev), "_kpi");
		geni_mas->ipc_log_kpi = ipc_log_context_create(IPC_LOG_KPI_PAGES, name, 0);
		if (!geni_mas->ipc_log_kpi && IS_ENABLED(CONFIG_IPC_LOGGING))
			dev_err(&pdev->dev, "Error creating kpi IPC logs\n");
	}
	return size;
}

static DEVICE_ATTR_RW(capture_kpi);
static void spi_master_setup(struct spi_geni_master *mas)
{
	geni_write_reg(OTHER_IO_OE | IO2_DATA_IN_SEL | RX_DATA_IN_SEL |
		       IO_MACRO_IO3_SEL | IO_MACRO_IO2_SEL | IO_MACRO_IO0_SEL_BIT,
		       mas->base, GENI_CFG_REG80);
	geni_write_reg(START_TRIGGER, mas->base, SE_GENI_CFG_SEQ_START);
	/* Ensure data is written to the hardware register */
	wmb();
}

static void spi_slv_setup(struct spi_geni_master *mas)
{
	geni_write_reg(SPI_SLAVE_EN, mas->base, SE_SPI_SLAVE_EN);
	if (mas->slave_cross_connected) {
		geni_write_reg(GENI_IO_MUX_1_EN, mas->base, GENI_OUTPUT_CTRL);
		geni_write_reg(IO1_SEL_TX | IO2_DATA_IN_SEL_PAD2 |
			       IO3_DATA_IN_SEL_PAD2, mas->base, GENI_CFG_REG80);
	} else {
		geni_write_reg(GENI_IO_MUX_0_EN, mas->base, GENI_OUTPUT_CTRL);
	}
	geni_write_reg(START_TRIGGER, mas->base, SE_GENI_CFG_SEQ_START);
	/* Ensure data is written to the hardware register */
	wmb();
	dev_info(mas->dev, "spi slave setup done\n");
}

static int spi_slv_abort(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	complete_all(&mas->tx_cb);
	complete_all(&mas->rx_cb);
	return 0;
}

static struct spi_master *get_spi_master(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spi_master *spi = platform_get_drvdata(pdev);

	return spi;
}
static int get_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas,
			   int *clk_idx, int *clk_div)
{
	unsigned long sclk_freq;
	unsigned long res_freq;
	struct geni_se *se = &mas->spi_rsc;
	int ret = 0;

	ret = geni_se_clk_freq_match(&mas->spi_rsc,
				     (speed_hz * mas->oversampling), clk_idx,
				     &sclk_freq, false);
	if (ret) {
		dev_err(mas->dev, "%s: Failed(%d) to find src clk for 0x%x\n",
			__func__, ret, speed_hz);
		return ret;
	}
	*clk_div = DIV_ROUND_UP(sclk_freq, (mas->oversampling * speed_hz));
	if (!(*clk_div)) {
		dev_err(mas->dev, "%s:Err:sclk:%lu oversampling:%d speed:%u\n",
			__func__, sclk_freq, mas->oversampling, speed_hz);
		return -EINVAL;
	}
	res_freq = (sclk_freq / (*clk_div));
	dev_dbg(mas->dev, "%s: req %u resultant %lu sclk %lu, idx %d, div %d\n",
		__func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);
	ret = clk_set_rate(se->clk, sclk_freq);
	if (ret) {
		dev_err(mas->dev, "%s: clk_set_rate failed %d\n",
			__func__, ret);
		return ret;
	}
	return 0;
}
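
/*
 * Illustrative divider math (values assumed, not from hardware): for a
 * requested speed of 5 MHz with oversampling 1 and a matched source
 * clock of 19.2 MHz, clk_div = DIV_ROUND_UP(19200000, 5000000) = 4, so
 * the resultant SCLK is 19.2 MHz / 4 = 4.8 MHz; rounding the divider up
 * keeps the resultant rate at or below the requested speed.
 */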
static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
			       int bits_per_word)
{
	int pack_words = 1;
	bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
	u32 word_len = geni_read_reg(mas->base, SE_SPI_WORD_LEN);

	/*
	 * If the FIFO word width is an integer multiple of bits_per_word,
	 * pack as many SPI words as fit into each FIFO word; otherwise keep
	 * one SPI word per FIFO word.
	 */
	if (!(mas->tx_fifo_width % bits_per_word))
		pack_words = mas->tx_fifo_width / bits_per_word;
	word_len &= ~WORD_LEN_MSK;
	word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
	geni_se_config_packing(&mas->spi_rsc, bits_per_word, pack_words, msb_first, true, true);
	geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
	SPI_LOG_DBG(mas->ipc, false, mas->dev,
		    "%s: %u bpw %d pack_words %d\n", __func__, word_len,
		    bits_per_word, pack_words);
}
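
/*
 * Packing example (illustrative): with a 32-bit FIFO word and 8
 * bits_per_word, pack_words = 4, so four SPI words share one FIFO word;
 * with 12 bits_per_word (32 % 12 != 0), packing stays at one SPI word
 * per FIFO word.
 */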
static int setup_fifo_params(struct spi_device *spi_slv,
			     struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	u16 mode = spi_slv->mode;
	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
	u32 demux_sel = 0;
	u32 demux_output_inv = 0;
	u32 clk_sel = 0;
	u32 m_clk_cfg = 0;
	int ret = 0;
	int idx;
	int div;
	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
	u32 spi_delay_params = 0;

	loopback_cfg &= ~LOOPBACK_MSK;
	cpol &= ~CPOL;
	cpha &= ~CPHA;
	if (mode & SPI_LOOP)
		loopback_cfg |= LOOPBACK_ENABLE;
	if (mode & SPI_CPOL)
		cpol |= CPOL;
	if (mode & SPI_CPHA)
		cpha |= CPHA;
	if (spi_slv->mode & SPI_CS_HIGH)
		demux_output_inv |= BIT(spi_slv->chip_select);
	if (spi_slv->controller_data) {
		u32 cs_clk_delay = 0;
		u32 inter_words_delay = 0;

		delay_params =
			(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
		cs_clk_delay =
			(delay_params->spi_cs_clk_delay << SPI_CS_CLK_DELAY_SHFT)
			& SPI_CS_CLK_DELAY_MSK;
		inter_words_delay =
			delay_params->spi_inter_words_delay &
			SPI_INTER_WORDS_DELAY_MSK;
		spi_delay_params =
			(inter_words_delay | cs_clk_delay);
	}
	demux_sel = spi_slv->chip_select;
	mas->cur_speed_hz = spi_slv->max_speed_hz;
	mas->cur_word_len = spi_slv->bits_per_word;
	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
	if (ret) {
		dev_err(mas->dev, "Err setting clks ret(%d) for %d\n",
			ret, mas->cur_speed_hz);
		goto setup_fifo_params_exit;
	}
	clk_sel |= (idx & CLK_SEL_MSK);
	m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
	spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
	geni_write_reg(loopback_cfg, mas->base, SE_SPI_LOOPBACK);
	geni_write_reg(demux_sel, mas->base, SE_SPI_DEMUX_SEL);
	geni_write_reg(cpha, mas->base, SE_SPI_CPHA);
	geni_write_reg(cpol, mas->base, SE_SPI_CPOL);
	geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
	geni_write_reg(spi_delay_params, mas->base, SE_SPI_DELAY_COUNTERS);
	SPI_LOG_DBG(mas->ipc, false, mas->dev,
		    "%s:Loopback%d demux_sel0x%x demux_op_inv 0x%x clk_cfg 0x%x\n",
		    __func__, loopback_cfg, demux_sel, demux_output_inv, m_clk_cfg);
	SPI_LOG_DBG(mas->ipc, false, mas->dev,
		    "%s:clk_sel 0x%x cpol %d cpha %d delay 0x%x\n", __func__,
		    clk_sel, cpol, cpha, spi_delay_params);
	/* Ensure message level attributes are written before returning */
	mb();
setup_fifo_params_exit:
	return ret;
}
static int select_xfer_mode(struct spi_master *spi,
			    struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int mode = GENI_SE_DMA;
	int fifo_disable = (geni_read_reg(mas->base, GENI_IF_DISABLE_RO) &
			    FIFO_IF_DISABLE);
	bool dma_chan_valid =
		!(IS_ERR_OR_NULL(mas->tx) || IS_ERR_OR_NULL(mas->rx));

	/*
	 * If the FIFO interface is disabled and there are no GPI DMA
	 * channels, the transfer can't be done. If the FIFO interface is
	 * disabled, only GSI can be used; otherwise pick SE DMA mode.
	 */
	if (fifo_disable && !dma_chan_valid)
		mode = -EINVAL;
	else if (!fifo_disable)
		mode = GENI_SE_DMA;
	else if (dma_chan_valid)
		mode = GENI_GPI_DMA;
	return mode;
}
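
/*
 * Mode selection summary (restating the checks above):
 *
 *	FIFO i/f disabled   GPI channels valid   ->  mode
 *	yes                 no                       -EINVAL
 *	no                  don't care               GENI_SE_DMA
 *	yes                 yes                      GENI_GPI_DMA
 */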
static struct msm_gpi_tre *setup_lock_tre(struct spi_geni_master *mas)
{
	struct msm_gpi_tre *lock_t = &mas->gsi_lock_unlock->lock_t;

	lock_t->dword[0] = MSM_GPI_LOCK_TRE_DWORD0;
	lock_t->dword[1] = MSM_GPI_LOCK_TRE_DWORD1;
	lock_t->dword[2] = MSM_GPI_LOCK_TRE_DWORD2;
	/* lock tre: ieob set */
	lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 1, 0);
	return lock_t;
}
static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
					     struct spi_geni_master *mas, u16 mode,
					     u32 cs_clk_delay, u32 inter_words_delay)
{
	struct msm_gpi_tre *c0_tre = &mas->gsi[mas->num_xfers].config0_tre;
	u8 flags = 0;
	u8 word_len = 0;
	u8 pack = 0;
	int div = 0;
	int idx = 0;
	int ret = 0;
	int m_clk_cfg;

	if (IS_ERR_OR_NULL(c0_tre))
		return c0_tre;
	if (mode & SPI_LOOP)
		flags |= GSI_LOOPBACK_EN;
	if (mode & SPI_CPOL)
		flags |= GSI_CPOL;
	if (mode & SPI_CPHA)
		flags |= GSI_CPHA;
	word_len = xfer->bits_per_word - MIN_WORD_LEN;
	pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
	if (mas->is_le_vm) {
		idx = geni_read_reg(mas->base, SE_GENI_CLK_SEL);
		m_clk_cfg = geni_read_reg(mas->base, GENI_SER_M_CLK_CFG);
		div = (m_clk_cfg & CLK_DIV_MSK) >> CLK_DIV_SHFT;
	} else {
		ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
		if (ret) {
			dev_err(mas->dev, "%s:Err setting clks:%d\n",
				__func__, ret);
			return ERR_PTR(ret);
		}
	}
	c0_tre->dword[0] = MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags,
							  word_len);
	c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, cs_clk_delay,
							  inter_words_delay);
	c0_tre->dword[2] = MSM_GPI_SPI_CONFIG0_TRE_DWORD2(idx, div);
	c0_tre->dword[3] = MSM_GPI_SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 0, 1);
	SPI_LOG_DBG(mas->ipc, false, mas->dev,
		    "%s: flags 0x%x word %d pack %d freq %d idx %d div %d\n",
		    __func__, flags, word_len, pack, mas->cur_speed_hz, idx, div);
	SPI_LOG_DBG(mas->ipc, false, mas->dev,
		    "%s: cs_clk_delay %d inter_words_delay %d\n", __func__,
		    cs_clk_delay, inter_words_delay);
	return c0_tre;
}
static struct msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags,
					struct spi_geni_master *mas)
{
	struct msm_gpi_tre *go_tre = &mas->gsi[mas->num_xfers].go_tre;
	int chain;
	int eot;
	int eob;
	int link_rx = 0;

	if (IS_ERR_OR_NULL(go_tre))
		return go_tre;
	go_tre->dword[0] = MSM_GPI_SPI_GO_TRE_DWORD0(flags, cs, cmd);
	go_tre->dword[1] = MSM_GPI_SPI_GO_TRE_DWORD1;
	go_tre->dword[2] = MSM_GPI_SPI_GO_TRE_DWORD2(rx_len);
	if (cmd == SPI_RX_ONLY) {
		eot = 0;
		chain = 0;
		eob = 1; /* GO TRE on TX: processing needed */
	} else {
		eot = 0;
		chain = 1;
		eob = 0;
	}
	if (cmd & SPI_RX_ONLY)
		link_rx = 1;
	go_tre->dword[3] = MSM_GPI_SPI_GO_TRE_DWORD3(link_rx, 0, eot, eob,
						     chain);
	SPI_LOG_DBG(mas->ipc, false, mas->dev,
		    "%s: rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n",
		    __func__, rx_len, flags, cs, cmd, eot, eob, chain);
	return go_tre;
}
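
/*
 * Flag rationale (restating the encoding above): an RX-only GO TRE ends
 * the TX-channel chain, so EOB is set and CHAIN cleared; for TX-only and
 * full-duplex commands the GO TRE chains into the following TX DMA TRE.
 * LINK_RX is set whenever the command has an RX leg, i.e. for
 * SPI_RX_ONLY (2) and SPI_FULL_DUPLEX (3).
 */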
static struct msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre,
					 dma_addr_t buf, u32 len,
					 struct spi_geni_master *mas,
					 bool is_tx)
{
	if (IS_ERR_OR_NULL(tre))
		return tre;
	tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(buf);
	tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf);
	tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
	tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, is_tx, 0, 0);
	return tre;
}

static struct msm_gpi_tre *setup_unlock_tre(struct spi_geni_master *mas)
{
	struct msm_gpi_tre *unlock_t = &mas->gsi_lock_unlock->unlock_t;

	/* unlock tre: ieob set */
	unlock_t->dword[0] = MSM_GPI_UNLOCK_TRE_DWORD0;
	unlock_t->dword[1] = MSM_GPI_UNLOCK_TRE_DWORD1;
	unlock_t->dword[2] = MSM_GPI_UNLOCK_TRE_DWORD2;
	unlock_t->dword[3] = MSM_GPI_UNLOCK_TRE_DWORD3(0, 0, 0, 1, 0);
	return unlock_t;
}
static void spi_gsi_ch_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb,
			  void *ptr)
{
	struct spi_master *spi = ptr;
	struct spi_geni_master *mas;

	if (!ptr || !cb) {
		pr_err("%s: Invalid ev_cb buffer\n", __func__);
		return;
	}
	mas = spi_master_get_devdata(spi);
	switch (cb->cb_event) {
	case MSM_GPI_QUP_NOTIFY:
	case MSM_GPI_QUP_MAX_EVENT:
		SPI_LOG_DBG(mas->ipc, false, mas->dev,
			    "%s:cb_ev%d status%llu ts%llu count%llu\n",
			    __func__, cb->cb_event, cb->status,
			    cb->timestamp, cb->count);
		break;
	case MSM_GPI_QUP_ERROR:
	case MSM_GPI_QUP_CH_ERROR:
	case MSM_GPI_QUP_FW_ERROR:
	case MSM_GPI_QUP_PENDING_EVENT:
	case MSM_GPI_QUP_EOT_DESC_MISMATCH:
	case MSM_GPI_QUP_SW_ERROR:
		SPI_LOG_ERR(mas->ipc, true, mas->dev,
			    "%s: cb_ev %d status %llu ts %llu count %llu\n",
			    __func__, cb->cb_event, cb->status,
			    cb->timestamp, cb->count);
		SPI_LOG_ERR(mas->ipc, true, mas->dev,
			    "err.routine %u, err.type %u, err.code %u\n",
			    cb->error_log.routine,
			    cb->error_log.type,
			    cb->error_log.error_code);
		mas->qn_err = true;
		complete_all(&mas->tx_cb);
		complete_all(&mas->rx_cb);
		break;
	default:
		SPI_LOG_ERR(mas->ipc, false, mas->dev,
			    "%s: Unsupported event: %d\n", __func__, cb->cb_event);
		break;
	}
}
static void spi_gsi_rx_callback(void *cb)
{
	struct msm_gpi_dma_async_tx_cb_param *cb_param =
		(struct msm_gpi_dma_async_tx_cb_param *)cb;
	struct gsi_desc_cb *desc_cb;
	struct spi_master *spi;
	struct spi_transfer *xfer;
	struct spi_geni_master *mas;

	if (!(cb_param && cb_param->userdata)) {
		pr_err("%s: Invalid rx_cb buffer\n", __func__);
		return;
	}
	desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
	spi = desc_cb->spi;
	xfer = desc_cb->xfer;
	mas = spi_master_get_devdata(spi);
	if (xfer->rx_buf) {
		if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
			SPI_LOG_ERR(mas->ipc, true, mas->dev,
				    "%s: Unexpected GSI CB error\n", __func__);
			return;
		}
		if (cb_param->length == xfer->len) {
			SPI_LOG_DBG(mas->ipc, false, mas->dev,
				    "%s\n", __func__);
			complete(&mas->rx_cb);
		} else {
			SPI_LOG_ERR(mas->ipc, true, mas->dev,
				    "%s: Length mismatch. Expected %d Callback %d\n",
				    __func__, xfer->len, cb_param->length);
		}
	}
}
static void spi_gsi_tx_callback(void *cb)
{
	struct msm_gpi_dma_async_tx_cb_param *cb_param = cb;
	struct gsi_desc_cb *desc_cb;
	struct spi_master *spi;
	struct spi_transfer *xfer;
	struct spi_geni_master *mas;

	if (!(cb_param && cb_param->userdata)) {
		pr_err("%s: Invalid tx_cb buffer\n", __func__);
		return;
	}
	desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
	spi = desc_cb->spi;
	xfer = desc_cb->xfer;
	mas = spi_master_get_devdata(spi);
	/*
	 * Case when lock/unlock support is required:
	 * Lock/unlock TREs are submitted on the TX channel, so their
	 * completions also arrive here. A callback with no xfer scheduled
	 * therefore signals completion of a lock/unlock TRE rather than of a
	 * data transfer.
	 */
	if (!xfer) {
		SPI_LOG_DBG(mas->ipc, false, mas->dev,
			    "Lock/unlock IEOB received %s\n", __func__);
		complete(&mas->tx_cb);
		return;
	}
	if (xfer->tx_buf) {
		if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
			SPI_LOG_ERR(mas->ipc, true, mas->dev,
				    "%s: Unexpected GSI CB error\n", __func__);
			return;
		}
		if (cb_param->length == xfer->len) {
			SPI_LOG_DBG(mas->ipc, false, mas->dev,
				    "%s\n", __func__);
			complete(&mas->tx_cb);
		} else {
			SPI_LOG_ERR(mas->ipc, true, mas->dev,
				    "%s: Length mismatch. Expected %d Callback %d\n",
				    __func__, xfer->len, cb_param->length);
		}
	}
}
/*
 * Locking the GPII:
 * For a shared_se use case, lock the bus per message: the bus is locked
 * in prepare_message and unlocked in unprepare_message.
 * For an LE-VM use case, lock the bus per session: the bus is locked in
 * runtime_resume and unlocked in runtime_suspend.
 */
static int spi_geni_lock_bus(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	struct msm_gpi_tre *lock_t = NULL;
	int ret = 0, timeout = 0;
	struct scatterlist *xfer_tx_sg = mas->gsi_lock_unlock->tx_sg;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;

	reinit_completion(&mas->tx_cb);
	SPI_LOG_DBG(mas->ipc, false, mas->dev, "%s %d\n", __func__, ret);
	lock_t = setup_lock_tre(mas);
	sg_init_table(xfer_tx_sg, 1);
	sg_set_buf(xfer_tx_sg, lock_t, sizeof(*lock_t));
	mas->gsi_lock_unlock->desc_cb.spi = spi;
	mas->gsi_lock_unlock->tx_desc = dmaengine_prep_slave_sg(mas->tx,
					mas->gsi_lock_unlock->tx_sg, 1,
					DMA_MEM_TO_DEV, flags);
	if (IS_ERR_OR_NULL(mas->gsi_lock_unlock->tx_desc)) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		ret = -EIO;
		goto err_spi_geni_lock_bus;
	}
	mas->gsi_lock_unlock->tx_desc->callback = spi_gsi_tx_callback;
	mas->gsi_lock_unlock->tx_desc->callback_param =
		&mas->gsi_lock_unlock->tx_cb_param;
	mas->gsi_lock_unlock->tx_cb_param.userdata =
		&mas->gsi_lock_unlock->desc_cb;
	/* Issue TX */
	mas->gsi_lock_unlock->tx_cookie =
		dmaengine_submit(mas->gsi_lock_unlock->tx_desc);
	if (dma_submit_error(mas->gsi_lock_unlock->tx_cookie)) {
		dev_err(mas->dev, "%s: dmaengine_submit failed (%d)\n",
			__func__, mas->gsi_lock_unlock->tx_cookie);
		ret = -EINVAL;
		goto err_spi_geni_lock_bus;
	}
	dma_async_issue_pending(mas->tx);
	timeout = wait_for_completion_timeout(&mas->tx_cb,
				msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
	if (timeout <= 0) {
		SPI_LOG_ERR(mas->ipc, true, mas->dev,
			    "%s failed\n", __func__);
		geni_spi_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
		ret = -ETIMEDOUT;
		goto err_spi_geni_lock_bus;
	}
	return ret;

err_spi_geni_lock_bus:
	if (ret)
		dmaengine_terminate_all(mas->tx);
	return ret;
}
static void spi_geni_unlock_bus(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	struct msm_gpi_tre *unlock_t = NULL;
	int ret = 0, timeout = 0;
	struct scatterlist *xfer_tx_sg = mas->gsi_lock_unlock->tx_sg;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;

	/* If a GPI reset already happened for the LE-VM, no unlock is needed */
	if (mas->is_le_vm && mas->le_gpi_reset_done) {
		SPI_LOG_DBG(mas->ipc, false, mas->dev,
			    "%s: gpi reset happened for levm, no need to do unlock\n", __func__);
		return;
	}
	reinit_completion(&mas->tx_cb);
	SPI_LOG_DBG(mas->ipc, false, mas->dev, "%s %d\n", __func__, ret);
	unlock_t = setup_unlock_tre(mas);
	sg_init_table(xfer_tx_sg, 1);
	sg_set_buf(xfer_tx_sg, unlock_t, sizeof(*unlock_t));
	mas->gsi_lock_unlock->desc_cb.spi = spi;
	mas->gsi_lock_unlock->tx_desc = dmaengine_prep_slave_sg(mas->tx,
					mas->gsi_lock_unlock->tx_sg, 1,
					DMA_MEM_TO_DEV, flags);
	if (IS_ERR_OR_NULL(mas->gsi_lock_unlock->tx_desc)) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		ret = -EIO;
		goto err_spi_geni_unlock_bus;
	}
	mas->gsi_lock_unlock->tx_desc->callback = spi_gsi_tx_callback;
	mas->gsi_lock_unlock->tx_desc->callback_param =
		&mas->gsi_lock_unlock->tx_cb_param;
	mas->gsi_lock_unlock->tx_cb_param.userdata =
		&mas->gsi_lock_unlock->desc_cb;
	/* Issue TX */
	mas->gsi_lock_unlock->tx_cookie =
		dmaengine_submit(mas->gsi_lock_unlock->tx_desc);
	if (dma_submit_error(mas->gsi_lock_unlock->tx_cookie)) {
		dev_err(mas->dev, "%s: dmaengine_submit failed (%d)\n",
			__func__, mas->gsi_lock_unlock->tx_cookie);
		ret = -EINVAL;
		goto err_spi_geni_unlock_bus;
	}
	dma_async_issue_pending(mas->tx);
	timeout = wait_for_completion_timeout(&mas->tx_cb,
				msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
	if (timeout <= 0) {
		SPI_LOG_ERR(mas->ipc, true, mas->dev,
			    "%s failed\n", __func__);
		geni_spi_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
		ret = -ETIMEDOUT;
		goto err_spi_geni_unlock_bus;
	}

err_spi_geni_unlock_bus:
	if (ret)
		dmaengine_terminate_all(mas->tx);
}
static int setup_gsi_xfer(struct spi_transfer *xfer,
			  struct spi_geni_master *mas,
			  struct spi_device *spi_slv,
			  struct spi_master *spi)
{
	int ret = 0;
	struct msm_gpi_tre *c0_tre = NULL;
	struct msm_gpi_tre *go_tre = NULL;
	struct msm_gpi_tre *tx_tre = NULL;
	struct msm_gpi_tre *rx_tre = NULL;
	struct scatterlist *xfer_tx_sg = mas->gsi[mas->num_xfers].tx_sg;
	struct scatterlist *xfer_rx_sg = &mas->gsi[mas->num_xfers].rx_sg;
	int rx_nent = 0;
	int tx_nent = 0;
	u8 cmd = 0;
	u8 cs = 0;
	u32 rx_len = 0;
	int go_flags = 0;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
	u32 cs_clk_delay = 0;
	u32 inter_words_delay = 0;

	if (mas->is_le_vm && mas->le_gpi_reset_done) {
		SPI_LOG_DBG(mas->ipc, false, mas->dev,
			    "%s doing gsi lock, due to levm gsi reset\n", __func__);
		ret = spi_geni_lock_bus(spi);
		if (ret) {
			SPI_LOG_DBG(mas->ipc, true, mas->dev,
				    "%s lock bus failed: %d\n", __func__, ret);
			return ret;
		}
		mas->le_gpi_reset_done = false;
	}
	if (spi_slv->controller_data) {
		delay_params =
			(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
		cs_clk_delay =
			delay_params->spi_cs_clk_delay;
		inter_words_delay =
			delay_params->spi_inter_words_delay;
	}
	if ((xfer->bits_per_word != mas->cur_word_len) ||
	    (xfer->speed_hz != mas->cur_speed_hz)) {
		mas->cur_word_len = xfer->bits_per_word;
		mas->cur_speed_hz = xfer->speed_hz;
		tx_nent++;
		c0_tre = setup_config0_tre(xfer, mas, spi_slv->mode,
					   cs_clk_delay, inter_words_delay);
		if (IS_ERR_OR_NULL(c0_tre)) {
			dev_err(mas->dev, "%s:Err setting c0tre:%d\n",
				__func__, ret);
			return PTR_ERR(c0_tre);
		}
	}
	if (!(mas->cur_word_len % MIN_WORD_LEN)) {
		rx_len = ((xfer->len << 3) / mas->cur_word_len);
	} else {
		int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;

		rx_len = (xfer->len / bytes_per_word);
	}
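	/*
	 * Worked example (illustrative): with 8-bit words, a 64-byte buffer
	 * holds rx_len = (64 * 8) / 8 = 64 words; with 10-bit words (not a
	 * multiple of MIN_WORD_LEN), each word occupies 10 / 8 + 1 = 2
	 * bytes, so the same buffer holds rx_len = 64 / 2 = 32 words.
	 */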
	if (xfer->tx_buf && xfer->rx_buf) {
		cmd = SPI_FULL_DUPLEX;
		tx_nent += 2;
		rx_nent++;
	} else if (xfer->tx_buf) {
		cmd = SPI_TX_ONLY;
		tx_nent += 2;
		rx_len = 0;
	} else if (xfer->rx_buf) {
		cmd = SPI_RX_ONLY;
		tx_nent++;
		rx_nent++;
	}
	cs |= spi_slv->chip_select;
	if (!xfer->cs_change) {
		if (!list_is_last(&xfer->transfer_list,
				  &spi->cur_msg->transfers))
			go_flags |= FRAGMENTATION;
	}
	go_tre = setup_go_tre(cmd, cs, rx_len, go_flags, mas);
	sg_init_table(xfer_tx_sg, tx_nent);
	if (rx_nent)
		sg_init_table(xfer_rx_sg, rx_nent);
	if (c0_tre)
		sg_set_buf(xfer_tx_sg++, c0_tre, sizeof(*c0_tre));
	sg_set_buf(xfer_tx_sg++, go_tre, sizeof(*go_tre));
	mas->gsi[mas->num_xfers].desc_cb.spi = spi;
	mas->gsi[mas->num_xfers].desc_cb.xfer = xfer;
	if (cmd & SPI_RX_ONLY) {
		rx_tre = &mas->gsi[mas->num_xfers].rx_dma_tre;
		rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->len, mas, 0);
		if (IS_ERR_OR_NULL(rx_tre)) {
			dev_err(mas->dev, "Err setting up rx tre\n");
			return PTR_ERR(rx_tre);
		}
		sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre));
		mas->gsi[mas->num_xfers].rx_desc =
			dmaengine_prep_slave_sg(mas->rx,
					&mas->gsi[mas->num_xfers].rx_sg, rx_nent,
					DMA_DEV_TO_MEM, flags);
		if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].rx_desc)) {
			dev_err(mas->dev, "Err setting up rx desc\n");
			return -EIO;
		}
		mas->gsi[mas->num_xfers].rx_desc->callback =
			spi_gsi_rx_callback;
		mas->gsi[mas->num_xfers].rx_desc->callback_param =
			&mas->gsi[mas->num_xfers].rx_cb_param;
		mas->gsi[mas->num_xfers].rx_cb_param.userdata =
			&mas->gsi[mas->num_xfers].desc_cb;
		mas->num_rx_eot++;
	}
	if (cmd & SPI_TX_ONLY) {
		tx_tre = &mas->gsi[mas->num_xfers].tx_dma_tre;
		tx_tre = setup_dma_tre(tx_tre, xfer->tx_dma, xfer->len, mas, 1);
		if (IS_ERR_OR_NULL(tx_tre)) {
			dev_err(mas->dev, "Err setting up tx tre\n");
			return PTR_ERR(tx_tre);
		}
		sg_set_buf(xfer_tx_sg++, tx_tre, sizeof(*tx_tre));
		mas->num_tx_eot++;
	}
	mas->gsi[mas->num_xfers].tx_desc = dmaengine_prep_slave_sg(mas->tx,
					mas->gsi[mas->num_xfers].tx_sg, tx_nent,
					DMA_MEM_TO_DEV, flags);
	if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].tx_desc)) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		return -EIO;
	}
	mas->gsi[mas->num_xfers].tx_desc->callback = spi_gsi_tx_callback;
	mas->gsi[mas->num_xfers].tx_desc->callback_param =
		&mas->gsi[mas->num_xfers].tx_cb_param;
	mas->gsi[mas->num_xfers].tx_cb_param.userdata =
		&mas->gsi[mas->num_xfers].desc_cb;
	mas->gsi[mas->num_xfers].tx_cookie =
		dmaengine_submit(mas->gsi[mas->num_xfers].tx_desc);
	if (dma_submit_error(mas->gsi[mas->num_xfers].tx_cookie)) {
		dev_err(mas->dev, "%s: dmaengine_submit failed (%d)\n",
			__func__, mas->gsi[mas->num_xfers].tx_cookie);
		dmaengine_terminate_all(mas->tx);
		return -EINVAL;
	}
	if (cmd & SPI_RX_ONLY) {
		mas->gsi[mas->num_xfers].rx_cookie =
			dmaengine_submit(mas->gsi[mas->num_xfers].rx_desc);
		if (dma_submit_error(mas->gsi[mas->num_xfers].rx_cookie)) {
			dev_err(mas->dev, "%s: dmaengine_submit failed (%d)\n",
				__func__, mas->gsi[mas->num_xfers].rx_cookie);
			dmaengine_terminate_all(mas->rx);
			return -EINVAL;
		}
	}
	dma_async_issue_pending(mas->tx);
	if (cmd & SPI_RX_ONLY)
		dma_async_issue_pending(mas->rx);
	mas->num_xfers++;
	return ret;
}
static int spi_geni_map_buf(struct spi_geni_master *mas,
			    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->rx_buf) {
			ret = geni_se_common_iommu_map_buf(mas->wrapper_dev,
						&xfer->rx_dma, xfer->rx_buf,
						xfer->len, DMA_FROM_DEVICE);
			if (ret) {
				SPI_LOG_ERR(mas->ipc, true, mas->dev,
					    "%s: Mapping Rx buffer %d\n", __func__, ret);
				return ret;
			}
		}
		if (xfer->tx_buf) {
			ret = geni_se_common_iommu_map_buf(mas->wrapper_dev,
						&xfer->tx_dma,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE);
			if (ret) {
				SPI_LOG_ERR(mas->ipc, true, mas->dev,
					    "%s: Mapping Tx buffer %d\n", __func__, ret);
				return ret;
			}
		}
	}
	return 0;
}

static void spi_geni_unmap_buf(struct spi_geni_master *mas,
			       struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->rx_buf)
			geni_se_common_iommu_unmap_buf(mas->wrapper_dev, &xfer->rx_dma,
						       xfer->len, DMA_FROM_DEVICE);
		if (xfer->tx_buf)
			geni_se_common_iommu_unmap_buf(mas->wrapper_dev, &xfer->tx_dma,
						       xfer->len, DMA_TO_DEVICE);
	}
}
static int spi_geni_prepare_message(struct spi_master *spi,
				    struct spi_message *spi_msg)
{
	int ret = 0;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int count;
	unsigned long long start_time;

	start_time = geni_capture_start_time(&mas->spi_rsc, mas->ipc_log_kpi, __func__,
					     mas->spi_kpi);
	if (mas->shared_ee) {
		if (mas->setup) {
			/* Client must respect system suspend */
			if (!pm_runtime_enabled(mas->dev)) {
				SPI_LOG_ERR(mas->ipc, false, mas->dev,
					    "%s: System suspended\n", __func__);
				ret = -EACCES;
				goto exit_prepare_message;
			}
			ret = pm_runtime_get_sync(mas->dev);
			if (ret < 0) {
				dev_err(mas->dev,
					"%s:pm_runtime_get_sync failed %d\n",
					__func__, ret);
				WARN_ON_ONCE(1);
				pm_runtime_put_noidle(mas->dev);
				/* Set device in suspended state since resume failed */
				pm_runtime_set_suspended(mas->dev);
				goto exit_prepare_message;
			}
			ret = 0;
			if (mas->dis_autosuspend) {
				count = atomic_read(&mas->dev->power.usage_count);
				if (count <= 0)
					SPI_LOG_ERR(mas->ipc, false, mas->dev,
						    "resume usage count mismatch:%d",
						    count);
			}
		} else {
			mas->setup = true;
		}
		if (mas->shared_se) {
			ret = spi_geni_lock_bus(spi);
			if (ret) {
				SPI_LOG_ERR(mas->ipc, true, mas->dev,
					    "%s failed: %d\n", __func__, ret);
				goto exit_prepare_message;
			}
		}
	}
	if (pm_runtime_status_suspended(mas->dev) && !mas->is_le_vm) {
		if (!pm_runtime_enabled(mas->dev)) {
			SPI_LOG_ERR(mas->ipc, false, mas->dev,
				    "%s: System suspended\n", __func__);
			ret = -EACCES;
			goto exit_prepare_message;
		}
		ret = pm_runtime_get_sync(mas->dev);
		if (ret < 0) {
			dev_err(mas->dev,
				"%s:pm_runtime_get_sync failed %d\n", __func__, ret);
			WARN_ON_ONCE(1);
			pm_runtime_put_noidle(mas->dev);
			/* Set device in suspended state since resume failed */
			pm_runtime_set_suspended(mas->dev);
			goto exit_prepare_message;
		}
	}
	mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);
	if (mas->cur_xfer_mode < 0) {
		dev_err(mas->dev, "%s: Couldn't select mode %d\n", __func__,
			mas->cur_xfer_mode);
		ret = -EINVAL;
	} else if (mas->cur_xfer_mode == GENI_GPI_DMA) {
		memset(mas->gsi, 0,
		       (sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
		geni_se_select_mode(&mas->spi_rsc, GENI_GPI_DMA);
		ret = spi_geni_map_buf(mas, spi_msg);
	} else {
		geni_se_select_mode(&mas->spi_rsc, mas->cur_xfer_mode);
		ret = setup_fifo_params(spi_msg->spi, spi);
	}

exit_prepare_message:
	geni_capture_stop_time(&mas->spi_rsc, mas->ipc_log_kpi, __func__,
			       mas->spi_kpi, start_time, 0, 0);
	return ret;
}
static int spi_geni_unprepare_message(struct spi_master *spi_mas,
				      struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
	int count = 0;
	unsigned long long start_time;

	start_time = geni_capture_start_time(&mas->spi_rsc, mas->ipc_log_kpi, __func__,
					     mas->spi_kpi);
	mas->cur_speed_hz = 0;
	mas->cur_word_len = 0;
	if (mas->cur_xfer_mode == GENI_GPI_DMA)
		spi_geni_unmap_buf(mas, spi_msg);
	if (mas->shared_ee) {
		if (mas->shared_se)
			spi_geni_unlock_bus(spi_mas);
		if (mas->dis_autosuspend) {
			pm_runtime_put_sync(mas->dev);
			count = atomic_read(&mas->dev->power.usage_count);
			if (count < 0)
				SPI_LOG_ERR(mas->ipc, false, mas->dev,
					    "suspend usage count mismatch:%d",
					    count);
		} else if (!pm_runtime_status_suspended(mas->dev) &&
			   pm_runtime_enabled(mas->dev)) {
			pm_runtime_mark_last_busy(mas->dev);
			pm_runtime_put_autosuspend(mas->dev);
		}
	}
	geni_capture_stop_time(&mas->spi_rsc, mas->ipc_log_kpi, __func__,
			       mas->spi_kpi, start_time, 0, 0);
	return 0;
}
static void spi_geni_set_sampling_rate(struct spi_geni_master *mas,
				       unsigned int major, unsigned int minor)
{
	u32 cpol, cpha, cfg_reg108, cfg_reg109, cfg_seq_start;

	cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
	cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
	cfg_reg108 = geni_read_reg(mas->base, SE_GENI_CFG_REG108);
	cfg_reg109 = geni_read_reg(mas->base, SE_GENI_CFG_REG109);
	/* clear CPOL bit */
	cfg_reg108 &= ~(1 << CPOL_CTRL_SHFT);
	if (major == 1 && minor == 0) {
		/* Write 1 to RX_SI_EN2IO_DELAY reg */
		cfg_reg108 &= ~(0x7 << RX_SI_EN2IO_DELAY_SHFT);
		cfg_reg108 |= (1 << RX_SI_EN2IO_DELAY_SHFT);
		/* Write 0 to RX_IO_POS_FF_EN_SEL reg */
		cfg_reg108 &= ~(1 << RX_IO_POS_FF_EN_SEL_SHFT);
	} else if ((major < 2) || (major == 2 && minor < 5)) {
		/* Write 0 to RX_IO_EN2CORE_EN_DELAY reg */
		cfg_reg108 &= ~(0x7 << RX_IO_EN2CORE_EN_DELAY_SHFT);
	} else {
		/*
		 * Write miso_sampling_ctrl_val to
		 * RX_IO_EN2CORE_EN_DELAY reg
		 */
		cfg_reg108 &= ~(0x7 << RX_IO_EN2CORE_EN_DELAY_SHFT);
		cfg_reg108 |= (mas->miso_sampling_ctrl_val <<
			       RX_IO_EN2CORE_EN_DELAY_SHFT);
	}
	geni_write_reg(cfg_reg108, mas->base, SE_GENI_CFG_REG108);
	/* The registers hold the CPOL/CPHA field bits, not 0/1 values */
	if (!(cpol & CPOL) && !(cpha & CPHA))
		cfg_reg109 = 1;
	else if ((cpol & CPOL) && !(cpha & CPHA))
		cfg_reg109 = 0;
	geni_write_reg(cfg_reg109, mas->base,
		       SE_GENI_CFG_REG109);
	if (!(major == 1 && minor == 0))
		geni_write_reg(1, mas->base, SE_GENI_CFG_SEQ_START);
	cfg_reg108 = geni_read_reg(mas->base, SE_GENI_CFG_REG108);
	cfg_reg109 = geni_read_reg(mas->base, SE_GENI_CFG_REG109);
	cfg_seq_start = geni_read_reg(mas->base, SE_GENI_CFG_SEQ_START);
	SPI_LOG_DBG(mas->ipc, false, mas->dev,
		    "%s cfg108: 0x%x cfg109: 0x%x cfg_seq_start: 0x%x\n",
		    __func__, cfg_reg108, cfg_reg109, cfg_seq_start);
}

/*
 * spi_geni_mas_setup is done once per SPI session.
 * In LA, it is called from prepare_transfer_hardware, whereas
 * in LE, it is called from runtime_resume. Make sure this API
 * is called before any actual transfer begins, as it performs
 * the generic SW/HW initialization required for a SPI transfer.
 */
static int spi_geni_mas_setup(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int proto = geni_se_read_proto(&mas->spi_rsc);
	unsigned int major = 0;
	unsigned int minor = 0;
	int hw_ver = 0;
	int ret = 0;

	if (spi->slave) {
		if (mas->slave_setup)
			goto setup_ipc;
		if (unlikely(proto != GENI_SE_SPI_SLAVE)) {
			dev_err(mas->dev, "Invalid proto %d\n", proto);
			return -ENXIO;
		}
	}
	if (unlikely(!mas->setup)) {
		if ((unlikely(proto != GENI_SE_SPI)) && (!spi->slave)) {
			dev_err(mas->dev, "Invalid proto %d\n", proto);
			return -ENXIO;
		}
		if (spi->slave)
			spi_slv_setup(mas);
		if (mas->master_cross_connect)
			spi_master_setup(mas);
	}
	mas->oversampling = 1;
	mas->gsi_mode = (geni_read_reg(mas->base, GENI_IF_DISABLE_RO) &
			 FIFO_IF_DISABLE);
	if (mas->gsi_mode && !mas->is_deep_sleep) {
		mas->tx = dma_request_slave_channel(mas->dev, "tx");
		if (IS_ERR_OR_NULL(mas->tx)) {
			dev_info(mas->dev, "Failed to get tx DMA ch %ld\n",
				 PTR_ERR(mas->tx));
			goto setup_ipc;
		}
		mas->rx = dma_request_slave_channel(mas->dev, "rx");
		if (IS_ERR_OR_NULL(mas->rx)) {
			dev_info(mas->dev, "Failed to get rx DMA ch %ld\n",
				 PTR_ERR(mas->rx));
			dma_release_channel(mas->tx);
			goto setup_ipc;
		}
		mas->gsi = devm_kzalloc(mas->dev,
				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER),
				GFP_KERNEL);
		if (IS_ERR_OR_NULL(mas->gsi)) {
			dev_err(mas->dev, "Failed to get GSI mem\n");
			dma_release_channel(mas->tx);
			dma_release_channel(mas->rx);
			mas->tx = NULL;
			mas->rx = NULL;
			goto setup_ipc;
		}
		if (mas->shared_se || mas->is_le_vm) {
			mas->gsi_lock_unlock = devm_kzalloc(mas->dev,
					sizeof(struct spi_geni_gsi),
					GFP_KERNEL);
			if (IS_ERR_OR_NULL(mas->gsi_lock_unlock)) {
				dev_err(mas->dev, "Failed to get GSI lock mem\n");
				dma_release_channel(mas->tx);
				dma_release_channel(mas->rx);
				mas->tx = NULL;
				mas->rx = NULL;
				goto setup_ipc;
			}
		}
		mas->tx_event.init.callback = spi_gsi_ch_cb;
		mas->tx_event.init.cb_param = spi;
		mas->tx_event.cmd = MSM_GPI_INIT;
		mas->tx->private = &mas->tx_event;
		mas->rx_event.init.callback = spi_gsi_ch_cb;
		mas->rx_event.init.cb_param = spi;
		mas->rx_event.cmd = MSM_GPI_INIT;
		mas->rx->private = &mas->rx_event;
		ret = dmaengine_slave_config(mas->tx, NULL);
		if (ret) {
			dev_err(mas->dev, "Failed to Config Tx, ret:%d\n", ret);
			dma_release_channel(mas->tx);
			dma_release_channel(mas->rx);
			mas->tx = NULL;
			mas->rx = NULL;
			goto setup_ipc;
		}
		ret = dmaengine_slave_config(mas->rx, NULL);
		if (ret) {
			dev_err(mas->dev, "Failed to Config Rx, ret:%d\n", ret);
			dma_release_channel(mas->tx);
			dma_release_channel(mas->rx);
			mas->tx = NULL;
			mas->rx = NULL;
			goto setup_ipc;
		}
	} else {
		mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(&mas->spi_rsc);
		mas->rx_fifo_depth = geni_se_get_rx_fifo_depth(&mas->spi_rsc);
		mas->tx_fifo_width = geni_se_get_tx_fifo_width(&mas->spi_rsc);
		geni_se_init(&mas->spi_rsc, 0x0, (mas->tx_fifo_depth - 2));
		/* Transmit an entire FIFO worth of data per IRQ */
		mas->tx_wm = 1;
	}
setup_ipc:
	/* Avoid reallocating the IPC log context across deep sleep */
	if (!mas->ipc)
		mas->ipc = ipc_log_context_create(4, dev_name(mas->dev), 0);
	dev_info(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n",
		 mas->tx_fifo_depth, mas->rx_fifo_depth, mas->tx_fifo_width);
	if (!mas->shared_ee)
		mas->setup = true;
	/*
	 * Bypass the hw_version read for LE. QUP common registers
	 * should not be accessed from SVM as that memory is
	 * assigned to PVM. So, bypass reading the hw version
	 * registers and rely on PVM for the version-specific HW
	 * initialization.
	 */
	if (mas->is_le_vm)
		return ret;
	if (!mas->is_deep_sleep) {
		hw_ver = geni_se_get_qup_hw_version(&mas->spi_rsc);
		if (!hw_ver) {
			dev_err(mas->dev, "%s:Err getting HW version %d\n",
				__func__, hw_ver);
		} else {
			major = GENI_SE_VERSION_MAJOR(hw_ver);
			minor = GENI_SE_VERSION_MINOR(hw_ver);
			if ((major == 1) && (minor == 0))
				mas->oversampling = 2;
			SPI_LOG_DBG(mas->ipc, false, mas->dev,
				    "%s:Major:%d Minor:%d os%d\n",
				    __func__, major, minor, mas->oversampling);
		}
	}
	if (mas->set_miso_sampling)
		spi_geni_set_sampling_rate(mas, major, minor);
	if (mas->dis_autosuspend)
		SPI_LOG_DBG(mas->ipc, false, mas->dev,
			    "Auto Suspend is disabled\n");
	mas->is_deep_sleep = false;
	return ret;
}
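
/**
 * spi_geni_prepare_transfer_hardware: Power up the SE before a message queue run.
 * @spi: structure to spi master.
 *
 * Takes a runtime PM vote, moves the pins to their active state for
 * GSI usecases and runs the one-time master setup if not yet done.
 * Not used for LE VM, where the client handles get_sync itself.
 *
 * return: 0 on success, negative error code on failure.
 */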
static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret = 0, count = 0;
	unsigned long long start_time;

	start_time = geni_capture_start_time(&mas->spi_rsc, mas->ipc_log_kpi,
					     __func__, mas->spi_kpi);
	/*
	 * Not required for LE, as the initializations below are specific
	 * to usecases. For LE, the client takes care of get_sync.
	 */
	if (mas->is_le_vm)
		return 0;
	mas->is_xfer_in_progress = true;
	/* Client must respect system suspend */
	if (!pm_runtime_enabled(mas->dev)) {
		SPI_LOG_ERR(mas->ipc, false, mas->dev,
			    "%s: System suspended\n", __func__);
		mas->is_xfer_in_progress = false;
		return -EACCES;
	}
	if (mas->gsi_mode && !mas->shared_ee) {
		int ret = 0;

		if (!mas->is_la_vm) {
			/* Do this only for non TVM LA usecase */
			/* May not be needed here, but maintain parity */
			ret = pinctrl_select_state(mas->geni_pinctrl,
						   mas->geni_gpio_active);
		}
		if (ret)
			SPI_LOG_ERR(mas->ipc, false, mas->dev,
				    "%s: Error %d pinctrl_select_state\n",
				    __func__, ret);
	}
	if (!mas->setup || !mas->shared_ee) {
		ret = pm_runtime_get_sync(mas->dev);
		if (ret < 0) {
			dev_err(mas->dev,
				"%s:pm_runtime_get_sync failed %d\n",
				__func__, ret);
			WARN_ON_ONCE(1);
			pm_runtime_put_noidle(mas->dev);
			/* Set device in suspended since resume failed */
			pm_runtime_set_suspended(mas->dev);
			mas->is_xfer_in_progress = false;
			return ret;
		}
		if (!mas->setup) {
			ret = spi_geni_mas_setup(spi);
			if (ret) {
				SPI_LOG_ERR(mas->ipc, true, mas->dev,
					    "%s mas_setup failed: %d\n",
					    __func__, ret);
				mas->is_xfer_in_progress = false;
				return ret;
			}
		}
		ret = 0;
		if (mas->dis_autosuspend) {
			count = atomic_read(&mas->dev->power.usage_count);
			if (count <= 0)
				SPI_LOG_ERR(mas->ipc, false, mas->dev,
					    "resume usage count mismatch:%d",
					    count);
		}
	}
	geni_capture_stop_time(&mas->spi_rsc, mas->ipc_log_kpi, __func__,
			       mas->spi_kpi, start_time, 0, 0);
	return ret;
}
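
/**
 * spi_geni_unprepare_transfer_hardware: Power down the SE after a message queue run.
 * @spi: structure to spi master.
 *
 * Moves the pins to their sleep state for GSI usecases and releases
 * the runtime PM vote, either synchronously or via autosuspend.
 *
 * return: 0 always.
 */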
static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int count = 0;
	unsigned long long start_time;

	start_time = geni_capture_start_time(&mas->spi_rsc, mas->ipc_log_kpi,
					     __func__, mas->spi_kpi);
	if (mas->shared_ee || mas->is_le_vm) {
		mas->is_xfer_in_progress = false;
		return 0;
	}
	if (mas->gsi_mode) {
		int ret = 0;

		if (!mas->is_la_vm) {
			/* Do this only for non TVM LA usecase */
			ret = pinctrl_select_state(mas->geni_pinctrl,
						   mas->geni_gpio_sleep);
		}
		if (ret)
			SPI_LOG_ERR(mas->ipc, false, mas->dev,
				    "%s: Error %d pinctrl_select_state\n",
				    __func__, ret);
	}
	if (mas->dis_autosuspend) {
		pm_runtime_put_sync(mas->dev);
		count = atomic_read(&mas->dev->power.usage_count);
		if (count < 0)
			SPI_LOG_ERR(mas->ipc, false, mas->dev,
				    "suspend usage count mismatch:%d", count);
	} else if (!pm_runtime_status_suspended(mas->dev) &&
		   pm_runtime_enabled(mas->dev)) {
		pm_runtime_mark_last_busy(mas->dev);
		pm_runtime_put_autosuspend(mas->dev);
	}
	mas->is_xfer_in_progress = false;
	geni_capture_stop_time(&mas->spi_rsc, mas->ipc_log_kpi, __func__,
			       mas->spi_kpi, start_time, 0, 0);
	return 0;
}
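
/**
 * setup_fifo_xfer: Program one transfer in FIFO or SE_DMA mode.
 * @xfer: structure to spi transfer.
 * @mas: structure to spi geni master.
 * @mode: SPI mode bits of the client device.
 * @spi: structure to spi master.
 *
 * Reconfigures word length and clocks if this transfer overrides them,
 * computes the transfer length, picks FIFO vs SE_DMA, issues the GENI
 * m_cmd and preps the DMA buffers or TX watermark as needed.
 *
 * return: 0 on success, negative error code on failure.
 */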
static int setup_fifo_xfer(struct spi_transfer *xfer,
			   struct spi_geni_master *mas, u16 mode,
			   struct spi_master *spi)
{
	int ret = 0;
	u32 m_cmd = 0;
	u32 m_param = 0;
	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
	u32 trans_len = 0, fifo_size = 0;

	if (xfer->bits_per_word != mas->cur_word_len) {
		spi_setup_word_len(mas, mode, xfer->bits_per_word);
		mas->cur_word_len = xfer->bits_per_word;
	}
	/* Speed and bits per word can be overridden per transfer */
	if (xfer->speed_hz != mas->cur_speed_hz) {
		u32 clk_sel = 0;
		u32 m_clk_cfg = 0;
		int idx = 0;
		int div = 0;

		ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
		if (ret) {
			dev_err(mas->dev, "%s:Err setting clks:%d\n",
				__func__, ret);
			return ret;
		}
		mas->cur_speed_hz = xfer->speed_hz;
		clk_sel |= (idx & CLK_SEL_MSK);
		m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
		geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
		geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
		SPI_LOG_DBG(mas->ipc, false, mas->dev,
			    "%s: freq %d idx %d div %d\n", __func__,
			    xfer->speed_hz, idx, div);
	}
	mas->tx_rem_bytes = 0;
	mas->rx_rem_bytes = 0;
	if (xfer->tx_buf && xfer->rx_buf)
		m_cmd = SPI_FULL_DUPLEX;
	else if (xfer->tx_buf)
		m_cmd = SPI_TX_ONLY;
	else if (xfer->rx_buf)
		m_cmd = SPI_RX_ONLY;
	if (!spi->slave)
		spi_tx_cfg &= ~CS_TOGGLE;
	if (!(mas->cur_word_len % MIN_WORD_LEN)) {
		trans_len = ((xfer->len << 3) / mas->cur_word_len) &
			    TRANS_LEN_MSK;
	} else {
		int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;

		trans_len = (xfer->len / bytes_per_word) & TRANS_LEN_MSK;
	}
	if (!xfer->cs_change) {
		if (!list_is_last(&xfer->transfer_list,
				  &spi->cur_msg->transfers))
			m_param |= FRAGMENTATION;
	}
	mas->cur_xfer = xfer;
	if (m_cmd & SPI_TX_ONLY) {
		mas->tx_rem_bytes = xfer->len;
		geni_write_reg(trans_len, mas->base, SE_SPI_TX_TRANS_LEN);
	}
	if (m_cmd & SPI_RX_ONLY) {
		geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
		mas->rx_rem_bytes = xfer->len;
	}
	fifo_size = (mas->tx_fifo_depth * mas->tx_fifo_width /
		     mas->cur_word_len);
	/*
	 * The controller can transfer data either in FIFO mode or in
	 * SE_DMA mode. Either force the controller to choose FIFO mode
	 * for transfers or select the mode dynamically based on the
	 * size of the data.
	 */
	if (spi->slave)
		mas->cur_xfer_mode = GENI_SE_DMA;
	if (mas->disable_dma || trans_len <= fifo_size)
		mas->cur_xfer_mode = GENI_SE_FIFO;
	geni_se_select_mode(&mas->spi_rsc, mas->cur_xfer_mode);
	if (!spi->slave)
		geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
	geni_se_setup_m_cmd(&mas->spi_rsc, m_cmd, m_param);
	SPI_LOG_DBG(mas->ipc, false, mas->dev,
		    "%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x cs%d mode%d freq %d\n",
		    __func__, trans_len, xfer->len, spi_tx_cfg, m_cmd,
		    xfer->cs_change, mas->cur_xfer_mode, xfer->speed_hz);
	if ((m_cmd & SPI_RX_ONLY) && (mas->cur_xfer_mode == GENI_SE_DMA)) {
		ret = geni_se_rx_dma_prep(&mas->spi_rsc, xfer->rx_buf,
					  xfer->len, &xfer->rx_dma);
		if (ret || !xfer->rx_buf) {
			SPI_LOG_ERR(mas->ipc, true, mas->dev,
				    "Failed to setup Rx dma %d\n", ret);
			xfer->rx_dma = 0;
			return ret;
		}
	}
	if (m_cmd & SPI_TX_ONLY) {
		if (mas->cur_xfer_mode == GENI_SE_FIFO) {
			geni_write_reg(mas->tx_wm, mas->base,
				       SE_GENI_TX_WATERMARK_REG);
		} else if (mas->cur_xfer_mode == GENI_SE_DMA) {
			ret = geni_se_tx_dma_prep(&mas->spi_rsc,
						  (void *)xfer->tx_buf,
						  xfer->len, &xfer->tx_dma);
			if (ret || !xfer->tx_buf) {
				SPI_LOG_ERR(mas->ipc, true, mas->dev,
					    "Failed to setup tx dma %d\n", ret);
				xfer->tx_dma = 0;
				return ret;
			}
		}
	}
	/* Ensure all writes are done before the WM interrupt */
	mb();
	return ret;
}
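
/**
 * handle_fifo_timeout: Recover the SE after a FIFO/SE_DMA transfer timeout.
 * @spi: structure to spi master.
 * @xfer: structure to the timed-out spi transfer.
 *
 * Drains spurious RX data, cancels and if necessary aborts the pending
 * m_cmd, and resets and unmaps any outstanding DMA state so the next
 * transfer starts clean.
 *
 * return: None.
 */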
static void handle_fifo_timeout(struct spi_master *spi,
				struct spi_transfer *xfer)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	unsigned long timeout;
	u32 rx_fifo_status;
	int rx_wc, i;

	geni_spi_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
	if (mas->cur_xfer_mode == GENI_SE_FIFO)
		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
	if (spi->slave)
		goto dma_unprep;
	reinit_completion(&mas->xfer_done);
	/* Dummy read the rx fifo for any spurious data */
	if (xfer->rx_buf) {
		rx_fifo_status = geni_read_reg(mas->base,
					       SE_GENI_RX_FIFO_STATUS);
		rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
		for (i = 0; i < rx_wc; i++)
			geni_read_reg(mas->base, SE_GENI_RX_FIFOn);
	}
	geni_se_cancel_m_cmd(&mas->spi_rsc);
	/* Ensure cmd cancel is written */
	mb();
	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
	if (!timeout) {
		reinit_completion(&mas->xfer_done);
		geni_se_abort_m_cmd(&mas->spi_rsc);
		/* Ensure cmd abort is written */
		mb();
		timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
		if (!timeout)
			dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
	}
dma_unprep:
	if (mas->cur_xfer_mode == GENI_SE_DMA) {
		if (xfer->tx_buf && xfer->tx_dma) {
			reinit_completion(&mas->xfer_done);
			writel_relaxed(1, mas->base + SE_DMA_TX_FSM_RST);
			timeout = wait_for_completion_timeout(&mas->xfer_done,
							      HZ);
			if (!timeout)
				dev_err(mas->dev, "DMA TX RESET failed\n");
			geni_se_tx_dma_unprep(&mas->spi_rsc,
					      xfer->tx_dma, xfer->len);
		}
		if (xfer->rx_buf && xfer->rx_dma) {
			reinit_completion(&mas->xfer_done);
			writel_relaxed(1, mas->base + SE_DMA_RX_FSM_RST);
			timeout = wait_for_completion_timeout(&mas->xfer_done,
							      HZ);
			if (!timeout)
				dev_err(mas->dev, "DMA RX RESET failed\n");
			geni_se_rx_dma_unprep(&mas->spi_rsc,
					      xfer->rx_dma, xfer->len);
		}
	}
	if (spi->slave && !mas->dis_autosuspend)
		pm_runtime_put_sync_suspend(mas->dev);
}
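
/**
 * spi_geni_transfer_one: Execute a single spi_transfer.
 * @spi: structure to spi master.
 * @slv: structure to spi device.
 * @xfer: structure to spi transfer.
 *
 * Validates the transfer, derives a length- and speed-based timeout,
 * then runs the transfer in FIFO/SE_DMA mode or via GSI, waiting for
 * the respective completion callbacks.
 *
 * return: 0 on success, negative error code on failure.
 */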
static int spi_geni_transfer_one(struct spi_master *spi,
				 struct spi_device *slv,
				 struct spi_transfer *xfer)
{
	int ret = 0;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	unsigned long timeout, xfer_timeout;
	unsigned long long start_time;

	start_time = geni_capture_start_time(&mas->spi_rsc, mas->ipc_log_kpi,
					     __func__, mas->spi_kpi);
	if ((xfer->tx_buf == NULL) && (xfer->rx_buf == NULL)) {
		dev_err(mas->dev, "Invalid xfer both tx rx are NULL\n");
		return -EINVAL;
	}
	/* Check for zero-length transfer */
	if (xfer->len < 1) {
		dev_err(mas->dev, "Zero length transfer\n");
		return -EINVAL;
	}
	/*
	 * Double-check the PM status: the client might not have taken a
	 * wakelock and might continue to queue more transfers. Post
	 * auto-suspend, system suspend can force the driver into suspend,
	 * hence it is the client's responsibility to not let system
	 * suspend trigger.
	 */
	if (pm_runtime_status_suspended(mas->dev)) {
		SPI_LOG_ERR(mas->ipc, true, mas->dev,
			    "%s: device is PM suspended\n", __func__);
		return -EACCES;
	}
	xfer_timeout = (1000 * xfer->len * BITS_PER_BYTE) / xfer->speed_hz;
	if (mas->xfer_timeout_offset) {
		xfer_timeout += mas->xfer_timeout_offset;
	} else {
		/* Master <-> slave sync is valid only for a smaller window */
		if (spi->slave)
			xfer_timeout += SPI_SLAVE_SYNC_XFER_TIMEOUT_OFFSET;
		else
			xfer_timeout += SPI_XFER_TIMEOUT_OFFSET;
	}
	SPI_LOG_ERR(mas->ipc, false, mas->dev,
		    "current xfer_timeout:%lu ms.\n", xfer_timeout);
	xfer_timeout = msecs_to_jiffies(xfer_timeout);
	if (mas->cur_xfer_mode != GENI_GPI_DMA) {
		reinit_completion(&mas->xfer_done);
		ret = setup_fifo_xfer(xfer, mas, slv->mode, spi);
		if (ret) {
			SPI_LOG_ERR(mas->ipc, true, mas->dev,
				    "setup_fifo_xfer failed: %d\n", ret);
			mas->cur_xfer = NULL;
			goto err_fifo_geni_transfer_one;
		}
		if (spi->slave)
			mas->slave_state = true;
		timeout = wait_for_completion_timeout(&mas->xfer_done,
						      xfer_timeout);
		if (spi->slave)
			mas->slave_state = false;
		if (!timeout) {
			SPI_LOG_ERR(mas->ipc, true, mas->dev,
				    "Xfer[len %d tx %pK rx %pK n %d] timed out.\n",
				    xfer->len, xfer->tx_buf, xfer->rx_buf,
				    xfer->bits_per_word);
			mas->cur_xfer = NULL;
			ret = -ETIMEDOUT;
			goto err_fifo_geni_transfer_one;
		}
		if (mas->cur_xfer_mode == GENI_SE_DMA) {
			if (xfer->tx_buf)
				geni_se_tx_dma_unprep(&mas->spi_rsc,
						      xfer->tx_dma, xfer->len);
			if (xfer->rx_buf)
				geni_se_rx_dma_unprep(&mas->spi_rsc,
						      xfer->rx_dma, xfer->len);
		}
	} else {
		mas->num_tx_eot = 0;
		mas->num_rx_eot = 0;
		mas->num_xfers = 0;
		mas->qn_err = false;
		reinit_completion(&mas->tx_cb);
		reinit_completion(&mas->rx_cb);
		ret = setup_gsi_xfer(xfer, mas, slv, spi);
		if (ret) {
			SPI_LOG_ERR(mas->ipc, true, mas->dev,
				    "setup_gsi_xfer failed: %d\n", ret);
			mas->cur_xfer = NULL;
			goto err_gsi_geni_transfer_one;
		}
		if ((mas->num_xfers >= NUM_SPI_XFER) ||
		    (list_is_last(&xfer->transfer_list,
				  &spi->cur_msg->transfers))) {
			int i;

			for (i = 0; i < mas->num_tx_eot; i++) {
				timeout = wait_for_completion_timeout(
						&mas->tx_cb, xfer_timeout);
				if (!timeout) {
					SPI_LOG_ERR(mas->ipc, true, mas->dev,
						    "Tx[%d] timeout%lu\n",
						    i, timeout);
					ret = -ETIMEDOUT;
					goto err_gsi_geni_transfer_one;
				}
			}
			for (i = 0; i < mas->num_rx_eot; i++) {
				timeout = wait_for_completion_timeout(
						&mas->rx_cb, xfer_timeout);
				if (!timeout) {
					SPI_LOG_ERR(mas->ipc, true, mas->dev,
						    "Rx[%d] timeout%lu\n",
						    i, timeout);
					ret = -ETIMEDOUT;
					goto err_gsi_geni_transfer_one;
				}
			}
			if (mas->qn_err) {
				ret = -EIO;
				mas->qn_err = false;
				goto err_gsi_geni_transfer_one;
			}
		}
	}
	geni_capture_stop_time(&mas->spi_rsc, mas->ipc_log_kpi, __func__,
			       mas->spi_kpi, start_time, xfer->len,
			       mas->cur_speed_hz);
	return ret;
err_gsi_geni_transfer_one:
	geni_spi_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
	dmaengine_terminate_all(mas->tx);
	if (mas->is_le_vm)
		mas->le_gpi_reset_done = true;
	return ret;
err_fifo_geni_transfer_one:
	handle_fifo_timeout(spi, xfer);
	return ret;
}
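
/**
 * geni_spi_handle_tx: Fill the TX FIFO from the current transfer buffer.
 * @mas: structure to spi geni master.
 *
 * Called from the IRQ handler on a TX watermark. Packs the transfer
 * bytes into FIFO words, honouring non-byte-aligned bits-per-word, and
 * clears the watermark once the last byte has been queued.
 *
 * return: None.
 */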
static void geni_spi_handle_tx(struct spi_geni_master *mas)
{
	int i = 0;
	int tx_fifo_width = (mas->tx_fifo_width >> 3);
	int max_bytes = 0;
	const u8 *tx_buf = NULL;

	if (!mas->cur_xfer)
		return;
	/*
	 * For non-byte-aligned bits-per-word values:
	 * the assumption is that each SPI word is accommodated in
	 * ceil(bits_per_word / bits_per_byte) bytes and the next
	 * SPI word starts at the next byte.
	 * In such cases we can fit 1 SPI word per FIFO word, so adjust
	 * the max bytes that can be sent per IRQ accordingly.
	 */
	if ((mas->tx_fifo_width % mas->cur_word_len))
		max_bytes = (mas->tx_fifo_depth - mas->tx_wm) *
			    ((mas->cur_word_len / BITS_PER_BYTE) + 1);
	else
		max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * tx_fifo_width;
	tx_buf = mas->cur_xfer->tx_buf;
	tx_buf += (mas->cur_xfer->len - mas->tx_rem_bytes);
	max_bytes = min_t(int, mas->tx_rem_bytes, max_bytes);
	while (i < max_bytes) {
		int j;
		u32 fifo_word = 0;
		u8 *fifo_byte;
		int bytes_per_fifo = tx_fifo_width;
		int bytes_to_write = 0;

		if ((mas->tx_fifo_width % mas->cur_word_len))
			bytes_per_fifo =
				(mas->cur_word_len / BITS_PER_BYTE) + 1;
		bytes_to_write = min_t(int, (max_bytes - i), bytes_per_fifo);
		fifo_byte = (u8 *)&fifo_word;
		for (j = 0; j < bytes_to_write; j++)
			fifo_byte[j] = tx_buf[i++];
		geni_write_reg(fifo_word, mas->base, SE_GENI_TX_FIFOn);
		/* Ensure FIFO writes are written in order */
		mb();
	}
	mas->tx_rem_bytes -= max_bytes;
	if (!mas->tx_rem_bytes) {
		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
		/* Barrier here before return to prevent further ISRs */
		mb();
	}
}
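
/**
 * geni_spi_handle_rx: Drain the RX FIFO into the current transfer buffer.
 * @mas: structure to spi geni master.
 *
 * Called from the IRQ handler on an RX watermark or RX_LAST. Uses the
 * FIFO status word count and last-byte-valid fields to copy out exactly
 * the bytes that belong to the transfer.
 *
 * return: None.
 */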
static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
	int i = 0;
	int fifo_width = (mas->tx_fifo_width >> 3);
	u32 rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
	int rx_bytes = 0;
	int rx_wc = 0;
	u8 *rx_buf = NULL;

	if (!mas->cur_xfer)
		return;
	rx_buf = mas->cur_xfer->rx_buf;
	rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
	if (rx_fifo_status & RX_LAST) {
		int rx_last_byte_valid =
			(rx_fifo_status & RX_LAST_BYTE_VALID_MSK) >>
			RX_LAST_BYTE_VALID_SHFT;

		if (rx_last_byte_valid && (rx_last_byte_valid < 4)) {
			rx_wc -= 1;
			rx_bytes += rx_last_byte_valid;
		}
	}
	if (!(mas->tx_fifo_width % mas->cur_word_len))
		rx_bytes += rx_wc * fifo_width;
	else
		rx_bytes += rx_wc * ((mas->cur_word_len / BITS_PER_BYTE) + 1);
	rx_bytes = min_t(int, mas->rx_rem_bytes, rx_bytes);
	rx_buf += (mas->cur_xfer->len - mas->rx_rem_bytes);
	while (i < rx_bytes) {
		u32 fifo_word = 0;
		u8 *fifo_byte;
		int bytes_per_fifo = fifo_width;
		int read_bytes = 0;
		int j;

		if ((mas->tx_fifo_width % mas->cur_word_len))
			bytes_per_fifo =
				(mas->cur_word_len / BITS_PER_BYTE) + 1;
		read_bytes = min_t(int, (rx_bytes - i), bytes_per_fifo);
		fifo_word = geni_read_reg(mas->base, SE_GENI_RX_FIFOn);
		fifo_byte = (u8 *)&fifo_word;
		for (j = 0; j < read_bytes; j++)
			rx_buf[i++] = fifo_byte[j];
	}
	mas->rx_rem_bytes -= rx_bytes;
}
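
/**
 * geni_spi_irq: SE interrupt handler for FIFO and SE_DMA modes.
 * @irq: interrupt number.
 * @data: pointer to the spi geni master.
 *
 * Services RX/TX watermark interrupts in FIFO mode, acknowledges DMA
 * done interrupts in SE_DMA mode, and completes xfer_done when the
 * command finishes, is cancelled or is aborted.
 *
 * return: IRQ_HANDLED.
 */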
static irqreturn_t geni_spi_irq(int irq, void *data)
{
	struct spi_geni_master *mas = data;
	u32 m_irq = 0;
	unsigned long long start_time;

	start_time = geni_capture_start_time(&mas->spi_rsc, mas->ipc_log_kpi,
					     __func__, mas->spi_kpi);
	if (pm_runtime_status_suspended(mas->dev)) {
		SPI_LOG_DBG(mas->ipc, false, mas->dev,
			    "%s: device is suspended\n", __func__);
		goto exit_geni_spi_irq;
	}
	m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
		if ((m_irq & M_RX_FIFO_WATERMARK_EN) ||
		    (m_irq & M_RX_FIFO_LAST_EN))
			geni_spi_handle_rx(mas);
		if ((m_irq & M_TX_FIFO_WATERMARK_EN))
			geni_spi_handle_tx(mas);
		if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
		    (m_irq & M_CMD_ABORT_EN)) {
			mas->cmd_done = true;
			/*
			 * If this happens, a CMD_DONE arrived before all the
			 * buffer bytes were sent out. This is unusual: log
			 * the condition and disable the WM interrupt to
			 * prevent the system from stalling due to an
			 * interrupt storm.
			 * If this happens when not all Rx bytes have been
			 * received, log the condition.
			 */
			if (mas->tx_rem_bytes) {
				geni_write_reg(0, mas->base,
					       SE_GENI_TX_WATERMARK_REG);
				SPI_LOG_DBG(mas->ipc, false, mas->dev,
					    "%s:Premature Done.tx_rem%d bpw%d\n",
					    __func__, mas->tx_rem_bytes,
					    mas->cur_word_len);
			}
			if (mas->rx_rem_bytes)
				SPI_LOG_DBG(mas->ipc, false, mas->dev,
					    "%s:Premature Done.rx_rem%d bpw%d\n",
					    __func__, mas->rx_rem_bytes,
					    mas->cur_word_len);
		}
	} else if (mas->cur_xfer_mode == GENI_SE_DMA) {
		u32 dma_tx_status = geni_read_reg(mas->base,
						  SE_DMA_TX_IRQ_STAT);
		u32 dma_rx_status = geni_read_reg(mas->base,
						  SE_DMA_RX_IRQ_STAT);

		if (dma_tx_status)
			geni_write_reg(dma_tx_status, mas->base,
				       SE_DMA_TX_IRQ_CLR);
		if (dma_rx_status)
			geni_write_reg(dma_rx_status, mas->base,
				       SE_DMA_RX_IRQ_CLR);
		if (dma_tx_status & TX_DMA_DONE)
			mas->tx_rem_bytes = 0;
		if (dma_rx_status & RX_DMA_DONE)
			mas->rx_rem_bytes = 0;
		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes)
			mas->cmd_done = true;
		if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN))
			mas->cmd_done = true;
	}
exit_geni_spi_irq:
	geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
	if (mas->cmd_done) {
		mas->cmd_done = false;
		complete(&mas->xfer_done);
	}
	geni_capture_stop_time(&mas->spi_rsc, mas->ipc_log_kpi, __func__,
			       mas->spi_kpi, start_time, 0, 0);
	return IRQ_HANDLED;
}

/**
 * spi_get_dt_property: To read DTSI properties.
 * @pdev: structure to platform device.
 * @geni_mas: structure to spi geni master.
 * @spi: structure to spi master.
 *
 * This function reads the SPI DTSI properties.
 *
 * return: None.
 */
static void spi_get_dt_property(struct platform_device *pdev,
				struct spi_geni_master *geni_mas,
				struct spi_master *spi)
{
	if (of_property_read_bool(pdev->dev.of_node, "qcom,le-vm")) {
		geni_mas->is_le_vm = true;
		dev_info(&pdev->dev, "LE-VM usecase\n");
	}
	if (of_property_read_bool(pdev->dev.of_node, "qcom,la-vm")) {
		geni_mas->is_la_vm = true;
		dev_info(&pdev->dev, "LA-VM usecase\n");
	}
	spi->rt = of_property_read_bool(pdev->dev.of_node, "qcom,rt");
	geni_mas->dis_autosuspend =
		of_property_read_bool(pdev->dev.of_node,
				      "qcom,disable-autosuspend");
	/*
	 * The shared_se property is set when the SPI bus is used
	 * simultaneously from two Execution Environments.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,shared_se")) {
		geni_mas->shared_se = true;
		geni_mas->shared_ee = true;
	} else {
		/*
		 * The shared_ee property is set when the SPI bus is used
		 * from dual Execution Environments, unlike the gsi_mode
		 * flag, which is set when the SE is in GSI mode.
		 */
		geni_mas->shared_ee =
			of_property_read_bool(pdev->dev.of_node,
					      "qcom,shared_ee");
	}
	geni_mas->set_miso_sampling =
		of_property_read_bool(pdev->dev.of_node,
				      "qcom,set-miso-sampling");
	if (geni_mas->set_miso_sampling) {
		if (!of_property_read_u32(pdev->dev.of_node,
					  "qcom,miso-sampling-ctrl-val",
					  &geni_mas->miso_sampling_ctrl_val))
			dev_info(&pdev->dev, "MISO_SAMPLING_SET: %d\n",
				 geni_mas->miso_sampling_ctrl_val);
	}
	geni_mas->disable_dma =
		of_property_read_bool(pdev->dev.of_node, "qcom,disable-dma");
	of_property_read_u32(pdev->dev.of_node, "qcom,xfer-timeout-offset",
			     &geni_mas->xfer_timeout_offset);
	if (geni_mas->xfer_timeout_offset)
		dev_info(&pdev->dev, "%s: DT based xfer timeout offset: %d\n",
			 __func__, geni_mas->xfer_timeout_offset);
	if (of_property_read_bool(pdev->dev.of_node,
				  "qcom,master-cross-connect"))
		geni_mas->master_cross_connect = true;
	geni_mas->slave_cross_connected =
		of_property_read_bool(pdev->dev.of_node, "slv-cross-connected");
}
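
/**
 * spi_geni_probe: Probe an SPI GENI serial engine.
 * @pdev: structure to platform device.
 *
 * Allocates the controller in master or slave mode, reads DT
 * properties, initializes resources (ICC paths, pinctrl, clocks, IRQ)
 * for non-LE usecases, maps the SE register space, sets up runtime PM
 * and registers the SPI master.
 *
 * return: 0 on success, negative error code on failure.
 */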
static int spi_geni_probe(struct platform_device *pdev)
{
	int ret;
	struct spi_master *spi;
	struct spi_geni_master *geni_mas;
	struct resource *res;
	bool slave_en;
	struct device *dev = &pdev->dev;
	struct geni_se *spi_rsc;

	slave_en = of_property_read_bool(pdev->dev.of_node, "qcom,slv-ctrl");
	spi = __spi_alloc_controller(&pdev->dev,
				     sizeof(struct spi_geni_master), slave_en);
	if (!spi) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Failed to alloc spi struct\n");
		goto spi_geni_probe_err;
	}
	if (slave_en)
		spi->slave_abort = spi_slv_abort;
	pr_info("boot_kpi: M - DRIVER GENI_SPI Init\n");
	platform_set_drvdata(pdev, spi);
	geni_mas = spi_master_get_devdata(spi);
	geni_mas->dev = dev;
	geni_mas->spi_rsc.dev = dev;
	geni_mas->spi_rsc.wrapper = dev_get_drvdata(dev->parent);
	spi->dev.of_node = pdev->dev.of_node;
	if (!geni_mas->spi_rsc.wrapper) {
		dev_err(&pdev->dev, "SE Wrapper is NULL, deferring probe\n");
		return -EPROBE_DEFER;
	}
	spi_get_dt_property(pdev, geni_mas, spi);
	geni_mas->wrapper_dev = dev->parent;
	/*
	 * For LE, clock, GPIO and ICB voting is provided by LA. The SPI
	 * operates in GSI mode only for the LE usecase, so the SE IRQ is
	 * not required. The properties below are not present in the SPI
	 * LE devicetree.
	 */
	if (!geni_mas->is_le_vm) {
		/* Set voting values for the paths: core, config and DDR */
		spi_rsc = &geni_mas->spi_rsc;
		ret = geni_se_common_resources_init(spi_rsc,
				SPI_CORE2X_VOTE, APPS_PROC_TO_QUP_VOTE,
				(DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
		if (ret) {
			dev_err(&pdev->dev, "Error geni_se_resources_init\n");
			goto spi_geni_probe_err;
		}
		/* Call set_bw once, then do icc_enable/disable */
		ret = geni_icc_set_bw(spi_rsc);
		if (ret) {
			dev_err(&pdev->dev, "%s: icc set bw failed ret:%d\n",
				__func__, ret);
			return ret;
		}
		/* To remove the votes, do icc enable/disable */
		ret = geni_icc_enable(spi_rsc);
		if (ret) {
			dev_err(&pdev->dev, "%s: icc enable failed ret:%d\n",
				__func__, ret);
			return ret;
		}
		geni_mas->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
		if (IS_ERR_OR_NULL(geni_mas->geni_pinctrl)) {
			dev_err(&pdev->dev, "No pinctrl config specified!\n");
			ret = PTR_ERR(geni_mas->geni_pinctrl);
			goto spi_geni_probe_err;
		}
		geni_mas->geni_gpio_active =
			pinctrl_lookup_state(geni_mas->geni_pinctrl,
					     PINCTRL_DEFAULT);
		if (IS_ERR_OR_NULL(geni_mas->geni_gpio_active)) {
			dev_err(&pdev->dev, "No default config specified!\n");
			ret = PTR_ERR(geni_mas->geni_gpio_active);
			goto spi_geni_probe_err;
		}
		geni_mas->geni_gpio_sleep =
			pinctrl_lookup_state(geni_mas->geni_pinctrl,
					     PINCTRL_SLEEP);
		if (IS_ERR_OR_NULL(geni_mas->geni_gpio_sleep)) {
			dev_err(&pdev->dev, "No sleep config specified!\n");
			ret = PTR_ERR(geni_mas->geni_gpio_sleep);
			goto spi_geni_probe_err;
		}
		ret = pinctrl_select_state(geni_mas->geni_pinctrl,
					   geni_mas->geni_gpio_sleep);
		if (ret) {
			dev_err(&pdev->dev, "Failed to set sleep configuration\n");
			goto spi_geni_probe_err;
		}
		geni_mas->spi_rsc.clk = devm_clk_get(&pdev->dev, "se-clk");
		if (IS_ERR(geni_mas->spi_rsc.clk)) {
			ret = PTR_ERR(geni_mas->spi_rsc.clk);
			dev_err(&pdev->dev, "Err getting SE Core clk %d\n",
				ret);
			goto spi_geni_probe_err;
		}
		geni_mas->m_ahb_clk = devm_clk_get(dev->parent, "m-ahb");
		if (IS_ERR(geni_mas->m_ahb_clk)) {
			ret = PTR_ERR(geni_mas->m_ahb_clk);
			dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
			goto spi_geni_probe_err;
		}
		geni_mas->s_ahb_clk = devm_clk_get(dev->parent, "s-ahb");
		if (IS_ERR(geni_mas->s_ahb_clk)) {
			ret = PTR_ERR(geni_mas->s_ahb_clk);
			dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
			goto spi_geni_probe_err;
		}
		geni_mas->irq = platform_get_irq(pdev, 0);
		if (geni_mas->irq < 0) {
			dev_err(&pdev->dev, "Err getting IRQ\n");
			ret = geni_mas->irq;
			goto spi_geni_probe_err;
		}
		irq_set_status_flags(geni_mas->irq, IRQ_NOAUTOEN);
		ret = devm_request_irq(&pdev->dev, geni_mas->irq,
				       geni_spi_irq, IRQF_TRIGGER_HIGH,
				       "spi_geni", geni_mas);
		if (ret) {
			dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
				geni_mas->irq, ret);
			goto spi_geni_probe_err;
		}
	}
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev, "could not set DMA mask\n");
			goto spi_geni_probe_err;
		}
	}
	if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
				 &spi->max_speed_hz)) {
		dev_err(&pdev->dev, "Max frequency not specified.\n");
		ret = -ENXIO;
		goto spi_geni_probe_err;
	}
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "se_phys");
	if (!res) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "Err getting IO region\n");
		goto spi_geni_probe_err;
	}
	geni_mas->phys_addr = res->start;
	geni_mas->size = resource_size(res);
	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!geni_mas->base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Err IO mapping iomem\n");
		goto spi_geni_probe_err;
	}
	geni_mas->spi_rsc.base = geni_mas->base;
	geni_mas->is_deep_sleep = false;
	spi->mode_bits = (SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH);
	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	spi->num_chipselect = SPI_NUM_CHIPSELECT;
	spi->prepare_transfer_hardware = spi_geni_prepare_transfer_hardware;
	spi->prepare_message = spi_geni_prepare_message;
	spi->unprepare_message = spi_geni_unprepare_message;
	spi->transfer_one = spi_geni_transfer_one;
	spi->unprepare_transfer_hardware =
		spi_geni_unprepare_transfer_hardware;
	spi->auto_runtime_pm = false;
	init_completion(&geni_mas->xfer_done);
	init_completion(&geni_mas->tx_cb);
	init_completion(&geni_mas->rx_cb);
	pm_runtime_set_suspended(&pdev->dev);
	/* For LE VM, skip the autosuspend timer */
	if (!geni_mas->is_le_vm && !geni_mas->dis_autosuspend) {
		pm_runtime_set_autosuspend_delay(&pdev->dev,
						 SPI_AUTO_SUSPEND_DELAY);
		pm_runtime_use_autosuspend(&pdev->dev);
	}
	pm_runtime_enable(&pdev->dev);
	geni_mas->ipc = ipc_log_context_create(4, dev_name(geni_mas->dev), 0);
	if (!geni_mas->ipc && IS_ENABLED(CONFIG_IPC_LOGGING))
		dev_err(&pdev->dev, "Error creating IPC logs\n");
	if (!geni_mas->is_le_vm)
		SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev,
			    "%s: GENI_TO_CORE:%d CPU_TO_GENI:%d GENI_TO_DDR:%d\n",
			    __func__,
			    spi_rsc->icc_paths[GENI_TO_CORE].avg_bw,
			    spi_rsc->icc_paths[CPU_TO_GENI].avg_bw,
			    spi_rsc->icc_paths[GENI_TO_DDR].avg_bw);
	if (!geni_mas->is_le_vm) {
		ret = geni_icc_disable(spi_rsc);
		if (ret) {
			dev_err(&pdev->dev, "%s: icc disable failed ret:%d\n",
				__func__, ret);
			return ret;
		}
	}
	ret = spi_register_master(spi);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register SPI master\n");
		goto spi_geni_probe_err;
	}
	ret = sysfs_create_file(&geni_mas->dev->kobj,
				&dev_attr_spi_slave_state.attr);
	device_create_file(geni_mas->dev, &dev_attr_capture_kpi);
	geni_mas->is_xfer_in_progress = false;
	dev_info(&pdev->dev, "%s: completed %d\n", __func__, ret);
	pr_info("boot_kpi: M - DRIVER GENI_SPI_%d Ready\n", spi->bus_num);
	return ret;
spi_geni_probe_err:
	dev_info(&pdev->dev, "%s: ret:%d\n", __func__, ret);
	spi_master_put(spi);
	return ret;
}
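
/**
 * spi_geni_remove: Tear down an SPI GENI serial engine.
 * @pdev: structure to platform device.
 *
 * Removes the sysfs attributes, turns the clocks and ICC votes off,
 * unregisters the SPI master, disables runtime PM and destroys the IPC
 * log contexts.
 *
 * return: result of the final geni_icc_disable call.
 */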
static int spi_geni_remove(struct platform_device *pdev)
{
	int ret;
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(master);

	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_spi_slave_state.attr);
	device_remove_file(geni_mas->dev, &dev_attr_capture_kpi);
	geni_se_common_clks_off(geni_mas->spi_rsc.clk, geni_mas->m_ahb_clk,
				geni_mas->s_ahb_clk);
	ret = geni_icc_disable(&geni_mas->spi_rsc);
	if (ret)
		SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev,
			    "%s failing at geni_icc_disable ret=%d\n",
			    __func__, ret);
	spi_unregister_master(master);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (geni_mas->ipc)
		ipc_log_context_destroy(geni_mas->ipc);
	if (geni_mas->ipc_log_kpi)
		ipc_log_context_destroy(geni_mas->ipc_log_kpi);
	return ret;
}

#if IS_ENABLED(CONFIG_PM)
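/**
 * spi_geni_gpi_pause_resume: Pause or resume the GPI channels around suspend.
 * @geni_mas: structure to spi geni master.
 * @is_suspend: true to pause the channels, false to resume them.
 *
 * return: 0 on success, -EINVAL if the dmaengine call fails.
 */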
static int spi_geni_gpi_pause_resume(struct spi_geni_master *geni_mas,
				     bool is_suspend)
{
	int tx_ret = 0;

	/*
	 * Do DMA operations only on the tx channel here, as the GPI driver
	 * functions internally take care of the rx channel as well. If we
	 * call this for both channels, the channels end up in a wrong state
	 * due to the double operations.
	 */
	if (geni_mas->tx) {
		if (is_suspend) {
			/*
			 * For deep sleep we need to restore the config
			 * similar to the probe, hence use the
			 * MSM_GPI_DEEP_SLEEP_INIT flag; gpi_resume will then
			 * behave like probe. After that, the flag is set back
			 * to MSM_GPI_DEFAULT, meaning the GPI probe state has
			 * been restored.
			 */
			if (geni_mas->is_deep_sleep)
				geni_mas->tx_event.cmd = MSM_GPI_DEEP_SLEEP_INIT;
			tx_ret = dmaengine_pause(geni_mas->tx);
		} else {
			/* Same deep sleep handling as the suspend path above */
			if (geni_mas->is_deep_sleep)
				geni_mas->tx_event.cmd = MSM_GPI_DEEP_SLEEP_INIT;
			tx_ret = dmaengine_resume(geni_mas->tx);
			if (geni_mas->is_deep_sleep) {
				geni_mas->tx_event.cmd = MSM_GPI_DEFAULT;
				geni_mas->is_deep_sleep = false;
			}
		}
		if (tx_ret) {
			SPI_LOG_ERR(geni_mas->ipc, true, geni_mas->dev,
				    "%s failed: tx:%d status:%d\n",
				    __func__, tx_ret, is_suspend);
			return -EINVAL;
		}
	}
	return 0;
}
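
/**
 * spi_geni_levm_suspend_proc: LE VM-specific runtime suspend path.
 * @geni_mas: structure to spi geni master.
 * @spi: structure to spi master.
 * @start_time: KPI timestamp taken by the caller.
 *
 * Unlocks the bus, performs the one-time master setup if still pending
 * and pauses the GPI channels. No clock or ICC handling here, as LA
 * owns those resources for LE.
 *
 * return: 0 on success, negative error code on failure.
 */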
static int spi_geni_levm_suspend_proc(struct spi_geni_master *geni_mas,
				      struct spi_master *spi,
				      unsigned long long start_time)
{
	int ret = 0;

	spi_geni_unlock_bus(spi);
	if (!geni_mas->setup) {
		/*
		 * This takes care of all GPI/DMA initialization and the
		 * generic SW/HW initialization required for a SPI transfer.
		 * It is called once per bootup session.
		 */
		ret = spi_geni_mas_setup(spi);
		if (ret) {
			SPI_LOG_ERR(geni_mas->ipc, true, geni_mas->dev,
				    "%s mas_setup failed: %d\n", __func__, ret);
			return ret;
		}
	}
	if (geni_mas->gsi_mode) {
		ret = spi_geni_gpi_pause_resume(geni_mas, true);
		if (ret) {
			SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev,
				    "%s: ret:%d\n", __func__, ret);
			return ret;
		}
	}
	SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev, "%s: ret:%d\n",
		    __func__, ret);
	geni_capture_stop_time(&geni_mas->spi_rsc, geni_mas->ipc_log_kpi,
			       __func__, geni_mas->spi_kpi, start_time, 0, 0);
	return 0;
}
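
/**
 * spi_geni_runtime_suspend: Runtime suspend the SE.
 * @dev: structure to device.
 *
 * Disables the SE IRQ, branches to the LE VM path where applicable,
 * pauses the GPI channels in GSI mode and turns off clocks, resources
 * and ICC votes as permitted by the LA VM/shared-EE/shared-SE usecase.
 *
 * return: 0 on success, negative error code on failure.
 */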
static int spi_geni_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
	unsigned long long start_time;

	start_time = geni_capture_start_time(&geni_mas->spi_rsc,
					     geni_mas->ipc_log_kpi, __func__,
					     geni_mas->spi_kpi);
	SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev, "%s: %d\n",
		    __func__, ret);
	disable_irq(geni_mas->irq);
	if (geni_mas->is_le_vm)
		return spi_geni_levm_suspend_proc(geni_mas, spi, start_time);
	if (geni_mas->gsi_mode) {
		ret = spi_geni_gpi_pause_resume(geni_mas, true);
		if (ret)
			return ret;
	}
	/* For tui usecase LA should control clk/gpio/icb */
	if (geni_mas->is_la_vm)
		goto exit_rt_suspend;
	/* Do not unconfigure the GPIOs for a shared_se usecase */
	if (geni_mas->shared_ee && !geni_mas->shared_se)
		goto exit_rt_suspend;
	if (geni_mas->gsi_mode) {
		geni_se_common_clks_off(geni_mas->spi_rsc.clk,
					geni_mas->m_ahb_clk,
					geni_mas->s_ahb_clk);
		ret = geni_icc_disable(&geni_mas->spi_rsc);
		if (ret)
			SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev,
				    "%s failing at geni_icc_disable ret=%d\n",
				    __func__, ret);
		return ret;
	}
exit_rt_suspend:
	ret = geni_se_resources_off(&geni_mas->spi_rsc);
	ret = geni_icc_disable(&geni_mas->spi_rsc);
	if (ret)
		SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev,
			    "%s failing at geni_icc_disable ret=%d\n",
			    __func__, ret);
	geni_capture_stop_time(&geni_mas->spi_rsc, geni_mas->ipc_log_kpi,
			       __func__, geni_mas->spi_kpi, start_time, 0, 0);
	return ret;
}
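
/**
 * spi_geni_levm_resume_proc: LE VM-specific runtime resume path.
 * @geni_mas: structure to spi geni master.
 * @spi: structure to spi master.
 * @start_time: KPI timestamp taken by the caller.
 *
 * Performs the one-time master setup if still pending, resumes the GPI
 * channels and locks the bus for the LE VM.
 *
 * return: 0 on success, negative error code on failure.
 */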
static int spi_geni_levm_resume_proc(struct spi_geni_master *geni_mas,
				     struct spi_master *spi,
				     unsigned long long start_time)
{
	int ret = 0;

	if (!geni_mas->setup) {
		ret = spi_geni_mas_setup(spi);
		if (ret) {
			SPI_LOG_ERR(geni_mas->ipc, true, geni_mas->dev,
				    "%s mas_setup failed: %d\n", __func__, ret);
			return ret;
		}
	}
	if (geni_mas->gsi_mode) {
		ret = spi_geni_gpi_pause_resume(geni_mas, false);
		if (ret) {
			SPI_LOG_ERR(geni_mas->ipc, false, geni_mas->dev,
				    "%s: ret:%d\n", __func__, ret);
			return ret;
		}
	}
	ret = spi_geni_lock_bus(spi);
	if (ret) {
		SPI_LOG_ERR(geni_mas->ipc, true, geni_mas->dev,
			    "%s lock_bus failed: %d\n", __func__, ret);
		return ret;
	}
	SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev, "%s: ret:%d\n",
		    __func__, ret);
	geni_capture_stop_time(&geni_mas->spi_rsc, geni_mas->ipc_log_kpi,
			       __func__, geni_mas->spi_kpi, start_time, 0, 0);
	/* Return here as the LE VM doesn't need resource/clock management */
	return ret;
}
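
/**
 * spi_geni_runtime_resume: Runtime resume the SE.
 * @dev: structure to device.
 *
 * Mirrors spi_geni_runtime_suspend: branches to the LE VM path where
 * applicable, re-enables ICC votes, clocks and resources, redoes the
 * master setup after deep sleep exit, resumes the GPI channels in GSI
 * mode and re-enables the SE IRQ.
 *
 * return: 0 on success, negative error code on failure.
 */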
static int spi_geni_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
	unsigned long long start_time;

	start_time = geni_capture_start_time(&geni_mas->spi_rsc,
					     geni_mas->ipc_log_kpi, __func__,
					     geni_mas->spi_kpi);
	if (geni_mas->is_le_vm)
		return spi_geni_levm_resume_proc(geni_mas, spi, start_time);
	SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev, "%s: %d\n",
		    __func__, ret);
	if (geni_mas->shared_se) {
		/* On the very first resume, mas->tx may not be set up yet */
		if (geni_mas->tx != NULL) {
			ret = dmaengine_resume(geni_mas->tx);
			if (ret) {
				SPI_LOG_ERR(geni_mas->ipc, true, geni_mas->dev,
					    "%s dmaengine_resume failed: %d\n",
					    __func__, ret);
			}
			SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev,
				    "%s: Shared_SE dma_resume call\n",
				    __func__);
		}
	}
	if (geni_mas->shared_ee || geni_mas->is_la_vm)
		goto exit_rt_resume;
	if (geni_mas->gsi_mode) {
		ret = geni_icc_enable(&geni_mas->spi_rsc);
		if (ret) {
			SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev,
				    "%s failing at geni icc enable ret=%d\n",
				    __func__, ret);
			return ret;
		}
		ret = geni_se_common_clks_on(geni_mas->spi_rsc.clk,
					     geni_mas->m_ahb_clk,
					     geni_mas->s_ahb_clk);
		if (ret)
			SPI_LOG_ERR(geni_mas->ipc, false, geni_mas->dev,
				    "%s: Error %d turning on clocks\n",
				    __func__, ret);
		ret = spi_geni_gpi_pause_resume(geni_mas, false);
		return ret;
	}
exit_rt_resume:
	ret = geni_icc_enable(&geni_mas->spi_rsc);
	if (ret) {
		SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev,
			    "%s failing at geni icc enable ret=%d\n",
			    __func__, ret);
		return ret;
	}
	ret = geni_se_resources_on(&geni_mas->spi_rsc);
	geni_write_reg(0x7f, geni_mas->base, GENI_OUTPUT_CTRL);
	/* 10 us delay to settle the register write, per HW team recommendation */
	udelay(10);
	/* SPI GENI setup happens for SPI master/slave after deep sleep exit */
	if (geni_mas->is_deep_sleep && !geni_mas->setup) {
		ret = spi_geni_mas_setup(spi);
		if (ret) {
			SPI_LOG_ERR(geni_mas->ipc, false, geni_mas->dev,
				    "%s: Error %d deep sleep mas setup\n",
				    __func__, ret);
			return ret;
		}
	}
	if (geni_mas->gsi_mode)
		ret = spi_geni_gpi_pause_resume(geni_mas, false);
	enable_irq(geni_mas->irq);
	geni_capture_stop_time(&geni_mas->spi_rsc, geni_mas->ipc_log_kpi,
			       __func__, geni_mas->spi_kpi, start_time, 0, 0);
	return ret;
}

static int spi_geni_resume(struct device *dev)
{
	return 0;
}

/**
 * spi_geni_deep_sleep_enable_check() - spi geni deep sleep enable check
 *
 * @geni_mas: pointer to the spi geni master structure.
 *
 * Return: None
 */
#ifdef CONFIG_DEEPSLEEP
void spi_geni_deep_sleep_enable_check(struct spi_geni_master *geni_mas)
{
	if (pm_suspend_target_state == PM_SUSPEND_MEM) {
		SPI_LOG_ERR(geni_mas->ipc, true, geni_mas->dev,
			    "%s:DEEP SLEEP ENTRY", __func__);
		geni_mas->is_deep_sleep = true;
		/* For dma/fifo mode, the master setup config is required */
		if (!geni_mas->gsi_mode) {
			geni_mas->setup = false;
			geni_mas->slave_setup = false;
		}
	}
}
#else
void spi_geni_deep_sleep_enable_check(struct spi_geni_master *geni_mas)
{
}
#endif
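
/**
 * spi_geni_suspend: System suspend callback.
 * @dev: structure to device.
 *
 * Rejects suspend while a transfer is in progress and runtime PM is
 * active, checks for deep sleep entry, and force-suspends an idle but
 * runtime-active controller so that system suspend can proceed.
 *
 * return: 0 on success, -EBUSY if the controller is busy.
 */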
static int spi_geni_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
	unsigned long long start_time;

	start_time = geni_capture_start_time(&geni_mas->spi_rsc,
					     geni_mas->ipc_log_kpi, __func__,
					     geni_mas->spi_kpi);
	if (geni_mas->is_xfer_in_progress) {
		if (!pm_runtime_status_suspended(dev)) {
			SPI_LOG_ERR(geni_mas->ipc, true, geni_mas->dev,
				    ":%s: runtime PM is active\n", __func__);
			ret = -EBUSY;
			return ret;
		}
		SPI_LOG_DBG(geni_mas->ipc, false, geni_mas->dev,
			    "%s System suspend not allowed while xfer in progress=%d\n",
			    __func__, ret);
		return ret;
	}
	spi_geni_deep_sleep_enable_check(geni_mas);
	if (!pm_runtime_status_suspended(dev)) {
		if (list_empty(&spi->queue) && !spi->cur_msg) {
			SPI_LOG_ERR(geni_mas->ipc, true, geni_mas->dev,
				    "%s: Force suspend", __func__);
			ret = spi_geni_runtime_suspend(dev);
			if (ret) {
				SPI_LOG_ERR(geni_mas->ipc, true, geni_mas->dev,
					    "Force suspend Failed:%d", ret);
			} else {
				pm_runtime_disable(dev);
				pm_runtime_set_suspended(dev);
				pm_runtime_enable(dev);
			}
		} else {
			ret = -EBUSY;
		}
	}
	geni_capture_stop_time(&geni_mas->spi_rsc, geni_mas->ipc_log_kpi,
			       __func__, geni_mas->spi_kpi, start_time, 0, 0);
	return ret;
}
#else
static int spi_geni_runtime_suspend(struct device *dev)
{
	return 0;
}

static int spi_geni_runtime_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_suspend(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
			   spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
	{ .compatible = "qcom,spi-geni" },
	{}
};

static struct platform_driver spi_geni_driver = {
	.probe = spi_geni_probe,
	.remove = spi_geni_remove,
	.driver = {
		.name = "spi_geni",
		.pm = &spi_geni_pm_ops,
		.of_match_table = spi_geni_dt_match,
	},
};

static int __init spi_dev_init(void)
{
	return platform_driver_register(&spi_geni_driver);
}

static void __exit spi_dev_exit(void)
{
	platform_driver_unregister(&spi_geni_driver);
}

module_init(spi_dev_init);
module_exit(spi_dev_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:spi_geni");