i2c-msm-geni.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/clk.h>
  7. #include <linux/delay.h>
  8. #include <linux/err.h>
  9. #include <linux/i2c.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/io.h>
  12. #include <linux/irq.h>
  13. #include <linux/module.h>
  14. #include <linux/of.h>
  15. #include <linux/of_platform.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/pm_runtime.h>
  18. #include <linux/dma-mapping.h>
  19. #include <linux/soc/qcom/geni-se.h>
  20. #include <linux/qcom-geni-se-common.h>
  21. #include <linux/ipc_logging.h>
  22. #include <linux/dmaengine.h>
  23. #include <linux/msm_gpi.h>
  24. #include <linux/ioctl.h>
  25. #include <linux/pinctrl/consumer.h>
  26. #include <linux/slab.h>
  27. #define SE_GENI_TEST_BUS_CTRL 0x44
  28. #define SE_NUM_FOR_TEST_BUS 5
  29. #define SE_GENI_CFG_REG68 (0x210)
  30. #define SE_I2C_TX_TRANS_LEN (0x26C)
  31. #define SE_I2C_RX_TRANS_LEN (0x270)
  32. #define SE_I2C_SCL_COUNTERS (0x278)
  33. #define SE_GENI_M_GP_LENGTH (0x910)
  34. /* M_CMD OP codes for I2C */
  35. #define I2C_WRITE (0x1)
  36. #define I2C_READ (0x2)
  37. #define I2C_WRITE_READ (0x3)
  38. #define I2C_ADDR_ONLY (0x4)
  39. #define I2C_BUS_CLEAR (0x6)
  40. #define I2C_STOP_ON_BUS (0x7)
  41. /* M_CMD params for I2C */
  42. #define PRE_CMD_DELAY (BIT(0))
  43. #define TIMESTAMP_BEFORE (BIT(1))
  44. #define STOP_STRETCH (BIT(2))
  45. #define TIMESTAMP_AFTER (BIT(3))
  46. #define POST_COMMAND_DELAY (BIT(4))
  47. #define IGNORE_ADD_NACK (BIT(6))
  48. #define READ_FINISHED_WITH_ACK (BIT(7))
  49. #define BYPASS_ADDR_PHASE (BIT(8))
  50. #define SLV_ADDR_MSK (GENMASK(15, 9))
  51. #define SLV_ADDR_SHFT (9)
  52. #define I2C_PACK_EN (BIT(0) | BIT(1))
  53. #define GP_IRQ0 0
  54. #define GP_IRQ1 1
  55. #define GP_IRQ2 2
  56. #define GP_IRQ3 3
  57. #define GP_IRQ4 4
  58. #define GP_IRQ5 5
  59. #define GENI_OVERRUN 6
  60. #define GENI_ILLEGAL_CMD 7
  61. #define GENI_ABORT_DONE 8
  62. #define GENI_TIMEOUT 9
  63. #define GENI_HW_PARAM 0x50
  64. #define GENI_SPURIOUS_IRQ 10
  65. #define I2C_ADDR_NACK 11
  66. #define I2C_DATA_NACK 12
  67. #define GENI_M_CMD_FAILURE 13
  68. #define GSI_TRE_FULL 14
  69. #define I2C_NACK GP_IRQ1
  70. #define I2C_BUS_PROTO GP_IRQ3
  71. #define I2C_ARB_LOST GP_IRQ4
  72. #define DM_I2C_CB_ERR ((BIT(GP_IRQ1) | BIT(GP_IRQ3) | BIT(GP_IRQ4)) \
  73. << 5)
  74. #define I2C_MASTER_HUB (BIT(0))
  75. #define KHz(freq) (1000 * freq)
  76. #define I2C_AUTO_SUSPEND_DELAY 250
  77. #define I2C_TIMEOUT_SAFETY_COEFFICIENT 10
  78. #define I2C_TIMEOUT_MIN_USEC 500000
  79. #define MAX_SE 20
  80. #define I2C_LOG_DBG(log_ctx, print, dev, x...) do { \
  81. GENI_SE_DBG(log_ctx, print, dev, x); \
  82. if (dev) \
  83. i2c_trace_log(dev, x); \
  84. } while (0)
  85. #define I2C_LOG_ERR(log_ctx, print, dev, x...) do { \
  86. GENI_SE_ERR(log_ctx, print, dev, x); \
  87. if (dev) \
  88. i2c_trace_log(dev, x); \
  89. } while (0)
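/*
 * CREATE_TRACE_POINTS must be defined before including the trace header so
 * that the tracepoints used by i2c_trace_log() are instantiated here.
 */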
  90. #define CREATE_TRACE_POINTS
  91. #include "i2c-qup-trace.h"
  92. #define I2C_HUB_DEF 0
  93. /* As per the dtsi, a maximum of 1024 TREs is configured.
  94. * For the multi-descriptor use case we can submit up to 512 TREs;
  95. * this includes the GO TRE and DMA TREs.
  96. * We sometimes need additional space for config TREs and lock/unlock TREs,
  97. * so 64 extra TREs are reserved for such config-related TREs:
  98. * data transfers use 512 - 64 = 448 TREs, and the remaining 64 are for config/lock/unlock etc.
  99. */
  100. #define MAX_NUM_TRE_MSGS 448
  101. #define NUM_TRE_MSGS_PER_INTR 64
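/*
 * Messages of up to IMMEDIATE_DMA_LEN bytes are carried as immediate data
 * inside the TX TRE itself (see setup_tx_tre()), so no DMA mapping of the
 * message buffer is needed for them.
 */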
  102. #define IMMEDIATE_DMA_LEN 8
  103. /* FTRACE Logging */
  104. void i2c_trace_log(struct device *dev, const char *fmt, ...)
  105. {
  106. struct va_format vaf = {
  107. .fmt = fmt,
  108. };
  109. va_list args;
  110. va_start(args, fmt);
  111. vaf.va = &args;
  112. trace_i2c_log_info(dev_name(dev), &vaf);
  113. va_end(args);
  114. }
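/*
 * SE transfer modes: FIFO_SE_DMA covers FIFO and SE-DMA operation; GSI_ONLY
 * is used when the SE is owned by the GSI/GPI DMA engine.
 */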
  115. enum i2c_se_mode {
  116. UNINITIALIZED,
  117. FIFO_SE_DMA,
  118. GSI_ONLY,
  119. };
  120. struct dbg_buf_ctxt {
  121. void *virt_buf;
  122. void *map_buf;
  123. };
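/*
 * Book-keeping for multi-descriptor GSI TX transfers: submitted, freed and
 * unmapped message counts, the TX interrupt count, and a ring of DMA-safe
 * buffers with one slot per outstanding TRE message.
 */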
  124. struct gsi_i2c_tre_queue {
  125. u32 msg_cnt; /* transmitted tre msg count */
  126. u32 tre_freed_cnt;
  127. bool is_multi_descriptor;
  128. atomic_t irq_cnt;
  129. u32 unmap_cnt;
  130. u8 *dma_buf[MAX_NUM_TRE_MSGS];
  131. };
  132. struct geni_i2c_dev {
  133. struct device *dev;
  134. void __iomem *base;
  135. unsigned int tx_wm;
  136. int irq;
  137. int err;
  138. u32 xfer_timeout;
  139. struct i2c_adapter adap;
  140. struct completion xfer;
  141. struct completion m_cancel_cmd;
  142. struct i2c_msg *cur;
  143. struct i2c_msg *msgs;
  144. struct gsi_i2c_tre_queue gsi_tx;
  145. struct geni_se i2c_rsc;
  146. struct clk *m_ahb_clk;
  147. struct clk *s_ahb_clk;
  148. struct clk *core_clk;
  149. int cur_wr;
  150. int cur_rd;
  151. struct device *wrapper_dev;
  152. void *ipcl;
  153. void *ipc_log_kpi;
  154. int i2c_kpi;
  155. int clk_fld_idx;
  156. struct dma_chan *tx_c;
  157. struct dma_chan *rx_c;
  158. struct msm_gpi_tre lock_t;
  159. struct msm_gpi_tre unlock_t;
  160. struct msm_gpi_tre cfg0_t;
  161. struct msm_gpi_tre go_t;
  162. struct msm_gpi_tre tx_t;
  163. struct msm_gpi_tre rx_t;
  164. dma_addr_t tx_ph[MAX_NUM_TRE_MSGS];
  165. dma_addr_t rx_ph;
  166. struct msm_gpi_ctrl tx_ev;
  167. struct msm_gpi_ctrl rx_ev;
  168. struct scatterlist *tx_sg; /* lock, cfg0, go, TX, unlock */
  169. struct scatterlist *rx_sg;
  170. dma_addr_t tx_sg_dma;
  171. dma_addr_t rx_sg_dma;
  172. int cfg_sent;
  173. int clk_freq_out;
  174. struct dma_async_tx_descriptor *tx_desc;
  175. struct dma_async_tx_descriptor *rx_desc;
  176. struct msm_gpi_dma_async_tx_cb_param tx_cb;
  177. struct msm_gpi_dma_async_tx_cb_param rx_cb;
  178. enum i2c_se_mode se_mode;
  179. bool cmd_done;
  180. bool is_shared;
  181. bool is_high_perf; /* To increase the performance voting for higher BW values */
  182. u32 dbg_num;
  183. struct dbg_buf_ctxt *dbg_buf_ptr;
  184. bool is_le_vm;
  185. bool pm_ctrl_client;
  186. bool req_chan;
  187. bool first_xfer_done; /* for le-vm doing lock/unlock, after first xfer initiated. */
  188. bool le_gpi_reset_done;
  189. bool is_i2c_hub;
  190. bool prev_cancel_pending; //Halt cancel till IOS in good state
  191. bool gsi_err; /* For every gsi error performing gsi reset */
  192. bool is_i2c_rtl_based; /* doing pending cancel only for rtl based SE's */
  193. bool skip_bw_vote; /* Used for PMIC over i2c use case to skip the BW vote */
  194. atomic_t is_xfer_in_progress; /* Used to maintain xfer inprogress status */
  195. bool bus_recovery_enable; /* To be enabled by client if needed */
  196. bool i2c_test_dev; /* Set this DT flag to enable test bus dump for an SE */
  197. };
  198. static struct geni_i2c_dev *gi2c_dev_dbg[MAX_SE];
  199. static int arr_idx;
  200. static int geni_i2c_runtime_suspend(struct device *dev);
  201. struct geni_i2c_err_log {
  202. int err;
  203. const char *msg;
  204. };
  205. static struct geni_i2c_err_log gi2c_log[] = {
  206. [GP_IRQ0] = {-EINVAL, "Unknown I2C err GP_IRQ0"},
  207. [I2C_ADDR_NACK] = {-ENOTCONN,
  208. "Address NACK: slv unresponsive, check its power/reset-ln"},
  209. [I2C_DATA_NACK] = {-ENOTCONN,
  210. "Data NACK: Device NACK before end of TX transfer"},
  211. [GP_IRQ2] = {-EINVAL, "Unknown I2C err GP IRQ2"},
  212. [I2C_BUS_PROTO] = {-EPROTO,
  213. "Bus proto err, noisy/unepxected start/stop"},
  214. [I2C_ARB_LOST] = {-EBUSY,
  215. "Bus arbitration lost, clock line undriveable"},
  216. [GP_IRQ5] = {-EINVAL, "Unknown I2C err GP IRQ5"},
  217. [GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
  218. [GENI_ILLEGAL_CMD] = {-EILSEQ,
  219. "Illegal cmd, check GENI cmd-state machine"},
  220. [GENI_ABORT_DONE] = {-ETIMEDOUT, "Abort after timeout successful"},
  221. [GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
  222. [GENI_SPURIOUS_IRQ] = {-EINVAL, "Received unexpected interrupt"},
  223. [GENI_M_CMD_FAILURE] = {-EINVAL, "Master command failure"},
  224. [GSI_TRE_FULL] = {-EINVAL, "GSI TRE FULL NO SPACE"},
  225. };
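/*
 * Per-frequency clock configuration: clk_div is programmed into
 * GENI_SER_M_CLK_CFG and t_high/t_low/t_cycle into SE_I2C_SCL_COUNTERS
 * (see qcom_geni_i2c_conf()).
 */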
  226. struct geni_i2c_clk_fld {
  227. u32 clk_freq_out;
  228. u8 clk_div;
  229. u8 t_high;
  230. u8 t_low;
  231. u8 t_cycle;
  232. };
  233. static struct geni_i2c_clk_fld geni_i2c_clk_map[] = {
  234. {KHz(100), 7, 10, 11, 26},
  235. {KHz(400), 2, 7, 10, 24},
  236. {KHz(1000), 1, 2, 8, 18},
  237. };
  238. static struct geni_i2c_clk_fld geni_i2c_hub_clk_map[] = {
  239. {KHz(100), 7, 10, 11, 26},
  240. {KHz(400), 2, 7, 10, 24},
  241. {KHz(1000), 1, 3, 9, 18},
  242. };
  243. static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
  244. {
  245. int i;
  246. int ret = 0;
  247. bool clk_map_present = false;
  248. struct geni_i2c_clk_fld *itr;
  249. itr = (gi2c->is_i2c_rtl_based) ? geni_i2c_hub_clk_map : geni_i2c_clk_map;
  250. for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) {
  251. if (itr->clk_freq_out == gi2c->clk_freq_out) {
  252. clk_map_present = true;
  253. break;
  254. }
  255. }
  256. if (clk_map_present)
  257. gi2c->clk_fld_idx = i;
  258. else
  259. ret = -EINVAL;
  260. return ret;
  261. }
  262. /**
  263. * geni_i2c_se_dump_dbg_regs() - Print relevant registers that capture most
  264. * accurately the state of an SE.
  265. * @se: Pointer to the concerned serial engine.
  266. * @base: Base address of the SE's register space.
  267. * @ipc: IPC log context handle.
  268. *
  269. * This function is used to print out all the registers that capture the state
  270. * of an SE to help debug any errors.
  271. *
  272. * Return: None
  273. */
  274. void geni_i2c_se_dump_dbg_regs(struct geni_se *se, void __iomem *base,
  275. void *ipc)
  276. {
  277. u32 m_cmd0 = 0;
  278. u32 m_irq_status = 0;
  279. u32 s_cmd0 = 0;
  280. u32 s_irq_status = 0;
  281. u32 geni_status = 0;
  282. u32 geni_ios = 0;
  283. u32 dma_rx_irq = 0;
  284. u32 dma_tx_irq = 0;
  285. u32 rx_fifo_status = 0;
  286. u32 tx_fifo_status = 0;
  287. u32 se_dma_dbg = 0;
  288. u32 m_cmd_ctrl = 0;
  289. u32 se_dma_rx_len = 0;
  290. u32 se_dma_rx_len_in = 0;
  291. u32 se_dma_tx_len = 0;
  292. u32 se_dma_tx_len_in = 0;
  293. u32 geni_m_irq_en = 0;
  294. u32 geni_s_irq_en = 0;
  295. u32 geni_dma_tx_irq_en = 0;
  296. u32 geni_dma_rx_irq_en = 0;
  297. m_cmd0 = geni_read_reg(base, SE_GENI_M_CMD0);
  298. m_irq_status = geni_read_reg(base, SE_GENI_M_IRQ_STATUS);
  299. s_cmd0 = geni_read_reg(base, SE_GENI_S_CMD0);
  300. s_irq_status = geni_read_reg(base, SE_GENI_S_IRQ_STATUS);
  301. geni_status = geni_read_reg(base, SE_GENI_STATUS);
  302. geni_ios = geni_read_reg(base, SE_GENI_IOS);
  303. dma_tx_irq = geni_read_reg(base, SE_DMA_TX_IRQ_STAT);
  304. dma_rx_irq = geni_read_reg(base, SE_DMA_RX_IRQ_STAT);
  305. rx_fifo_status = geni_read_reg(base, SE_GENI_RX_FIFO_STATUS);
  306. tx_fifo_status = geni_read_reg(base, SE_GENI_TX_FIFO_STATUS);
  307. se_dma_dbg = geni_read_reg(base, SE_DMA_DEBUG_REG0);
  308. m_cmd_ctrl = geni_read_reg(base, SE_GENI_M_CMD_CTRL_REG);
  309. se_dma_rx_len = geni_read_reg(base, SE_DMA_RX_LEN);
  310. se_dma_rx_len_in = geni_read_reg(base, SE_DMA_RX_LEN_IN);
  311. se_dma_tx_len = geni_read_reg(base, SE_DMA_TX_LEN);
  312. se_dma_tx_len_in = geni_read_reg(base, SE_DMA_TX_LEN_IN);
  313. geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
  314. geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
  315. geni_dma_tx_irq_en = geni_read_reg(base, SE_DMA_TX_IRQ_EN);
  316. geni_dma_rx_irq_en = geni_read_reg(base, SE_DMA_RX_IRQ_EN);
  317. I2C_LOG_DBG(ipc, false, se->dev,
  318. "%s: m_cmd0:0x%x, m_irq_status:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
  319. __func__, m_cmd0, m_irq_status, geni_status, geni_ios);
  320. I2C_LOG_DBG(ipc, false, se->dev,
  321. "dma_rx_irq:0x%x, dma_tx_irq:0x%x, rx_fifo_sts:0x%x, tx_fifo_sts:0x%x\n",
  322. dma_rx_irq, dma_tx_irq, rx_fifo_status, tx_fifo_status);
  323. I2C_LOG_DBG(ipc, false, se->dev,
  324. "se_dma_dbg:0x%x, m_cmd_ctrl:0x%x, dma_rxlen:0x%x, dma_rxlen_in:0x%x\n",
  325. se_dma_dbg, m_cmd_ctrl, se_dma_rx_len, se_dma_rx_len_in);
  326. I2C_LOG_DBG(ipc, false, se->dev,
  327. "dma_txlen:0x%x, dma_txlen_in:0x%x s_irq_status:0x%x\n",
  328. se_dma_tx_len, se_dma_tx_len_in, s_irq_status);
  329. I2C_LOG_DBG(ipc, false, se->dev,
  330. "dma_txirq_en:0x%x, dma_rxirq_en:0x%x geni_m_irq_en:0x%x geni_s_irq_en:0x%x\n",
  331. geni_dma_tx_irq_en, geni_dma_rx_irq_en, geni_m_irq_en,
  332. geni_s_irq_en);
  333. }
  334. /*
  335. * capture_kpi_show() - Prints the value stored in capture_kpi sysfs entry
  336. *
  337. * @dev: pointer to device
  338. * @attr: device attributes
  339. * @buf: buffer to store the capture_kpi_value
  340. *
  341. * Return: number of bytes written to @buf, or a negative error value
  342. */
  343. static ssize_t capture_kpi_show(struct device *dev,
  344. struct device_attribute *attr, char *buf)
  345. {
  346. struct platform_device *pdev = to_platform_device(dev);
  347. struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
  348. if (!gi2c)
  349. return -EINVAL;
  350. return scnprintf(buf, sizeof(int), "%d\n", gi2c->i2c_kpi);
  351. }
  352. /*
  353. * capture_kpi_store() - store the capture_kpi sysfs value
  354. *
  355. * @dev: pointer to device
  356. * @attr: device attributes
  357. * @buf: buffer to store the capture_kpi_value
  358. * @size: number of bytes in @buf
  359. *
  360. * Return: @size on success, or a negative error value
  361. */
  362. static ssize_t capture_kpi_store(struct device *dev,
  363. struct device_attribute *attr, const char *buf,
  364. size_t size)
  365. {
  366. struct platform_device *pdev = to_platform_device(dev);
  367. struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
  368. char name[36];
  369. if (!gi2c)
  370. return -EINVAL;
  371. if (kstrtoint(buf, 0, &gi2c->i2c_kpi)) {
  372. dev_err(dev, "Invalid input\n");
  373. return -EINVAL;
  374. }
  375. /* IPC log context for KPI measurements */
  376. if (gi2c->i2c_kpi && !gi2c->ipc_log_kpi) {
  377. memset(name, 0, sizeof(name));
  378. scnprintf(name, sizeof(name), "%s%s", dev_name(gi2c->dev), "_kpi");
  379. gi2c->ipc_log_kpi = ipc_log_context_create(IPC_LOG_KPI_PAGES, name, 0);
  380. if (!gi2c->ipc_log_kpi && IS_ENABLED(CONFIG_IPC_LOGGING))
  381. dev_err(&pdev->dev, "Error creating kpi IPC logs\n");
  382. }
  383. return size;
  384. }
  385. static DEVICE_ATTR_RW(capture_kpi);
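/*
 * Exposed as a per-device sysfs file named "capture_kpi". Illustrative
 * usage (the actual device path depends on the platform):
 *   echo 1 > /sys/bus/platform/devices/<se-name>/capture_kpi
 */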
  386. static inline void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c, int dfs)
  387. {
  388. struct geni_i2c_clk_fld *itr;
  389. if (gi2c->is_i2c_rtl_based)
  390. itr = geni_i2c_hub_clk_map + gi2c->clk_fld_idx;
  391. else
  392. itr = geni_i2c_clk_map + gi2c->clk_fld_idx;
  393. /* do not configure the dfs index for i2c hub master */
  394. if (!gi2c->is_i2c_hub)
  395. geni_write_reg(dfs, gi2c->base, SE_GENI_CLK_SEL);
  396. geni_write_reg((itr->clk_div << 4) | 1, gi2c->base, GENI_SER_M_CLK_CFG);
  397. geni_write_reg(((itr->t_high << 20) | (itr->t_low << 10) |
  398. itr->t_cycle), gi2c->base, SE_I2C_SCL_COUNTERS);
  399. /*
  400. * Ensure Clk config completes before return.
  401. */
  402. mb();
  403. }
  404. static inline void qcom_geni_i2c_calc_timeout(struct geni_i2c_dev *gi2c)
  405. {
  406. struct geni_i2c_clk_fld *clk_itr;
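/* 9 clock cycles per byte on the wire: 8 data bits plus one ACK/NACK bit */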
  407. size_t bit_cnt = gi2c->cur->len*9;
  408. size_t bit_usec = 0;
  409. size_t xfer_max_usec = 0;
  410. if (gi2c->is_i2c_rtl_based)
  411. clk_itr = geni_i2c_hub_clk_map + gi2c->clk_fld_idx;
  412. else
  413. clk_itr = geni_i2c_clk_map + gi2c->clk_fld_idx;
  414. bit_usec = (bit_cnt * USEC_PER_SEC) / clk_itr->clk_freq_out;
  415. xfer_max_usec = (bit_usec * I2C_TIMEOUT_SAFETY_COEFFICIENT) +
  416. I2C_TIMEOUT_MIN_USEC;
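/*
 * Illustrative example: a 32-byte message at 100 kHz gives bit_cnt = 288,
 * bit_usec = 2880, and xfer_max_usec = 2880 * 10 + 500000 = 528800 us.
 */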
  417. gi2c->xfer_timeout = usecs_to_jiffies(xfer_max_usec);
  418. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  419. "%s: us:%d jiffies:%d\n",
  420. __func__, xfer_max_usec, gi2c->xfer_timeout);
  421. }
  422. /*
  423. * geni_se_select_test_bus: Selects the test bus as required
  424. *
  425. * @gi2c: Geni I2C device handle
  426. * @test_bus_num: Test bus number to select (1 to 16)
  427. *
  428. * Return: None
  429. */
  430. static void geni_se_select_test_bus(struct geni_i2c_dev *gi2c, u8 test_bus_num)
  431. {
  432. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  433. "%s: test_bus:%d\n", __func__, test_bus_num);
  434. writel_relaxed(test_bus_num, gi2c->base + SE_GENI_TEST_BUS_CTRL);
  435. }
  436. static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
  437. {
  438. if (err == I2C_DATA_NACK || err == I2C_ADDR_NACK
  439. || err == GENI_ABORT_DONE) {
  440. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n",
  441. gi2c_log[err].msg);
  442. goto err_ret;
  443. } else {
  444. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev, "%s\n",
  445. gi2c_log[err].msg);
  446. }
  447. geni_i2c_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base, gi2c->ipcl);
  448. err_ret:
  449. gi2c->err = gi2c_log[err].err;
  450. }
  451. /*
  452. * geni_i2c_test_bus_dump(): Dumps or reads test bus for selected SE test bus.
  453. *
  454. * @gi2c: Handle to SE device
  455. * @se_num: SE number, which starts from 0.
  456. *
  457. * Return: None
  458. *
  459. * Note: This function has added extra test buses for reference.
  460. */
  461. static void geni_i2c_test_bus_dump(struct geni_i2c_dev *gi2c, u8 se_num)
  462. {
  463. /* Select test bus number and test bus, then read test bus.*/
  464. /* geni_m_comp_sig_test_bus */
  465. geni_se_select_test_bus(gi2c, 8);
  466. test_bus_select_per_qupv3(gi2c->wrapper_dev, se_num, gi2c->ipcl);
  467. test_bus_read_per_qupv3(gi2c->wrapper_dev, gi2c->ipcl);
  468. /* geni_m_branch_cond_1_test_bus */
  469. geni_se_select_test_bus(gi2c, 5);
  470. test_bus_select_per_qupv3(gi2c->wrapper_dev, se_num, gi2c->ipcl);
  471. test_bus_read_per_qupv3(gi2c->wrapper_dev, gi2c->ipcl);
  472. /* Can Add more here based on debug ask. */
  473. }
  474. static void do_reg68_war_for_rtl_se(struct geni_i2c_dev *gi2c)
  475. {
  476. u32 status;
  477. //Add REG68 WAR if stretch bit is set
  478. status = geni_read_reg(gi2c->base, SE_GENI_M_CMD0);
  479. GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
  480. "%s: SE_GENI_M_CMD0:0x%x\n", __func__, status);
  481. //BIT(2) - STOP/STRETCH set then configure REG68 register
  482. if ((status & 0x4) && gi2c->is_i2c_rtl_based) {
  483. status = geni_read_reg(gi2c->base, SE_GENI_CFG_REG68);
  484. GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
  485. "%s: Before WAR REG68:0x%x\n", __func__, status);
  486. if (status & 0x20) {
  487. //Toggle Bit#4, Bit#5 of REG68 to disable/enable stretch
  488. geni_write_reg(0x00100110, gi2c->base,
  489. SE_GENI_CFG_REG68);
  490. } else {
  491. //Restore FW to suggested value i.e. 0x00100120
  492. geni_write_reg(0x00100120, gi2c->base,
  493. SE_GENI_CFG_REG68);
  494. }
  495. status = geni_read_reg(gi2c->base, SE_GENI_CFG_REG68);
  496. GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
  497. "%s: After WAR REG68:0x%x\n", __func__, status);
  498. }
  499. }
  500. /**
  501. * geni_i2c_stop_with_cancel(): stops GENI SE with cancel command.
  502. * @gi2c: I2C dev handle
  503. *
  504. * This is a generic function to stop serial engine, to be called as required.
  505. *
  506. * Return: 0 if Success, non zero value if failed.
  507. */
  508. static int geni_i2c_stop_with_cancel(struct geni_i2c_dev *gi2c)
  509. {
  510. int timeout = 0;
  511. /* Issue point for e.g.: dump test bus/read test bus */
  512. if (gi2c->i2c_test_dev)
  513. /* For se4, its 5 as SE num starts from 0 */
  514. geni_i2c_test_bus_dump(gi2c, SE_NUM_FOR_TEST_BUS);
  515. reinit_completion(&gi2c->m_cancel_cmd);
  516. geni_se_cancel_m_cmd(&gi2c->i2c_rsc);
  517. timeout = wait_for_completion_timeout(&gi2c->m_cancel_cmd, HZ);
  518. if (!timeout) {
  519. I2C_LOG_DBG(gi2c->ipcl, true, gi2c->dev,
  520. "%s:Cancel failed\n", __func__);
  521. reinit_completion(&gi2c->xfer);
  522. geni_se_abort_m_cmd(&gi2c->i2c_rsc);
  523. timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
  524. if (!timeout) {
  525. I2C_LOG_DBG(gi2c->ipcl, true, gi2c->dev,
  526. "%s:Abort failed\n", __func__);
  527. return !timeout;
  528. }
  529. }
  530. return 0;
  531. }
  532. /**
  533. * geni_i2c_is_bus_recovery_required: Checks whether bus recovery is enabled and required
  534. * @gi2c: Handle of the I2C device
  535. *
  536. * Return: true if SDA is stuck low and recovery is required, else false.
  537. *
  538. */
  539. static bool geni_i2c_is_bus_recovery_required(struct geni_i2c_dev *gi2c)
  540. {
  541. u32 geni_ios = readl_relaxed(gi2c->base + SE_GENI_IOS);
  542. /*
  543. * SE_GENI_IOS shows the I2C CLK/SDA line status: BIT 0 is SDA and
  544. * BIT 1 is CLK status. The SE_GENI_IOS bits are set when the
  545. * corresponding line is pulled high.
  546. */
  547. return (((geni_ios & 1) == 0) && (gi2c->err == -EPROTO ||
  548. gi2c->err == -EBUSY ||
  549. gi2c->err == -ETIMEDOUT));
  550. }
  551. /**
  552. * geni_i2c_bus_recovery(): Function to recover i2c bus when required
  553. * @gi2c: I2C device handle
  554. *
  555. * Use this function only when the bus is in a bad state and the slave
  556. * needs to be reset to bring it back into a proper state. On successful
  557. * execution the bus should be back in a proper state.
  558. *
  559. * Return: Success OR Respective error code/value.
  560. */
  561. static int geni_i2c_bus_recovery(struct geni_i2c_dev *gi2c)
  562. {
  563. int timeout = 0, ret = 0;
  564. u32 m_param = 0, m_cmd = 0;
  565. unsigned long long start_time;
  566. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  567. gi2c->i2c_kpi);
  568. /* Must be enabled by client "only" if required. */
  569. if (gi2c->bus_recovery_enable &&
  570. geni_i2c_is_bus_recovery_required(gi2c)) {
  571. GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
  572. "%d:SDA Line stuck\n", gi2c->err);
  573. } else {
  574. GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
  575. "Bus Recovery not required/enabled\n");
  576. return 0;
  577. }
  578. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "%s: start recovery\n",
  579. __func__);
  580. /* BUS_CLEAR */
  581. reinit_completion(&gi2c->xfer);
  582. m_cmd = I2C_BUS_CLEAR;
  583. geni_se_setup_m_cmd(&gi2c->i2c_rsc, m_cmd, m_param);
  584. timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
  585. if (!timeout) {
  586. geni_i2c_err(gi2c, GENI_TIMEOUT);
  587. gi2c->cur = NULL;
  588. ret = geni_i2c_stop_with_cancel(gi2c);
  589. if (ret) {
  590. I2C_LOG_DBG(gi2c->ipcl, true, gi2c->dev,
  591. "%s: Bus clear Failed\n", __func__);
  592. return ret;
  593. }
  594. }
  595. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  596. "%s: BUS_CLEAR success\n", __func__);
  597. /* BUS_STOP */
  598. reinit_completion(&gi2c->xfer);
  599. m_cmd = I2C_STOP_ON_BUS;
  600. geni_se_setup_m_cmd(&gi2c->i2c_rsc, m_cmd, m_param);
  601. timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
  602. if (!timeout) {
  603. geni_i2c_err(gi2c, GENI_TIMEOUT);
  604. gi2c->cur = NULL;
  605. ret = geni_i2c_stop_with_cancel(gi2c);
  606. if (ret) {
  607. I2C_LOG_DBG(gi2c->ipcl, true, gi2c->dev,
  608. "%s:Bus Stop Failed\n", __func__);
  609. return ret;
  610. }
  611. }
  612. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  613. "%s: success\n", __func__);
  614. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  615. gi2c->i2c_kpi, start_time, 0, 0);
  616. return 0;
  617. }
  618. static int do_pending_cancel(struct geni_i2c_dev *gi2c)
  619. {
  620. int timeout = 0;
  621. u32 geni_ios = 0;
  622. /* doing pending cancel only for RTL based SEs */
  623. if (!gi2c->is_i2c_rtl_based)
  624. return 0;
  625. geni_ios = geni_read_reg(gi2c->base, SE_GENI_IOS);
  626. if ((geni_ios & 0x3) != 0x3) {
  627. /* Try to restore IOS with FORCE_DEFAULT */
  628. GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
  629. "%s: IOS:0x%x, bad state\n", __func__, geni_ios);
  630. geni_write_reg(FORCE_DEFAULT,
  631. gi2c->base, GENI_FORCE_DEFAULT_REG);
  632. geni_ios = geni_read_reg(gi2c->base, SE_GENI_IOS);
  633. if ((geni_ios & 0x3) != 0x3) {
  634. GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
  635. "%s: IOS:0x%x, Fix from Slave side\n",
  636. __func__, geni_ios);
  637. return -EINVAL;
  638. }
  639. GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
  640. "%s: IOS:0x%x restored properly\n", __func__, geni_ios);
  641. }
  642. if (gi2c->se_mode == GSI_ONLY) {
  643. dmaengine_terminate_all(gi2c->tx_c);
  644. gi2c->cfg_sent = 0;
  645. } else if (geni_i2c_stop_with_cancel(gi2c)) {
  646. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  647. "%s: geni_i2c_stop_with_cancel failed\n", __func__);
  648. }
  649. gi2c->prev_cancel_pending = false;
  650. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "%s: Pending Cancel done\n", __func__);
  651. return timeout;
  652. }
  653. static int geni_i2c_prepare(struct geni_i2c_dev *gi2c)
  654. {
  655. if (gi2c->se_mode == UNINITIALIZED) {
  656. int proto = geni_se_read_proto(&gi2c->i2c_rsc);
  657. u32 se_mode, geni_se_hw_param_2;
  658. if (proto != GENI_SE_I2C) {
  659. dev_err(gi2c->dev, "Invalid proto %d\n", proto);
  660. if (!gi2c->is_le_vm) {
  661. geni_se_resources_off(&gi2c->i2c_rsc);
  662. geni_icc_disable(&gi2c->i2c_rsc);
  663. if (gi2c->is_i2c_hub)
  664. clk_disable_unprepare(gi2c->core_clk);
  665. }
  666. return -ENXIO;
  667. }
  668. se_mode = readl_relaxed(gi2c->base +
  669. GENI_IF_DISABLE_RO);
  670. if (se_mode) {
  671. gi2c->se_mode = GSI_ONLY;
  672. geni_se_select_mode(&gi2c->i2c_rsc, GENI_GPI_DMA);
  673. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  674. "i2c in GSI ONLY mode\n");
  675. } else {
  676. int gi2c_tx_depth;
  677. if (!gi2c->is_i2c_hub)
  678. gi2c_tx_depth = geni_se_get_tx_fifo_depth(&gi2c->i2c_rsc);
  679. else
  680. gi2c_tx_depth = 16; /* i2c hub depth is fixed to 16 */
  681. gi2c->se_mode = FIFO_SE_DMA;
  682. gi2c->tx_wm = gi2c_tx_depth - 1;
  683. geni_se_init(&gi2c->i2c_rsc, gi2c->tx_wm, gi2c_tx_depth);
  684. qcom_geni_i2c_conf(gi2c, 0);
  685. geni_se_config_packing(&gi2c->i2c_rsc, 8, 4, true, true, true);
  686. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  687. "i2c fifo/se-dma mode. fifo depth:%d\n",
  688. gi2c_tx_depth);
  689. }
  690. if (!gi2c->is_i2c_hub) {
  691. /* Check if SE is RTL based SE */
  692. geni_se_hw_param_2 = readl_relaxed(gi2c->base + SE_HW_PARAM_2);
  693. if (geni_se_hw_param_2 & GEN_HW_FSM_I2C) {
  694. gi2c->is_i2c_rtl_based = true;
  695. dev_info(gi2c->dev, "%s: RTL based SE\n", __func__);
  696. }
  697. }
  698. if (gi2c->pm_ctrl_client)
  699. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  700. "%s: pm_runtime_get_sync bypassed\n", __func__);
  701. }
  702. return 0;
  703. }
  704. static void geni_i2c_irq_handle_watermark(struct geni_i2c_dev *gi2c, u32 m_stat)
  705. {
  706. struct i2c_msg *cur = gi2c->cur;
  707. int i, j;
  708. u32 rx_st = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
  709. if (!cur) {
  710. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "%s: Spurious irq\n", __func__);
  711. geni_i2c_err(gi2c, GENI_SPURIOUS_IRQ);
  712. return;
  713. }
  714. if (((m_stat & M_RX_FIFO_WATERMARK_EN) ||
  715. (m_stat & M_RX_FIFO_LAST_EN)) && (cur->flags & I2C_M_RD)) {
  716. u32 rxcnt = rx_st & RX_FIFO_WC_MSK;
  717. for (j = 0; j < rxcnt; j++) {
  718. u32 temp;
  719. int p;
  720. temp = readl_relaxed(gi2c->base + SE_GENI_RX_FIFOn);
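/* Unpack up to 4 bytes from each RX FIFO word (8-bit, 4-per-word packing) */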
  721. for (i = gi2c->cur_rd, p = 0; (i < cur->len && p < 4);
  722. i++, p++)
  723. cur->buf[i] = (u8) ((temp >> (p * 8)) & 0xff);
  724. gi2c->cur_rd = i;
  725. if (gi2c->cur_rd == cur->len) {
  726. dev_dbg(gi2c->dev, "FIFO i:%d,read 0x%x\n",
  727. i, temp);
  728. break;
  729. }
  730. }
  731. } else if ((m_stat & M_TX_FIFO_WATERMARK_EN) &&
  732. !(cur->flags & I2C_M_RD)) {
  733. for (j = 0; j < gi2c->tx_wm; j++) {
  734. u32 temp = 0;
  735. int p;
  736. for (i = gi2c->cur_wr, p = 0; (i < cur->len && p < 4);
  737. i++, p++)
  738. temp |= (((u32)(cur->buf[i]) << (p * 8)));
  739. writel_relaxed(temp, gi2c->base + SE_GENI_TX_FIFOn);
  740. gi2c->cur_wr = i;
  741. dev_dbg(gi2c->dev, "FIFO i:%d,wrote 0x%x\n", i, temp);
  742. if (gi2c->cur_wr == cur->len) {
  743. dev_dbg(gi2c->dev, "FIFO i2c bytes done writing\n");
  744. writel_relaxed(0, (gi2c->base + SE_GENI_TX_WATERMARK_REG));
  745. break;
  746. }
  747. }
  748. } else {
  749. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  750. "%s: m_irq_status:0x%x cur->flags:%d\n", __func__, m_stat, cur->flags);
  751. }
  752. }
  753. /*
  754. * geni_i2c_check_addr_data_nack() - checks whether it is an address NACK or a data NACK
  755. *
  756. * @gi2c: I2C device handle
  757. * @flags: gi2c cur flags
  758. *
  759. * Return: None
  760. */
  761. static void geni_i2c_check_addr_data_nack(struct geni_i2c_dev *gi2c, __u16 flags)
  762. {
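/*
 * Heuristic used here: a non-zero SE_GENI_M_GP_LENGTH indicates the
 * transfer progressed past the address phase, so a NACK on a write is
 * treated as a data NACK; otherwise it is reported as an address NACK.
 */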
  763. if (readl_relaxed(gi2c->base + SE_GENI_M_GP_LENGTH)) {
  764. /* only process for write operation. */
  765. if (!(flags & I2C_M_RD))
  766. geni_i2c_err(gi2c, I2C_DATA_NACK);
  767. } else {
  768. geni_i2c_err(gi2c, I2C_ADDR_NACK);
  769. }
  770. }
  771. static irqreturn_t geni_i2c_irq(int irq, void *dev)
  772. {
  773. struct geni_i2c_dev *gi2c = dev;
  774. bool is_clear_watermark = false;
  775. bool m_cancel_done = false;
  776. u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
  777. u32 dm_tx_st = readl_relaxed(gi2c->base + SE_DMA_TX_IRQ_STAT);
  778. u32 dm_rx_st = readl_relaxed(gi2c->base + SE_DMA_RX_IRQ_STAT);
  779. u32 dma = readl_relaxed(gi2c->base + SE_GENI_DMA_MODE_EN);
  780. struct i2c_msg *cur = gi2c->cur;
  781. unsigned long long start_time;
  782. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  783. gi2c->i2c_kpi);
  784. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  785. "%s: m_irq_status:0x%x\n", __func__, m_stat);
  786. if (!cur) {
  787. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "Spurious irq\n");
  788. geni_i2c_err(gi2c, GENI_SPURIOUS_IRQ);
  789. gi2c->cmd_done = true;
  790. is_clear_watermark = true;
  791. goto irqret;
  792. }
  793. if ((m_stat & M_CMD_FAILURE_EN) ||
  794. (dm_rx_st & (DM_I2C_CB_ERR)) ||
  795. (m_stat & M_CMD_CANCEL_EN) ||
  796. (m_stat & M_CMD_ABORT_EN) ||
  797. (m_stat & M_GP_IRQ_1_EN) ||
  798. (m_stat & M_GP_IRQ_3_EN) ||
  799. (m_stat & M_GP_IRQ_4_EN)) {
  800. if (m_stat & M_GP_IRQ_1_EN)
  801. geni_i2c_check_addr_data_nack(gi2c, gi2c->cur->flags);
  802. if (m_stat & M_GP_IRQ_3_EN)
  803. geni_i2c_err(gi2c, I2C_BUS_PROTO);
  804. if (m_stat & M_GP_IRQ_4_EN)
  805. geni_i2c_err(gi2c, I2C_ARB_LOST);
  806. if (m_stat & M_CMD_OVERRUN_EN)
  807. geni_i2c_err(gi2c, GENI_OVERRUN);
  808. if (m_stat & M_ILLEGAL_CMD_EN)
  809. geni_i2c_err(gi2c, GENI_ILLEGAL_CMD);
  810. if (m_stat & M_CMD_ABORT_EN)
  811. geni_i2c_err(gi2c, GENI_ABORT_DONE);
  812. /*
  813. * This bit (M_CMD_FAILURE_EN) is set when command execution has
  814. * completed with a failure.
  815. */
  816. if (m_stat & M_CMD_FAILURE_EN) {
  817. /* Log the error, but do not override a previously set error */
  818. if (!gi2c->err)
  819. geni_i2c_err(gi2c, GENI_M_CMD_FAILURE);
  820. else
  821. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  822. "%s:GENI_M_CMD_FAILURE\n", __func__);
  823. }
  824. /* This bit is set when command cancel request by SW is completed */
  825. if (m_stat & M_CMD_CANCEL_EN)
  826. m_cancel_done = true;
  827. gi2c->cmd_done = true;
  828. is_clear_watermark = true;
  829. goto irqret;
  830. }
  831. geni_i2c_irq_handle_watermark(gi2c, m_stat);
  832. irqret:
  833. if (!dma && is_clear_watermark)
  834. writel_relaxed(0, (gi2c->base + SE_GENI_TX_WATERMARK_REG));
  835. if (m_stat)
  836. writel_relaxed(m_stat, gi2c->base + SE_GENI_M_IRQ_CLEAR);
  837. if (dma) {
  838. if (dm_tx_st)
  839. writel_relaxed(dm_tx_st, gi2c->base +
  840. SE_DMA_TX_IRQ_CLR);
  841. if (dm_rx_st)
  842. writel_relaxed(dm_rx_st, gi2c->base +
  843. SE_DMA_RX_IRQ_CLR);
  844. /* Ensure all writes are done before returning from ISR. */
  845. wmb();
  846. if ((dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE))
  847. gi2c->cmd_done = true;
  848. } else if (m_stat & M_CMD_DONE_EN) {
  849. gi2c->cmd_done = true;
  850. }
  851. if (gi2c->cmd_done) {
  852. gi2c->cmd_done = false;
  853. complete(&gi2c->xfer);
  854. }
  855. if (m_cancel_done)
  856. complete(&gi2c->m_cancel_cmd);
  857. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  858. gi2c->i2c_kpi, start_time, 0, gi2c->clk_freq_out);
  859. return IRQ_HANDLED;
  860. }
  861. static void gi2c_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb_str,
  862. void *ptr)
  863. {
  864. struct geni_i2c_dev *gi2c = ptr;
  865. u32 m_stat = cb_str->status;
  866. switch (cb_str->cb_event) {
  867. case MSM_GPI_QUP_ERROR:
  868. case MSM_GPI_QUP_SW_ERROR:
  869. case MSM_GPI_QUP_MAX_EVENT:
  870. case MSM_GPI_QUP_FW_ERROR:
  871. case MSM_GPI_QUP_PENDING_EVENT:
  872. case MSM_GPI_QUP_EOT_DESC_MISMATCH:
  873. break;
  874. case MSM_GPI_QUP_NOTIFY:
  875. case MSM_GPI_QUP_CH_ERROR:
  876. if (m_stat & M_GP_IRQ_1_EN)
  877. geni_i2c_check_addr_data_nack(gi2c, gi2c->cur->flags);
  878. if (m_stat & M_GP_IRQ_3_EN)
  879. geni_i2c_err(gi2c, I2C_BUS_PROTO);
  880. if (m_stat & M_GP_IRQ_4_EN)
  881. geni_i2c_err(gi2c, I2C_ARB_LOST);
  882. complete(&gi2c->xfer);
  883. break;
  884. default:
  885. break;
  886. }
  887. if (cb_str->cb_event != MSM_GPI_QUP_NOTIFY) {
  888. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  889. "GSI QN err:0x%x, status:0x%x, err:%d\n",
  890. cb_str->error_log.error_code,
  891. m_stat, cb_str->cb_event);
  892. gi2c->gsi_err = true;
  893. complete(&gi2c->xfer);
  894. }
  895. }
  896. static void gi2c_gsi_cb_err(struct msm_gpi_dma_async_tx_cb_param *cb,
  897. char *xfer)
  898. {
  899. struct geni_i2c_dev *gi2c = cb->userdata;
  900. if (cb->status & DM_I2C_CB_ERR) {
  901. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  902. "%s TCE Unexpected Err, stat:0x%x\n",
  903. xfer, cb->status);
  904. if (cb->status & (BIT(GP_IRQ1) << 5))
  905. geni_i2c_check_addr_data_nack(gi2c, gi2c->cur->flags);
  906. if (cb->status & (BIT(GP_IRQ3) << 5))
  907. geni_i2c_err(gi2c, I2C_BUS_PROTO);
  908. if (cb->status & (BIT(GP_IRQ4) << 5))
  909. geni_i2c_err(gi2c, I2C_ARB_LOST);
  910. }
  911. }
  912. /**
  913. * gi2c_gsi_tx_unmap() - unmap gi2c gsi tx message
  914. * @gi2c: Base address of the gi2c dev structure.
  915. * @msg_idx: gi2c message index.
  916. * @wr_idx: gi2c buffer write index.
  917. *
  918. * This function is used to unmap gi2c gsi tx messages.
  919. *
  920. * Return: None.
  921. */
  922. void gi2c_gsi_tx_unmap(struct geni_i2c_dev *gi2c, u32 msg_idx, u32 wr_idx)
  923. {
  924. if (gi2c->msgs[msg_idx].len > IMMEDIATE_DMA_LEN) {
  925. geni_se_common_iommu_unmap_buf(gi2c->wrapper_dev, &gi2c->tx_ph[wr_idx],
  926. gi2c->msgs[msg_idx].len, DMA_TO_DEVICE);
  927. i2c_put_dma_safe_msg_buf(gi2c->gsi_tx.dma_buf[wr_idx],
  928. &gi2c->msgs[msg_idx], !gi2c->err);
  929. }
  930. }
  931. /**
  932. * gi2c_gsi_tre_process() - Process received TRE's from GSI HW
  933. * @gi2c: Base address of the gi2c dev structure.
  934. * @num: number of messages count.
  935. *
  936. * This function is used to process received TREs from the GSI HW.
  937. * In the error case it is also used to clear and unmap all pending transfers.
  938. *
  939. * Return: None.
  940. */
  941. static void gi2c_gsi_tre_process(struct geni_i2c_dev *gi2c, int num)
  942. {
  943. u32 msg_xfer_cnt;
  944. int wr_idx = 0;
  945. unsigned long long start_time;
  946. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  947. gi2c->i2c_kpi);
  948. /* In the error case we need to unmap all messages.
  949. * In the regular working case only processed messages are unmapped.
  950. */
  951. if (gi2c->err)
  952. msg_xfer_cnt = gi2c->gsi_tx.msg_cnt;
  953. else
  954. msg_xfer_cnt = atomic_read(&gi2c->gsi_tx.irq_cnt) * NUM_TRE_MSGS_PER_INTR;
  955. for (; gi2c->gsi_tx.unmap_cnt < msg_xfer_cnt; gi2c->gsi_tx.unmap_cnt++) {
  956. if (gi2c->gsi_tx.unmap_cnt == num) {
  957. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  958. "%s:last %d msg unmapped\n", __func__, num);
  959. break;
  960. }
  961. gi2c->gsi_tx.tre_freed_cnt++;
  962. if (gi2c->msgs[gi2c->gsi_tx.unmap_cnt].len > IMMEDIATE_DMA_LEN) {
  963. wr_idx = gi2c->gsi_tx.unmap_cnt % MAX_NUM_TRE_MSGS;
  964. gi2c_gsi_tx_unmap(gi2c, gi2c->gsi_tx.unmap_cnt, wr_idx);
  965. }
  966. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  967. "%s:unmap_cnt %d freed_cnt:%d wr_idx:%d\n",
  968. __func__, gi2c->gsi_tx.unmap_cnt, gi2c->gsi_tx.tre_freed_cnt, wr_idx);
  969. }
  970. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  971. gi2c->i2c_kpi, start_time, 0, 0);
  972. }
  973. static void gi2c_gsi_tx_cb(void *ptr)
  974. {
  975. struct msm_gpi_dma_async_tx_cb_param *tx_cb = ptr;
  976. struct geni_i2c_dev *gi2c;
  977. if (!(tx_cb && tx_cb->userdata)) {
  978. pr_err("%s: Invalid tx_cb buffer\n", __func__);
  979. return;
  980. }
  981. gi2c = tx_cb->userdata;
  982. gi2c_gsi_cb_err(tx_cb, "TX");
  983. atomic_inc(&gi2c->gsi_tx.irq_cnt);
  984. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  985. "%s:tx_cnt:%d gsi_err:%d gi2c_err:%d irq_cnt:%d\n",
  986. __func__, gi2c->gsi_tx.msg_cnt, gi2c->gsi_err, gi2c->err,
  987. atomic_read(&gi2c->gsi_tx.irq_cnt));
  988. complete_all(&gi2c->xfer);
  989. }
  990. static void gi2c_gsi_rx_cb(void *ptr)
  991. {
  992. struct msm_gpi_dma_async_tx_cb_param *rx_cb = ptr;
  993. struct geni_i2c_dev *gi2c;
  994. if (!(rx_cb && rx_cb->userdata)) {
  995. pr_err("%s: Invalid rx_cb buffer\n", __func__);
  996. return;
  997. }
  998. gi2c = rx_cb->userdata;
  999. if (!gi2c->cur) {
  1000. geni_i2c_err(gi2c, GENI_SPURIOUS_IRQ);
  1001. complete(&gi2c->xfer);
  1002. return;
  1003. }
  1004. if (gi2c->cur->flags & I2C_M_RD) {
  1005. gi2c_gsi_cb_err(rx_cb, "RX");
  1006. complete(&gi2c->xfer);
  1007. }
  1008. }
  1009. static int geni_i2c_gsi_request_channel(struct geni_i2c_dev *gi2c)
  1010. {
  1011. int ret = 0;
  1012. if (!gi2c->tx_c) {
  1013. gi2c->tx_c = dma_request_slave_channel(gi2c->dev, "tx");
  1014. if (!gi2c->tx_c) {
  1015. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1016. "tx dma req slv chan ret :%d\n", ret);
  1017. return -EIO;
  1018. }
  1019. }
  1020. if (!gi2c->rx_c) {
  1021. gi2c->rx_c = dma_request_slave_channel(gi2c->dev, "rx");
  1022. if (!gi2c->rx_c) {
  1023. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1024. "rx dma req slv chan ret :%d\n", ret);
  1025. dma_release_channel(gi2c->tx_c);
  1026. return -EIO;
  1027. }
  1028. }
  1029. gi2c->tx_ev.init.callback = gi2c_ev_cb;
  1030. gi2c->tx_ev.init.cb_param = gi2c;
  1031. gi2c->tx_ev.cmd = MSM_GPI_INIT;
  1032. gi2c->tx_c->private = &gi2c->tx_ev;
  1033. ret = dmaengine_slave_config(gi2c->tx_c, NULL);
  1034. if (ret) {
  1035. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1036. "tx dma slave config ret :%d\n", ret);
  1037. goto dmaengine_slave_config_fail;
  1038. }
  1039. gi2c->rx_ev.init.cb_param = gi2c;
  1040. gi2c->rx_ev.init.callback = gi2c_ev_cb;
  1041. gi2c->rx_ev.cmd = MSM_GPI_INIT;
  1042. gi2c->rx_c->private = &gi2c->rx_ev;
  1043. ret = dmaengine_slave_config(gi2c->rx_c, NULL);
  1044. if (ret) {
  1045. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1046. "rx dma slave config ret :%d\n", ret);
  1047. goto dmaengine_slave_config_fail;
  1048. }
  1049. gi2c->tx_cb.userdata = gi2c;
  1050. gi2c->rx_cb.userdata = gi2c;
  1051. gi2c->req_chan = true;
  1052. return ret;
  1053. dmaengine_slave_config_fail:
  1054. dma_release_channel(gi2c->tx_c);
  1055. dma_release_channel(gi2c->rx_c);
  1056. gi2c->tx_c = NULL;
  1057. gi2c->rx_c = NULL;
  1058. return ret;
  1059. }
  1060. static struct msm_gpi_tre *setup_lock_tre(struct geni_i2c_dev *gi2c)
  1061. {
  1062. struct msm_gpi_tre *lock_t = &gi2c->lock_t;
  1063. /* lock: chain bit set */
  1064. lock_t->dword[0] = MSM_GPI_LOCK_TRE_DWORD0;
  1065. lock_t->dword[1] = MSM_GPI_LOCK_TRE_DWORD1;
  1066. lock_t->dword[2] = MSM_GPI_LOCK_TRE_DWORD2;
  1067. /* ieob for le-vm and chain for shared se */
  1068. if (gi2c->is_shared)
  1069. lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 0, 1);
  1070. else if (gi2c->is_le_vm)
  1071. lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 1, 0);
  1072. return lock_t;
  1073. }
  1074. static struct msm_gpi_tre *setup_cfg0_tre(struct geni_i2c_dev *gi2c)
  1075. {
  1076. struct geni_i2c_clk_fld *itr;
  1077. struct msm_gpi_tre *cfg0_t = &gi2c->cfg0_t;
  1078. bool gsi_bei = false;
  1079. if (gi2c->gsi_tx.is_multi_descriptor)
  1080. gsi_bei = true;
  1081. if (gi2c->is_i2c_rtl_based)
  1082. itr = geni_i2c_hub_clk_map + gi2c->clk_fld_idx;
  1083. else
  1084. itr = geni_i2c_clk_map + gi2c->clk_fld_idx;
  1085. /* config0 */
  1086. cfg0_t->dword[0] = MSM_GPI_I2C_CONFIG0_TRE_DWORD0(I2C_PACK_EN,
  1087. itr->t_cycle, itr->t_high, itr->t_low);
  1088. cfg0_t->dword[1] = MSM_GPI_I2C_CONFIG0_TRE_DWORD1(0, 0);
  1089. cfg0_t->dword[2] = MSM_GPI_I2C_CONFIG0_TRE_DWORD2(0, itr->clk_div);
  1090. cfg0_t->dword[3] = MSM_GPI_I2C_CONFIG0_TRE_DWORD3(0, gsi_bei, 0, 0, 1);
  1091. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "cfg_tre 0x%x 0x%x 0x%x 0x%x\n",
  1092. cfg0_t->dword[0], cfg0_t->dword[1], cfg0_t->dword[2], cfg0_t->dword[3]);
  1093. return cfg0_t;
  1094. }
  1095. static struct msm_gpi_tre *setup_go_tre(struct geni_i2c_dev *gi2c,
  1096. struct i2c_msg msgs[], int i, int num)
  1097. {
  1098. struct msm_gpi_tre *go_t = &gi2c->go_t;
  1099. u8 op = (msgs[i].flags & I2C_M_RD) ? 2 : 1;
  1100. int stretch = (i < (num - 1));
  1101. bool gsi_bei = false;
  1102. if (gi2c->gsi_tx.is_multi_descriptor)
  1103. gsi_bei = true;
  1104. go_t->dword[0] = MSM_GPI_I2C_GO_TRE_DWORD0((stretch << 2),
  1105. msgs[i].addr, op);
  1106. go_t->dword[1] = MSM_GPI_I2C_GO_TRE_DWORD1;
  1107. if (msgs[i].flags & I2C_M_RD) {
  1108. go_t->dword[2] = MSM_GPI_I2C_GO_TRE_DWORD2(msgs[i].len);
  1109. /*
  1110. * For Rx Go tre: Set ieob for non-shared se and for all
  1111. * but last transfer in shared se
  1112. */
  1113. if (!gi2c->is_shared || (gi2c->is_shared && i != num-1))
  1114. go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(1, 0, 0, 1, 0);
  1115. else
  1116. go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(1, 0, 0, 0, 0);
  1117. } else {
  1118. /* For Tx Go tre: ieob is not set, chain bit is set */
  1119. go_t->dword[2] = MSM_GPI_I2C_GO_TRE_DWORD2(0);
  1120. go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(0, gsi_bei, 0, 0, 1);
  1121. }
  1122. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "go_tre 0x%x 0x%x 0x%x 0x%x\n",
  1123. go_t->dword[0], go_t->dword[1], go_t->dword[2], go_t->dword[3]);
  1124. return go_t;
  1125. }
  1126. static struct msm_gpi_tre *setup_rx_tre(struct geni_i2c_dev *gi2c,
  1127. struct i2c_msg msgs[], int i, int num)
  1128. {
  1129. struct msm_gpi_tre *rx_t = &gi2c->rx_t;
  1130. rx_t->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->rx_ph);
  1131. rx_t->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(gi2c->rx_ph);
  1132. rx_t->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(msgs[i].len);
  1133. /* Set ieot for all Rx/Tx DMA tres */
  1134. rx_t->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 0);
  1135. return rx_t;
  1136. }
  1137. void geni_i2c_get_immediate_dma_data(u8 *dword, int len, uint8_t *buf)
  1138. {
  1139. int i = 0;
  1140. for (i = 0; i < len; i++)
  1141. dword[i] = buf[i];
  1142. }
  1143. static struct msm_gpi_tre *setup_tx_tre(struct geni_i2c_dev *gi2c,
  1144. struct i2c_msg msgs[], int i, int num, bool *gsi_bei, int wr_idx)
  1145. {
  1146. struct msm_gpi_tre *tx_t = &gi2c->tx_t;
  1147. bool is_immediate_dma = false;
  1148. if (msgs[i].len <= IMMEDIATE_DMA_LEN)
  1149. is_immediate_dma = true;
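/*
 * For multi-descriptor TX, gsi_bei (block event interrupt) suppresses the
 * per-TRE completion interrupt: it is left clear only on every
 * NUM_TRE_MSGS_PER_INTR-th TRE and on the last TRE, so completion
 * callbacks arrive in batches rather than per message.
 */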
  1150. if (gi2c->gsi_tx.is_multi_descriptor) {
  1151. if ((i + 1) % NUM_TRE_MSGS_PER_INTR)
  1152. *gsi_bei = true;
  1153. else
  1154. *gsi_bei = false;
  1155. /* BEI bit to be cleared for last TRE. */
  1156. if (i == (num - 1))
  1157. *gsi_bei = false;
  1158. }
  1159. if (is_immediate_dma) {
  1160. /* dword[0], dword[1] filled as per data length */
  1161. tx_t->dword[0] = 0;
  1162. tx_t->dword[1] = 0;
  1163. geni_i2c_get_immediate_dma_data((uint8_t *)&tx_t->dword[0],
  1164. msgs[i].len, msgs[i].buf);
  1165. tx_t->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(msgs[i].len);
  1166. if (gi2c->is_shared && i == (num - 1))
  1167. /*
  1168. * For Tx: unlock tre is sent for last transfer
  1169. * so set chain bit for last transfer DMA tre.
  1170. */
  1171. tx_t->dword[3] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(0, 0, 1, 0, 1);
  1172. else
  1173. tx_t->dword[3] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(0, *gsi_bei, 1, 0, 0);
  1174. } else {
  1175. tx_t->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->tx_ph[wr_idx]);
  1176. tx_t->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(gi2c->tx_ph[wr_idx]);
  1177. tx_t->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(msgs[i].len);
  1178. if (gi2c->is_shared && (i == num - 1))
  1179. /*
  1180. * For Tx: unlock tre is sent for last transfer
  1181. * so set chain bit for last transfer DMA tre.
  1182. */
  1183. tx_t->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 1);
  1184. else
  1185. tx_t->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, *gsi_bei, 1, 0, 0);
  1186. }
  1187. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "tx_tre 0x%x 0x%x 0x%x 0x%x imm_dma:%d bei:%d\n",
  1188. tx_t->dword[0], tx_t->dword[1], tx_t->dword[2], tx_t->dword[3],
  1189. is_immediate_dma, *gsi_bei);
  1190. return tx_t;
  1191. }
  1192. static struct msm_gpi_tre *setup_unlock_tre(struct geni_i2c_dev *gi2c)
  1193. {
  1194. struct msm_gpi_tre *unlock_t = &gi2c->unlock_t;
  1195. /* unlock tre: ieob set */
  1196. unlock_t->dword[0] = MSM_GPI_UNLOCK_TRE_DWORD0;
  1197. unlock_t->dword[1] = MSM_GPI_UNLOCK_TRE_DWORD1;
  1198. unlock_t->dword[2] = MSM_GPI_UNLOCK_TRE_DWORD2;
  1199. unlock_t->dword[3] = MSM_GPI_UNLOCK_TRE_DWORD3(0, 0, 0, 1, 0);
  1200. return unlock_t;
  1201. }
  1202. static struct dma_async_tx_descriptor *geni_i2c_prep_desc
  1203. (struct geni_i2c_dev *gi2c, struct dma_chan *chan, int segs, bool tx_chan)
  1204. {
  1205. struct dma_async_tx_descriptor *geni_desc = NULL;
  1206. if (tx_chan) {
  1207. geni_desc = dmaengine_prep_slave_sg(gi2c->tx_c, gi2c->tx_sg,
  1208. segs, DMA_MEM_TO_DEV,
  1209. (DMA_PREP_INTERRUPT | DMA_CTRL_ACK));
  1210. if (!geni_desc) {
  1211. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1212. "prep_slave_sg for tx failed\n");
  1213. gi2c->err = -ENOMEM;
  1214. return NULL;
  1215. }
  1216. geni_desc->callback = gi2c_gsi_tx_cb;
  1217. geni_desc->callback_param = &gi2c->tx_cb;
  1218. } else {
  1219. geni_desc = dmaengine_prep_slave_sg(gi2c->rx_c,
  1220. gi2c->rx_sg, 1, DMA_DEV_TO_MEM,
  1221. (DMA_PREP_INTERRUPT | DMA_CTRL_ACK));
  1222. if (!geni_desc) {
  1223. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1224. "prep_slave_sg for rx failed\n");
  1225. gi2c->err = -ENOMEM;
  1226. return NULL;
  1227. }
  1228. geni_desc->callback = gi2c_gsi_rx_cb;
  1229. geni_desc->callback_param = &gi2c->rx_cb;
  1230. }
  1231. return geni_desc;
  1232. }
  1233. static int geni_i2c_lock_bus(struct geni_i2c_dev *gi2c)
  1234. {
  1235. struct msm_gpi_tre *lock_t = NULL;
  1236. int ret = 0, timeout = 0;
  1237. dma_cookie_t tx_cookie;
  1238. bool tx_chan = true;
  1239. unsigned long long start_time;
  1240. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  1241. gi2c->i2c_kpi);
  1242. if (!gi2c->req_chan) {
  1243. ret = geni_i2c_gsi_request_channel(gi2c);
  1244. if (ret)
  1245. return ret;
  1246. }
  1247. lock_t = setup_lock_tre(gi2c);
  1248. sg_init_table(gi2c->tx_sg, 1);
  1249. sg_set_buf(&gi2c->tx_sg[0], lock_t,
  1250. sizeof(gi2c->lock_t));
  1251. gi2c->tx_desc = geni_i2c_prep_desc(gi2c, gi2c->tx_c, 1, tx_chan);
  1252. if (!gi2c->tx_desc) {
  1253. gi2c->err = -ENOMEM;
  1254. goto geni_i2c_err_lock_bus;
  1255. }
  1256. reinit_completion(&gi2c->xfer);
  1257. /* Issue TX */
  1258. tx_cookie = dmaengine_submit(gi2c->tx_desc);
  1259. if (dma_submit_error(tx_cookie)) {
  1260. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1261. "%s: dmaengine_submit failed (%d)\n", __func__, tx_cookie);
  1262. gi2c->err = -EINVAL;
  1263. goto geni_i2c_err_lock_bus;
  1264. }
  1265. dma_async_issue_pending(gi2c->tx_c);
  1266. timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
  1267. if (!timeout) {
  1268. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  1269. "%s timedout\n", __func__);
  1270. geni_i2c_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base,
  1271. gi2c->ipcl);
  1272. gi2c->err = -ETIMEDOUT;
  1273. goto geni_i2c_err_lock_bus;
  1274. }
  1275. return 0;
  1276. geni_i2c_err_lock_bus:
  1277. if (gi2c->err) {
  1278. dmaengine_terminate_all(gi2c->tx_c);
  1279. gi2c->cfg_sent = 0;
  1280. }
  1281. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  1282. gi2c->i2c_kpi, start_time, 0, 0);
  1283. return gi2c->err;
  1284. }
  1285. static void geni_i2c_unlock_bus(struct geni_i2c_dev *gi2c)
  1286. {
  1287. struct msm_gpi_tre *unlock_t = NULL;
  1288. int timeout = 0;
  1289. dma_cookie_t tx_cookie;
  1290. bool tx_chan = true;
  1291. unsigned long long start_time;
  1292. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  1293. gi2c->i2c_kpi);
  1294. /* if gpi reset happened for levm, no need to do unlock */
  1295. if (gi2c->is_le_vm && gi2c->le_gpi_reset_done) {
  1296. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1297. "%s:gpi reset happened for levm, no need to do unlock\n", __func__);
  1298. return;
  1299. }
  1300. unlock_t = setup_unlock_tre(gi2c);
  1301. sg_init_table(gi2c->tx_sg, 1);
  1302. sg_set_buf(&gi2c->tx_sg[0], unlock_t,
  1303. sizeof(gi2c->unlock_t));
  1304. gi2c->tx_desc = geni_i2c_prep_desc(gi2c, gi2c->tx_c, 1, tx_chan);
  1305. if (!gi2c->tx_desc) {
  1306. gi2c->err = -ENOMEM;
  1307. goto geni_i2c_err_unlock_bus;
  1308. }
  1309. reinit_completion(&gi2c->xfer);
  1310. /* Issue TX */
  1311. tx_cookie = dmaengine_submit(gi2c->tx_desc);
  1312. if (dma_submit_error(tx_cookie)) {
  1313. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1314. "%s: dmaengine_submit failed (%d)\n", __func__, tx_cookie);
  1315. gi2c->err = -EINVAL;
  1316. goto geni_i2c_err_unlock_bus;
  1317. }
  1318. dma_async_issue_pending(gi2c->tx_c);
  1319. timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
  1320. if (!timeout) {
  1321. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  1322. "%s failed\n", __func__);
  1323. geni_i2c_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base,
  1324. gi2c->ipcl);
  1325. gi2c->err = -ETIMEDOUT;
  1326. goto geni_i2c_err_unlock_bus;
  1327. }
  1328. geni_i2c_err_unlock_bus:
  1329. if (gi2c->err) {
  1330. dmaengine_terminate_all(gi2c->tx_c);
  1331. gi2c->cfg_sent = 0;
  1332. gi2c->err = 0;
  1333. }
  1334. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  1335. gi2c->i2c_kpi, start_time, 0, 0);
  1336. }
  1337. /**
1338. * geni_i2c_gsi_tx_tre_optimization() - Process received TREs from GSI HW
1339. * @gi2c: Pointer to the gi2c dev structure.
1340. * @num: Total number of messages.
1341. * @msg_idx: gi2c message index.
1342. * @wr_idx: gi2c buffer write index.
1343. *
1344. * This function optimizes DMA TRE submission so that the HW is always kept busy.
1345. *
1346. * Return: Timeout value (0 if the transfer timed out, non-zero otherwise)
  1347. */
  1348. static int geni_i2c_gsi_tx_tre_optimization(struct geni_i2c_dev *gi2c, u32 num, u32 msg_idx,
  1349. u32 wr_idx)
  1350. {
  1351. int timeout = 1, i;
  1352. int max_irq_cnt;
  1353. unsigned long long start_time;
  1354. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  1355. gi2c->i2c_kpi);
  1356. max_irq_cnt = num / NUM_TRE_MSGS_PER_INTR;
  1357. if (num % NUM_TRE_MSGS_PER_INTR)
  1358. max_irq_cnt++;
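/*
 * Example (illustration only, assuming NUM_TRE_MSGS_PER_INTR is 4; the real
 * value is defined elsewhere in this file): for num = 10 messages,
 * max_irq_cnt = 10 / 4 = 2, plus 1 for the remainder, i.e. 3 completion
 * interrupts are expected in multi-descriptor mode.
 */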
1359. /*
1360. * If this is the last message, wait for all pending TREs,
1361. * including the last submitted TRE.
  1362. */
  1363. if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared) {
  1364. for (i = 0; i < max_irq_cnt; i++) {
  1365. if (max_irq_cnt != atomic_read(&gi2c->gsi_tx.irq_cnt)) {
  1366. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1367. "%s: calling wait for_completion %d\n", __func__, i);
  1368. timeout = wait_for_completion_timeout(&gi2c->xfer,
  1369. gi2c->xfer_timeout);
  1370. reinit_completion(&gi2c->xfer);
  1371. if (!timeout) {
  1372. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1373. "%s: msg xfer timeout\n", __func__);
  1374. return timeout;
  1375. }
  1376. }
  1377. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1378. "%s: maxirq_cnt:%d i:%d\n", __func__, max_irq_cnt, i);
  1379. gi2c_gsi_tre_process(gi2c, num);
  1380. if (num > gi2c->gsi_tx.msg_cnt)
  1381. return timeout;
  1382. }
  1383. } else {
1384. /*
1385. * For a shared SE, or when multi-descriptor mode is not in use,
1386. * go with the regular approach.
  1387. */
  1388. timeout = wait_for_completion_timeout(&gi2c->xfer, gi2c->xfer_timeout);
  1389. reinit_completion(&gi2c->xfer);
  1390. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1391. "%s: msg_idx:%d wr_idx:%d\n", __func__, msg_idx, wr_idx);
1392. /* if the tre was processed without errors, do the unmap here */
  1393. if (timeout && !gi2c->err)
  1394. gi2c_gsi_tx_unmap(gi2c, msg_idx, wr_idx);
  1395. }
  1396. /* process received tre's */
  1397. if (timeout) {
  1398. if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared)
  1399. gi2c_gsi_tre_process(gi2c, num);
  1400. }
  1401. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1402. "%s: timeout :%d\n", __func__, timeout);
  1403. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  1404. gi2c->i2c_kpi, start_time, 0, 0);
  1405. return timeout;
  1406. }
  1407. /**
1408. * geni_i2c_calc_xfer_time() - Calculate transfer time
1409. * @gi2c: Pointer to the geni i2c structure
1410. * @msgs[]: Pointer to the i2c_msg array
1411. * @start_time: Start time of the function
1412. * @msg_idx: gi2c message index.
1413. * @func: Name of the function for which KPI is captured.
  1414. *
  1415. * Return: None.
  1416. */
  1417. static void geni_i2c_calc_xfer_time(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
  1418. unsigned long long start_time, u32 msg_idx, const char *func)
  1419. {
  1420. char fname[32];
  1421. if (msgs[msg_idx].flags & I2C_M_RD)
  1422. scnprintf(fname, sizeof(fname), "%s%s", func, "_rd");
  1423. else
  1424. scnprintf(fname, sizeof(fname), "%s%s", func, "_wr");
  1425. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, fname, gi2c->i2c_kpi,
  1426. start_time, msgs[msg_idx].len, gi2c->clk_freq_out);
  1427. }
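/*
 * Example (illustration only): when called from geni_i2c_gsi_xfer() for a
 * read message, fname becomes "geni_i2c_gsi_xfer_rd"; for a write it becomes
 * "geni_i2c_gsi_xfer_wr", so read and write KPIs are logged under separate
 * names.
 */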
  1428. static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
  1429. int num)
  1430. {
  1431. struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
  1432. u32 i = 0;
  1433. int ret = 0, timeout = 0;
  1434. struct msm_gpi_tre *lock_t = NULL;
  1435. struct msm_gpi_tre *unlock_t = NULL;
  1436. struct msm_gpi_tre *cfg0_t = NULL;
  1437. u8 *rd_dma_buf = NULL;
  1438. u8 op;
  1439. int segs;
  1440. u32 index = 0, wr_idx = 0;
  1441. dma_cookie_t tx_cookie, rx_cookie;
  1442. struct msm_gpi_tre *go_t = NULL;
  1443. struct msm_gpi_tre *rx_t = NULL;
  1444. struct msm_gpi_tre *tx_t = NULL;
  1445. bool tx_chan = true;
  1446. bool gsi_bei = false;
  1447. unsigned long long start_time;
  1448. unsigned long long start_time_xfer = sched_clock();
  1449. gi2c->gsi_err = false;
  1450. if (!gi2c->req_chan) {
  1451. ret = geni_i2c_gsi_request_channel(gi2c);
  1452. if (ret)
  1453. return ret;
  1454. }
  1455. if (gi2c->is_le_vm && gi2c->le_gpi_reset_done) {
  1456. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1457. "%s doing gsi lock, due to levm gsi reset\n", __func__);
  1458. ret = geni_i2c_lock_bus(gi2c);
  1459. if (ret) {
  1460. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1461. "%s lock bus failed: %d\n", __func__, ret);
  1462. return ret;
  1463. }
  1464. gi2c->le_gpi_reset_done = false;
  1465. }
  1466. if (gi2c->is_shared) {
  1467. lock_t = setup_lock_tre(gi2c);
  1468. unlock_t = setup_unlock_tre(gi2c);
  1469. }
  1470. gi2c->gsi_tx.is_multi_descriptor = false;
1471. /* if the number of msgs is 4 or more, check for multi descriptor mode */
  1472. if (num >= 4) {
  1473. gi2c->gsi_tx.is_multi_descriptor = true;
1474. /* multi descriptor mode is supported only for continuous writes */
  1475. for (i = 0; i < num; i++)
  1476. if (msgs[i].flags & I2C_M_RD)
  1477. gi2c->gsi_tx.is_multi_descriptor = false;
  1478. }
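/*
 * Example (illustration only): a batch of 5 write messages keeps
 * is_multi_descriptor true, while a batch of 4 messages that contains even
 * one read (I2C_M_RD) falls back to the regular single-descriptor path.
 */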
  1479. if (!gi2c->cfg_sent)
  1480. cfg0_t = setup_cfg0_tre(gi2c);
  1481. gi2c->msgs = msgs;
  1482. gi2c->gsi_tx.msg_cnt = 0;
  1483. gi2c->gsi_tx.unmap_cnt = 0;
  1484. gi2c->gsi_tx.tre_freed_cnt = 0;
  1485. atomic_set(&gi2c->gsi_tx.irq_cnt, 0);
  1486. reinit_completion(&gi2c->xfer);
  1487. for (i = 0; i < num; i++) {
  1488. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  1489. gi2c->i2c_kpi);
  1490. op = (msgs[i].flags & I2C_M_RD) ? 2 : 1;
  1491. segs = 3 - op;
  1492. index = 0;
1493. /*
1494. * Sometimes all TREs may be processed without
1495. * waiting for the timer thread, so initialize
1496. * timeout to a non-zero value.
  1497. */
  1498. timeout = 1;
  1499. gi2c->cur = &msgs[i];
  1500. if (!gi2c->cur) {
  1501. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1502. "%s: Invalid buffer\n", __func__);
  1503. ret = -ENOMEM;
  1504. goto geni_i2c_gsi_xfer_out;
  1505. }
  1506. qcom_geni_i2c_calc_timeout(gi2c);
  1507. if (!gi2c->cfg_sent)
  1508. segs++;
  1509. if (gi2c->is_shared && (i == 0 || i == num-1)) {
  1510. segs++;
  1511. if (num == 1)
  1512. segs++;
  1513. sg_init_table(gi2c->tx_sg, segs);
  1514. if (i == 0)
1515. /* Send lock tre for the first message of a transfer */
  1516. sg_set_buf(&gi2c->tx_sg[index++], lock_t,
  1517. sizeof(gi2c->lock_t));
  1518. } else {
  1519. sg_init_table(gi2c->tx_sg, segs);
  1520. }
  1521. /* Send cfg tre when cfg not sent already */
  1522. if (!gi2c->cfg_sent) {
  1523. sg_set_buf(&gi2c->tx_sg[index++], cfg0_t,
  1524. sizeof(gi2c->cfg0_t));
  1525. gi2c->cfg_sent = 1;
  1526. }
  1527. go_t = setup_go_tre(gi2c, msgs, i, num);
  1528. sg_set_buf(&gi2c->tx_sg[index++], go_t, sizeof(gi2c->go_t));
  1529. if (msgs[i].flags & I2C_M_RD) {
  1530. reinit_completion(&gi2c->xfer);
  1531. rd_dma_buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
  1532. if (!rd_dma_buf) {
  1533. ret = -ENOMEM;
  1534. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1535. "i2c_get_dma_safe_msg_buf failed :%d\n",
  1536. ret);
  1537. goto geni_i2c_gsi_xfer_out;
  1538. }
  1539. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1540. "msg[%d].len:%d R\n", i, gi2c->cur->len);
  1541. sg_init_table(gi2c->rx_sg, 1);
  1542. ret = geni_se_common_iommu_map_buf(gi2c->wrapper_dev,
  1543. &gi2c->rx_ph,
  1544. rd_dma_buf,
  1545. msgs[i].len,
  1546. DMA_FROM_DEVICE);
  1547. if (ret) {
  1548. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1549. "geni_se_common_iommu_map_buf for rx failed :%d\n", ret);
  1550. i2c_put_dma_safe_msg_buf(rd_dma_buf, &msgs[i], false);
  1551. goto geni_i2c_gsi_xfer_out;
  1552. } else if (gi2c->dbg_buf_ptr) {
  1553. gi2c->dbg_buf_ptr[i].virt_buf =
  1554. (void *)rd_dma_buf;
  1555. gi2c->dbg_buf_ptr[i].map_buf =
  1556. (void *)&gi2c->rx_ph;
  1557. }
  1558. rx_t = setup_rx_tre(gi2c, msgs, i, num);
  1559. sg_set_buf(gi2c->rx_sg, rx_t,
  1560. sizeof(gi2c->rx_t));
  1561. gi2c->rx_desc =
  1562. geni_i2c_prep_desc(gi2c, gi2c->rx_c, segs, !tx_chan);
  1563. if (!gi2c->rx_desc) {
  1564. gi2c->err = -ENOMEM;
  1565. goto geni_i2c_err_prep_sg;
  1566. }
  1567. /* Issue RX */
  1568. rx_cookie = dmaengine_submit(gi2c->rx_desc);
  1569. if (dma_submit_error(rx_cookie)) {
  1570. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1571. "%s: dmaengine_submit failed (%d)\n", __func__, rx_cookie);
  1572. gi2c->err = -EINVAL;
  1573. goto geni_i2c_err_prep_sg;
  1574. }
  1575. dma_async_issue_pending(gi2c->rx_c);
  1576. /* submit config/go tre through tx channel */
  1577. if (gi2c->is_shared && (i == (num - 1))) {
  1578. /* Send unlock tre at the end of last transfer */
  1579. sg_set_buf(&gi2c->tx_sg[index++],
  1580. unlock_t, sizeof(gi2c->unlock_t));
  1581. }
  1582. gi2c->tx_desc = geni_i2c_prep_desc(gi2c, gi2c->tx_c, segs, tx_chan);
  1583. if (!gi2c->tx_desc) {
  1584. gi2c->err = -ENOMEM;
  1585. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1586. "geni_i2c_prep_desc failed\n");
  1587. goto geni_i2c_err_prep_sg;
  1588. }
  1589. /* Issue TX */
  1590. tx_cookie = dmaengine_submit(gi2c->tx_desc);
  1591. if (dma_submit_error(tx_cookie)) {
  1592. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1593. "%s: dmaengine_submit failed (%d)\n",
  1594. __func__, tx_cookie);
  1595. gi2c->err = -EINVAL;
  1596. goto geni_i2c_err_prep_sg;
  1597. }
  1598. dma_async_issue_pending(gi2c->tx_c);
  1599. timeout = wait_for_completion_timeout(&gi2c->xfer,
  1600. gi2c->xfer_timeout);
  1601. } else {
  1602. if (msgs[i].len > IMMEDIATE_DMA_LEN) {
  1603. gi2c->gsi_tx.dma_buf[wr_idx] =
  1604. i2c_get_dma_safe_msg_buf(&msgs[i], 1);
  1605. if (!gi2c->gsi_tx.dma_buf[wr_idx]) {
  1606. ret = -ENOMEM;
  1607. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1608. "i2c_get_dma_safe_msg_buf failed :%d\n", ret);
  1609. goto geni_i2c_gsi_xfer_out;
  1610. }
  1611. ret = geni_se_common_iommu_map_buf(gi2c->wrapper_dev,
  1612. &gi2c->tx_ph[wr_idx],
  1613. gi2c->gsi_tx.dma_buf[wr_idx],
  1614. msgs[i].len, DMA_TO_DEVICE);
  1615. if (ret) {
  1616. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1617. "geni iommu_map_buf for tx failed :%d\n", ret);
  1618. i2c_put_dma_safe_msg_buf(gi2c->gsi_tx.dma_buf[wr_idx],
  1619. &msgs[i], false);
  1620. goto geni_i2c_gsi_xfer_out;
  1621. } else if (gi2c->dbg_buf_ptr) {
  1622. gi2c->dbg_buf_ptr[wr_idx].virt_buf =
  1623. (void *)gi2c->gsi_tx.dma_buf[wr_idx];
  1624. gi2c->dbg_buf_ptr[wr_idx].map_buf =
  1625. (void *)&gi2c->tx_ph[wr_idx];
  1626. }
  1627. }
  1628. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1629. "msg[%d].len:%d W cnt:%d idx:%d\n",
  1630. i, gi2c->cur->len, gi2c->gsi_tx.msg_cnt, wr_idx);
  1631. tx_t = setup_tx_tre(gi2c, msgs, i, num, &gsi_bei, wr_idx);
  1632. sg_set_buf(&gi2c->tx_sg[index++], tx_t, sizeof(gi2c->tx_t));
  1633. if (gi2c->is_shared && (i == (num - 1))) {
  1634. /* Send unlock tre at the end of last transfer */
  1635. sg_set_buf(&gi2c->tx_sg[index++],
  1636. unlock_t, sizeof(gi2c->unlock_t));
  1637. }
  1638. gi2c->tx_desc = geni_i2c_prep_desc(gi2c, gi2c->tx_c, segs, tx_chan);
  1639. if (!gi2c->tx_desc) {
  1640. gi2c->err = -ENOMEM;
  1641. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1642. "geni_i2c_prep_desc failed\n");
  1643. goto geni_i2c_err_prep_sg;
  1644. }
1645. /* no callback is needed if the BEI bit is set */
  1646. if (gsi_bei) {
  1647. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1648. "geni tx desc call back null %d\n", i);
  1649. gi2c->tx_desc->callback = NULL;
  1650. gi2c->tx_desc->callback_param = NULL;
  1651. }
  1652. gi2c->gsi_tx.msg_cnt++;
  1653. wr_idx = (i + 1) % MAX_NUM_TRE_MSGS;
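/*
 * Illustration only (assuming MAX_NUM_TRE_MSGS is 4; the real value is
 * defined elsewhere in this file): wr_idx cycles 1, 2, 3, 0, 1, ... as i
 * increments, so the tx_ph[]/dma_buf[] slots are reused as a small ring.
 */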
  1654. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1655. "tx_cnt:%d", gi2c->gsi_tx.msg_cnt);
  1656. /* Issue TX */
  1657. tx_cookie = dmaengine_submit(gi2c->tx_desc);
  1658. if (dma_submit_error(tx_cookie)) {
  1659. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1660. "%s: dmaengine_submit failed (%d)\n",
  1661. __func__, tx_cookie);
  1662. gi2c->err = -EINVAL;
  1663. goto geni_i2c_err_prep_sg;
  1664. }
  1665. dma_async_issue_pending(gi2c->tx_c);
1666. /*
1667. * If this is not the last message, submit up to MAX_NUM_TRE_MSGS
1668. * continuously without waiting; in between, if any TRE completion
1669. * is received, process it and queue the next TRE.
  1670. */
  1671. if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared &&
  1672. (i != (num - 1)) &&
  1673. (gi2c->gsi_tx.msg_cnt < MAX_NUM_TRE_MSGS + gi2c->gsi_tx.tre_freed_cnt))
  1674. continue;
  1675. timeout = geni_i2c_gsi_tx_tre_optimization(gi2c, num, i, wr_idx - 1);
  1676. }
  1677. if (!timeout) {
  1678. u32 geni_ios = 0;
  1679. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1680. "I2C gsi xfer timeout:%u flags:%d addr:0x%x\n",
  1681. gi2c->xfer_timeout, gi2c->cur->flags,
  1682. gi2c->cur->addr);
  1683. geni_i2c_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base,
  1684. gi2c->ipcl);
  1685. gi2c->err = -ETIMEDOUT;
  1686. /* WAR: Set flag to mark cancel pending if IOS stuck */
  1687. geni_ios = geni_read_reg(gi2c->base, SE_GENI_IOS);
  1688. if ((geni_ios & 0x3) != 0x3) { //SCL:b'1, SDA:b'0
  1689. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  1690. "%s: IO lines not in good state\n",
  1691. __func__);
1692. /* do a pending cancel only for RTL based SEs */
  1693. if (gi2c->is_i2c_rtl_based) {
  1694. gi2c->prev_cancel_pending = true;
  1695. goto geni_i2c_gsi_cancel_pending;
  1696. }
  1697. }
  1698. }
  1699. geni_i2c_err_prep_sg:
  1700. if (gi2c->err || gi2c->gsi_err) {
  1701. ret = dmaengine_terminate_all(gi2c->tx_c);
  1702. if (ret)
  1703. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  1704. "%s: gpi terminate failed ret:%d\n", __func__, ret);
  1705. gi2c->cfg_sent = 0;
  1706. if (gi2c->is_le_vm)
  1707. gi2c->le_gpi_reset_done = true;
  1708. }
  1709. if (gi2c->gsi_err) {
  1710. /* if i2c error already present, no need to update error values */
  1711. if (!gi2c->err) {
  1712. gi2c->err = -EIO;
  1713. ret = gi2c->err;
  1714. }
  1715. gi2c->gsi_err = false;
  1716. }
  1717. if (gi2c->is_shared)
  1718. /* Resend cfg tre for every new message on shared se */
  1719. gi2c->cfg_sent = 0;
  1720. geni_i2c_gsi_cancel_pending:
  1721. if (msgs[i].flags & I2C_M_RD) {
  1722. geni_se_common_iommu_unmap_buf(gi2c->wrapper_dev, &gi2c->rx_ph,
  1723. msgs[i].len, DMA_FROM_DEVICE);
  1724. i2c_put_dma_safe_msg_buf(rd_dma_buf, &msgs[i], !gi2c->err);
  1725. } else if (gi2c->err) {
  1726. /* for multi descriptor unmap all submitted tre's */
  1727. if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared)
  1728. gi2c_gsi_tre_process(gi2c, num);
  1729. else
  1730. gi2c_gsi_tx_unmap(gi2c, i, wr_idx - 1);
  1731. }
  1732. if (gi2c->err)
  1733. goto geni_i2c_gsi_xfer_out;
  1734. geni_i2c_calc_xfer_time(gi2c, msgs, start_time, i, __func__);
  1735. }
  1736. geni_i2c_gsi_xfer_out:
  1737. if (!ret && gi2c->err)
  1738. ret = gi2c->err;
  1739. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1740. "%s Time took for %d xfers = %llu nsecs\n",
  1741. __func__, num, (sched_clock() - start_time_xfer));
  1742. return ret;
  1743. }
  1744. /**
1745. * geni_i2c_execute_xfer() - Performs non-GSI mode data transfer
1746. * @gi2c: Pointer to the geni i2c device structure
1747. * @msgs[]: Pointer to the i2c_msg array
1748. * @num: Number of messages to transfer.
  1749. *
  1750. * Return: 0 on success OR negative error code for failure.
  1751. */
  1752. static int geni_i2c_execute_xfer(struct geni_i2c_dev *gi2c,
  1753. struct i2c_msg msgs[], int num)
  1754. {
  1755. int i, ret = 0, timeout = 0;
  1756. u32 geni_ios = 0;
  1757. unsigned long long start_time;
  1758. unsigned long long start_time_xfer = sched_clock();
  1759. for (i = 0; i < num; i++) {
  1760. int stretch = (i < (num - 1));
  1761. u32 m_param = 0;
  1762. u32 m_cmd = 0;
  1763. u8 *dma_buf = NULL;
  1764. dma_addr_t tx_dma = 0;
  1765. dma_addr_t rx_dma = 0;
  1766. enum geni_se_xfer_mode mode = GENI_SE_FIFO;
  1767. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  1768. gi2c->i2c_kpi);
  1769. reinit_completion(&gi2c->xfer);
  1770. m_param |= (stretch ? STOP_STRETCH : 0);
  1771. m_param |= ((msgs[i].addr & 0x7F) << SLV_ADDR_SHFT);
  1772. gi2c->cur = &msgs[i];
  1773. if (!gi2c->cur) {
  1774. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1775. "%s: Invalid buffer\n", __func__);
  1776. ret = -ENOMEM;
  1777. goto geni_i2c_execute_xfer_exit;
  1778. }
  1779. qcom_geni_i2c_calc_timeout(gi2c);
  1780. if (!gi2c->is_i2c_hub)
  1781. mode = msgs[i].len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
  1782. else
  1783. mode = GENI_SE_FIFO; /* i2c hub has only FIFO mode */
  1784. geni_se_select_mode(&gi2c->i2c_rsc, mode);
  1785. if (mode == GENI_SE_DMA) {
  1786. dma_buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
  1787. if (!dma_buf) {
  1788. ret = -ENOMEM;
  1789. goto geni_i2c_execute_xfer_exit;
  1790. }
  1791. }
  1792. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1793. "%s: stretch:%d, m_param:0x%x\n",
  1794. __func__, stretch, m_param);
  1795. if (msgs[i].flags & I2C_M_RD) {
  1796. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1797. "msgs[%d].len:%d R\n", i, gi2c->cur->len);
  1798. geni_write_reg(msgs[i].len,
  1799. gi2c->base, SE_I2C_RX_TRANS_LEN);
  1800. m_cmd = I2C_READ;
  1801. geni_se_setup_m_cmd(&gi2c->i2c_rsc, m_cmd, m_param);
  1802. if (mode == GENI_SE_DMA) {
  1803. ret = geni_se_rx_dma_prep(&gi2c->i2c_rsc,
  1804. dma_buf, msgs[i].len,
  1805. &rx_dma);
  1806. if (ret) {
  1807. i2c_put_dma_safe_msg_buf(dma_buf,
  1808. &msgs[i], false);
  1809. mode = GENI_SE_FIFO;
  1810. geni_se_select_mode(&gi2c->i2c_rsc,
  1811. mode);
  1812. } else if (gi2c->dbg_buf_ptr) {
  1813. gi2c->dbg_buf_ptr[i].virt_buf =
  1814. (void *)dma_buf;
  1815. gi2c->dbg_buf_ptr[i].map_buf =
  1816. (void *)&rx_dma;
  1817. }
  1818. }
  1819. } else {
  1820. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1821. "msgs[%d].len:%d W\n", i, gi2c->cur->len);
  1822. geni_write_reg(msgs[i].len, gi2c->base,
  1823. SE_I2C_TX_TRANS_LEN);
  1824. m_cmd = I2C_WRITE;
  1825. geni_se_setup_m_cmd(&gi2c->i2c_rsc, m_cmd, m_param);
  1826. if (mode == GENI_SE_DMA) {
  1827. ret = geni_se_tx_dma_prep(&gi2c->i2c_rsc,
  1828. dma_buf, msgs[i].len,
  1829. &tx_dma);
  1830. if (ret) {
  1831. i2c_put_dma_safe_msg_buf(dma_buf,
  1832. &msgs[i], false);
  1833. mode = GENI_SE_FIFO;
  1834. geni_se_select_mode(&gi2c->i2c_rsc,
  1835. mode);
  1836. } else if (gi2c->dbg_buf_ptr) {
  1837. gi2c->dbg_buf_ptr[i].virt_buf =
  1838. (void *)dma_buf;
  1839. gi2c->dbg_buf_ptr[i].map_buf =
  1840. (void *)&tx_dma;
  1841. }
  1842. }
  1843. if (mode == GENI_SE_FIFO) /* Get FIFO IRQ */
  1844. geni_write_reg(1, gi2c->base,
  1845. SE_GENI_TX_WATERMARK_REG);
  1846. }
1847. /* Ensure FIFO writes go through before waiting for the Done event */
  1848. mb();
  1849. timeout = wait_for_completion_timeout(&gi2c->xfer,
  1850. gi2c->xfer_timeout);
  1851. if (!timeout) {
  1852. u32 geni_ios = 0;
  1853. u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
1854. /* clear tx watermark and m_irq_status in case of a delayed irq */
  1855. writel_relaxed(0, (gi2c->base + SE_GENI_TX_WATERMARK_REG));
  1856. if (m_stat)
  1857. writel_relaxed(m_stat, gi2c->base + SE_GENI_M_IRQ_CLEAR);
  1858. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1859. "I2C xfer timeout: %d\n", gi2c->xfer_timeout);
  1860. geni_i2c_err(gi2c, GENI_TIMEOUT);
  1861. /* WAR: Set flag to mark cancel pending if IOS bad */
  1862. geni_ios = geni_read_reg(gi2c->base, SE_GENI_IOS);
  1863. if ((geni_ios & 0x3) != 0x3) { //SCL:b'1, SDA:b'0
  1864. I2C_LOG_DBG(gi2c->ipcl, true, gi2c->dev,
  1865. "%s: IO lines not good: 0x%x\n",
  1866. __func__, geni_ios);
1867. /* do a pending cancel only for RTL based SEs */
  1868. if (gi2c->is_i2c_rtl_based) {
  1869. gi2c->prev_cancel_pending = true;
  1870. goto geni_i2c_execute_xfer_exit;
  1871. }
  1872. }
  1873. } else {
  1874. if (msgs[i].flags & I2C_M_RD)
  1875. I2C_LOG_DBG(gi2c->ipcl, true, gi2c->dev,
  1876. "%s: Read operation completed for len:%d\n",
  1877. __func__, msgs[i].len);
  1878. else
  1879. I2C_LOG_DBG(gi2c->ipcl, true, gi2c->dev,
  1880. "%s:Write operation completed for len:%d\n",
  1881. __func__, msgs[i].len);
  1882. }
  1883. if (gi2c->err) {
  1884. if (gi2c->is_i2c_rtl_based) {
  1885. /* WAR: Set flag to mark cancel pending if IOS bad */
  1886. geni_ios = geni_read_reg(gi2c->base, SE_GENI_IOS);
  1887. if ((geni_ios & 0x3) != 0x3) { //SCL:b'1, SDA:b'0
  1888. I2C_LOG_DBG(gi2c->ipcl, true, gi2c->dev,
  1889. "%s: IO lines not in good state\n",
  1890. __func__);
  1891. gi2c->prev_cancel_pending = true;
  1892. goto geni_i2c_execute_xfer_exit;
  1893. }
  1894. /* EBUSY set by ARB_LOST error condition */
  1895. if (gi2c->err == -EBUSY) {
  1896. I2C_LOG_DBG(gi2c->ipcl, true, gi2c->dev,
  1897. "%s:run reg68 war\n", __func__);
  1898. do_reg68_war_for_rtl_se(gi2c);
  1899. }
  1900. }
  1901. geni_i2c_stop_with_cancel(gi2c);
  1902. }
  1903. gi2c->cur_wr = 0;
  1904. gi2c->cur_rd = 0;
  1905. if (mode == GENI_SE_DMA) {
  1906. if (gi2c->err) {
  1907. reinit_completion(&gi2c->xfer);
  1908. if (msgs[i].flags != I2C_M_RD)
  1909. writel_relaxed(1, gi2c->base +
  1910. SE_DMA_TX_FSM_RST);
  1911. else
  1912. writel_relaxed(1, gi2c->base +
  1913. SE_DMA_RX_FSM_RST);
  1914. wait_for_completion_timeout(&gi2c->xfer, HZ);
  1915. }
  1916. if (rx_dma)
  1917. geni_se_rx_dma_unprep(&gi2c->i2c_rsc, rx_dma,
  1918. msgs[i].len);
  1919. if (tx_dma)
  1920. geni_se_tx_dma_unprep(&gi2c->i2c_rsc, tx_dma,
  1921. msgs[i].len);
  1922. i2c_put_dma_safe_msg_buf(dma_buf, &msgs[i], !gi2c->err);
  1923. }
  1924. ret = gi2c->err;
  1925. if (gi2c->err) {
  1926. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  1927. "i2c error :%d\n", gi2c->err);
  1928. if (geni_i2c_bus_recovery(gi2c))
  1929. GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
  1930. "%s:Bus Recovery failed\n", __func__);
  1931. break;
  1932. }
  1933. geni_i2c_calc_xfer_time(gi2c, msgs, start_time, i, __func__);
  1934. }
  1935. geni_i2c_execute_xfer_exit:
  1936. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  1937. "%s Time took for %d xfers = %llu nsecs\n",
  1938. __func__, num, (sched_clock() - start_time_xfer));
  1939. return ret;
  1940. }
  1941. /**
1942. * geni_i2c_xfer() - Performs I2C data transfer in GSI or non-GSI mode
1943. * @adap: Master controller handle
1944. * @msgs[]: Pointer to the i2c_msg array
1945. * @num: Number of messages to transfer.
  1946. *
  1947. * Return: 0 on success OR negative error code for failure.
  1948. */
  1949. static int geni_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
  1950. int num)
  1951. {
  1952. struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
  1953. int ret = 0;
  1954. u32 geni_ios = 0;
  1955. unsigned long long start_time;
  1956. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  1957. gi2c->i2c_kpi);
  1958. gi2c->err = 0;
  1959. atomic_set(&gi2c->is_xfer_in_progress, 1);
  1960. /* Client to respect system suspend */
  1961. if (!pm_runtime_enabled(gi2c->dev)) {
  1962. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  1963. "%s: System suspended\n", __func__);
  1964. atomic_set(&gi2c->is_xfer_in_progress, 0);
  1965. return -EACCES;
  1966. }
  1967. /* Do Not vote if is_le_vm: LA votes and pm_ctrl_client: client votes */
  1968. if (!gi2c->is_le_vm && !gi2c->pm_ctrl_client) {
  1969. ret = pm_runtime_get_sync(gi2c->dev);
  1970. if (ret < 0) {
  1971. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  1972. "error turning SE resources:%d\n", ret);
  1973. pm_runtime_put_noidle(gi2c->dev);
  1974. /* Set device in suspended since resume failed */
  1975. pm_runtime_set_suspended(gi2c->dev);
  1976. atomic_set(&gi2c->is_xfer_in_progress, 0);
  1977. return ret;
  1978. }
  1979. }
  1980. // WAR : Complete previous pending cancel cmd
  1981. if (gi2c->prev_cancel_pending) {
  1982. ret = do_pending_cancel(gi2c);
  1983. if (ret) {
  1984. /* for levm skip auto suspend timer */
  1985. if (!gi2c->is_le_vm) {
  1986. pm_runtime_mark_last_busy(gi2c->dev);
  1987. pm_runtime_put_autosuspend(gi2c->dev);
  1988. }
  1989. atomic_set(&gi2c->is_xfer_in_progress, 0);
1990. return ret; //Don't perform xfer if cancel failed
  1991. }
  1992. }
  1993. geni_ios = geni_read_reg(gi2c->base, SE_GENI_IOS);
  1994. if (!gi2c->is_shared && ((geni_ios & 0x3) != 0x3)) {//SCL:b'1, SDA:b'0
  1995. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  1996. "IO lines in bad state, Power the slave\n");
  1997. /* for levm skip auto suspend timer */
  1998. if (!gi2c->is_le_vm) {
  1999. pm_runtime_mark_last_busy(gi2c->dev);
  2000. pm_runtime_put_autosuspend(gi2c->dev);
  2001. }
  2002. atomic_set(&gi2c->is_xfer_in_progress, 0);
  2003. return -ENXIO;
  2004. }
  2005. if (gi2c->is_le_vm && (!gi2c->first_xfer_done)) {
  2006. /*
2007. * For le-vm, resume operations are done during the
2008. * first xfer. Probe-sequencing issues between the client
2009. * and the i2c-master driver cause i2c_resume to be invoked
2010. * multiple times, which leads to unclocked access. To
2011. * avoid this, the resume operations are performed here
2012. * the very first time.
  2013. */
  2014. gi2c->first_xfer_done = true;
  2015. ret = geni_i2c_prepare(gi2c);
  2016. if (ret) {
  2017. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2018. "%s I2C prepare failed: %d\n", __func__, ret);
  2019. atomic_set(&gi2c->is_xfer_in_progress, 0);
  2020. return ret;
  2021. }
  2022. ret = geni_i2c_lock_bus(gi2c);
  2023. if (ret) {
  2024. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2025. "%s lock failed: %d\n", __func__, ret);
  2026. atomic_set(&gi2c->is_xfer_in_progress, 0);
  2027. return ret;
  2028. }
  2029. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  2030. "LE-VM first xfer\n");
  2031. }
  2032. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  2033. "n:%d addr:0x%x\n", num, msgs[0].addr);
  2034. gi2c->dbg_num = num;
  2035. kfree(gi2c->dbg_buf_ptr);
  2036. gi2c->dbg_buf_ptr =
  2037. kcalloc(num, sizeof(struct dbg_buf_ctxt), GFP_KERNEL);
  2038. if (!gi2c->dbg_buf_ptr)
  2039. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  2040. "Buf logging pointer not available\n");
  2041. if (gi2c->se_mode == GSI_ONLY) {
  2042. ret = geni_i2c_gsi_xfer(adap, msgs, num);
  2043. goto geni_i2c_txn_ret;
  2044. } else {
  2045. /* Don't set shared flag in non-GSI mode */
  2046. gi2c->is_shared = false;
  2047. }
  2048. ret = geni_i2c_execute_xfer(gi2c, msgs, num);
  2049. geni_i2c_txn_ret:
  2050. if (ret == 0)
  2051. ret = num;
2052. /* Don't unvote if is_le_vm (LA voted) or pm_ctrl_client (client voted).
2053. * Meaning the autosuspend timer is only for the regular usecase, not for the
  2054. * cases with is_le_vm and pm_ctrl_client flags.
  2055. */
  2056. if (!gi2c->is_le_vm && !gi2c->pm_ctrl_client) {
  2057. pm_runtime_mark_last_busy(gi2c->dev);
  2058. pm_runtime_put_autosuspend(gi2c->dev);
  2059. }
  2060. atomic_set(&gi2c->is_xfer_in_progress, 0);
  2061. gi2c->cur = NULL;
  2062. gi2c->err = 0;
  2063. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  2064. "i2c txn ret:%d freq=%dHz\n", ret, gi2c->clk_freq_out);
  2065. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  2066. gi2c->i2c_kpi, start_time, 0, gi2c->clk_freq_out);
  2067. return ret;
  2068. }
  2069. static u32 geni_i2c_func(struct i2c_adapter *adap)
  2070. {
  2071. return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
  2072. }
  2073. static const struct i2c_algorithm geni_i2c_algo = {
  2074. .master_xfer = geni_i2c_xfer,
  2075. .functionality = geni_i2c_func,
  2076. };
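/*
 * Usage sketch (not part of this driver): a client reaches master_xfer
 * through the core i2c_transfer() API. The slave address and buffer below
 * are hypothetical and for illustration only.
 *
 *   u8 buf[4];
 *   struct i2c_msg msg = {
 *           .addr  = 0x50,
 *           .flags = I2C_M_RD,
 *           .len   = sizeof(buf),
 *           .buf   = buf,
 *   };
 *   int ret = i2c_transfer(adap, &msg, 1);   // returns 1 on success
 *
 * i2c_transfer() ends up in geni_i2c_xfer(), which returns the number of
 * messages transferred on success or a negative error code on failure.
 */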
  2077. #if I2C_HUB_DEF
  2078. static int get_geni_se_i2c_hub(struct geni_i2c_dev *gi2c)
  2079. {
  2080. int ret = 0;
  2081. int geni_hw_param;
  2082. ret = geni_se_common_clks_on(gi2c->i2c_rsc.clk, gi2c->m_ahb_clk, gi2c->s_ahb_clk);
  2083. if (ret) {
  2084. dev_err(gi2c->dev, "%s: Err in geni_se_clks_on %d\n", __func__, ret);
  2085. return ret;
  2086. }
  2087. geni_hw_param = geni_read_reg(gi2c->base, GENI_HW_PARAM);
  2088. if (geni_hw_param & I2C_MASTER_HUB)
  2089. gi2c->is_i2c_hub = true;
  2090. else
  2091. gi2c->is_i2c_hub = false;
  2092. geni_se_common_clks_off(gi2c->i2c_rsc.clk, gi2c->m_ahb_clk, gi2c->s_ahb_clk);
  2093. return ret;
  2094. }
  2095. #endif
  2096. /**
2097. * geni_i2c_resources_init() - initialize clk, icc vote, read dt properties
2098. * @pdev: Platform driver handle
2099. * @gi2c: Pointer to the geni i2c structure
2100. *
2101. * Function to initialize clock and icc vote configuration and read the required
2102. * DTSI properties.
  2103. *
  2104. * Return: 0 on success OR negative error code for failure.
  2105. */
  2106. static int geni_i2c_resources_init(struct platform_device *pdev, struct geni_i2c_dev *gi2c)
  2107. {
  2108. int ret;
  2109. /*
  2110. * For LE, clocks, gpio and icb voting will be provided by
2111. * LA. The I2C operates in GSI mode only for the LE usecase,
2112. * so the SE irq is not required. The properties below will not be
2113. * present in the I2C LE dt.
  2114. */
  2115. if (gi2c->is_le_vm)
  2116. return 0;
  2117. gi2c->i2c_rsc.clk = devm_clk_get(&pdev->dev, "se-clk");
  2118. if (IS_ERR(gi2c->i2c_rsc.clk)) {
  2119. ret = PTR_ERR(gi2c->i2c_rsc.clk);
  2120. dev_err(&pdev->dev, "Err getting SE Core clk %d\n",
  2121. ret);
  2122. return ret;
  2123. }
  2124. gi2c->m_ahb_clk = devm_clk_get(gi2c->dev->parent, "m-ahb");
  2125. if (IS_ERR(gi2c->m_ahb_clk)) {
  2126. ret = PTR_ERR(gi2c->m_ahb_clk);
  2127. dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
  2128. return ret;
  2129. }
  2130. gi2c->s_ahb_clk = devm_clk_get(gi2c->dev->parent, "s-ahb");
  2131. if (IS_ERR(gi2c->s_ahb_clk)) {
  2132. ret = PTR_ERR(gi2c->s_ahb_clk);
  2133. dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
  2134. return ret;
  2135. }
  2136. gi2c->is_i2c_hub = of_property_read_bool(pdev->dev.of_node,
  2137. "qcom,i2c-hub");
  2138. gi2c->is_high_perf = of_property_read_bool(pdev->dev.of_node,
  2139. "qcom,high-perf");
  2140. /*
2141. * For I2C_HUB, qup-ddr voting is not required and
2142. * the core clk should be voted for explicitly.
  2143. */
  2144. if (gi2c->is_i2c_hub) {
  2145. gi2c->core_clk = devm_clk_get(&pdev->dev, "core-clk");
  2146. if (IS_ERR(gi2c->core_clk)) {
  2147. ret = PTR_ERR(gi2c->core_clk);
  2148. dev_err(&pdev->dev, "Err getting core-clk %d\n", ret);
  2149. return ret;
  2150. }
  2151. ret = geni_icc_get(&gi2c->i2c_rsc, NULL);
  2152. if (ret) {
  2153. dev_err(&pdev->dev, "%s: Error - geni_icc_get ret:%d\n",
  2154. __func__, ret);
  2155. return ret;
  2156. }
  2157. gi2c->i2c_rsc.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
  2158. gi2c->i2c_rsc.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
2159. /* For I2C HUB, we don't have a HW reg to identify RTL/SW based SEs.
2160. * Hence set the flag for all I2C HUB instances.
  2161. */
  2162. gi2c->is_i2c_rtl_based = true;
  2163. dev_info(gi2c->dev, "%s: RTL based SE\n", __func__);
  2164. } else {
  2165. if (gi2c->is_high_perf)
  2166. ret =
  2167. geni_se_common_resources_init(&gi2c->i2c_rsc,
  2168. I2C_CORE2X_VOTE, GENI_DEFAULT_BW,
  2169. (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
  2170. else
  2171. ret =
  2172. geni_se_common_resources_init(&gi2c->i2c_rsc,
  2173. GENI_DEFAULT_BW, GENI_DEFAULT_BW,
  2174. Bps_to_icc(gi2c->clk_freq_out));
  2175. if (ret) {
  2176. dev_err(&pdev->dev, "%s: Error - resources_init ret:%d\n",
  2177. __func__, ret);
  2178. return ret;
  2179. }
  2180. }
  2181. gi2c->irq = platform_get_irq(pdev, 0);
  2182. if (gi2c->irq < 0)
  2183. return gi2c->irq;
  2184. irq_set_status_flags(gi2c->irq, IRQ_NOAUTOEN);
  2185. ret = devm_request_irq(gi2c->dev, gi2c->irq, geni_i2c_irq,
  2186. 0, "i2c_geni", gi2c);
  2187. if (ret) {
  2188. dev_err(gi2c->dev, "Request_irq failed:%d: err:%d\n",
  2189. gi2c->irq, ret);
  2190. return ret;
  2191. }
  2192. return 0;
  2193. }
  2194. static int geni_i2c_probe(struct platform_device *pdev)
  2195. {
  2196. struct geni_i2c_dev *gi2c;
  2197. struct resource *res;
  2198. int ret;
  2199. struct device *dev = &pdev->dev;
  2200. gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL);
  2201. if (!gi2c)
  2202. return -ENOMEM;
  2203. if (arr_idx < MAX_SE)
  2204. /* Debug purpose */
  2205. gi2c_dev_dbg[arr_idx++] = gi2c;
  2206. gi2c->dev = dev;
  2207. pr_info("boot_kpi: M - DRIVER GENI_I2C Init\n");
  2208. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2209. if (!res)
  2210. return -EINVAL;
  2211. gi2c->base = devm_ioremap_resource(gi2c->dev, res);
  2212. if (IS_ERR(gi2c->base))
  2213. return PTR_ERR(gi2c->base);
  2214. if (of_property_read_bool(pdev->dev.of_node, "qcom,le-vm")) {
  2215. gi2c->is_le_vm = true;
  2216. gi2c->first_xfer_done = false;
  2217. dev_info(&pdev->dev, "LE-VM usecase\n");
  2218. }
  2219. if (of_property_read_bool(pdev->dev.of_node, "qcom,pm-ctrl-client")) {
  2220. gi2c->pm_ctrl_client = true;
  2221. dev_info(&pdev->dev, "Client controls the I2C PM\n");
  2222. }
  2223. if (of_property_read_bool(pdev->dev.of_node, "qcom,leica-used-i2c"))
  2224. gi2c->skip_bw_vote = true;
  2225. gi2c->i2c_test_dev = false;
  2226. if (of_property_read_bool(pdev->dev.of_node, "qcom,i2c-test-dev")) {
  2227. gi2c->i2c_test_dev = true;
  2228. dev_info(&pdev->dev, "%s: This is I2C device under test\n", __func__);
  2229. }
  2230. gi2c->i2c_rsc.dev = dev;
  2231. gi2c->i2c_rsc.wrapper = dev_get_drvdata(dev->parent);
  2232. gi2c->i2c_rsc.base = gi2c->base;
  2233. gi2c->wrapper_dev = dev->parent;
  2234. if (!gi2c->i2c_rsc.wrapper) {
  2235. dev_err(&pdev->dev, "SE Wrapper is NULL, deferring probe\n");
  2236. return -EPROBE_DEFER;
  2237. }
  2238. if (of_property_read_u32(pdev->dev.of_node, "qcom,clk-freq-out",
  2239. &gi2c->clk_freq_out))
  2240. gi2c->clk_freq_out = KHz(400);
  2241. dev_info(&pdev->dev, "Bus frequency is set to %dHz.\n",
  2242. gi2c->clk_freq_out);
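/*
 * Example (illustration only): a node carrying "qcom,clk-freq-out = <100000>"
 * selects a 100 kHz bus, while omitting the property falls back to the
 * 400 kHz default chosen above.
 */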
  2243. //gi2c->is_deep_sleep = false;
  2244. ret = geni_i2c_clk_map_idx(gi2c);
  2245. if (ret) {
  2246. dev_err(gi2c->dev, "Invalid clk frequency %d KHz: %d\n",
  2247. gi2c->clk_freq_out, ret);
  2248. return ret;
  2249. }
  2250. /*
  2251. * For LE, clocks, gpio and icb voting will be provided by
2252. * LA. The I2C operates in GSI mode only for the LE usecase,
2253. * so the SE irq is not required. The properties below will not be
2254. * present in the I2C LE dt.
  2255. */
  2256. ret = geni_i2c_resources_init(pdev, gi2c);
  2257. if (ret)
  2258. return ret;
  2259. if (of_property_read_bool(pdev->dev.of_node, "qcom,shared")) {
  2260. gi2c->is_shared = true;
  2261. dev_info(&pdev->dev, "Multi-EE usecase\n");
  2262. }
2263. //Strictly for debug only; enabling bus recovery is the client/slave device's decision for an SE.
  2264. if (of_property_read_bool(pdev->dev.of_node, "qcom,bus-recovery")) {
  2265. gi2c->bus_recovery_enable = true;
  2266. dev_dbg(&pdev->dev, "%s:I2C Bus recovery enabled\n", __func__);
  2267. }
  2268. ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  2269. if (ret) {
  2270. ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  2271. if (ret) {
  2272. dev_err(&pdev->dev, "could not set DMA mask\n");
  2273. return ret;
  2274. }
  2275. }
  2276. gi2c->tx_sg = dmam_alloc_coherent(gi2c->dev, 5*sizeof(struct scatterlist),
  2277. &gi2c->tx_sg_dma, GFP_KERNEL);
  2278. if (!gi2c->tx_sg) {
  2279. dev_err(&pdev->dev, "could not allocate for tx_sg\n");
  2280. return -ENOMEM;
  2281. }
  2282. gi2c->rx_sg = dmam_alloc_coherent(gi2c->dev, sizeof(struct scatterlist),
  2283. &gi2c->rx_sg_dma, GFP_KERNEL);
  2284. if (!gi2c->rx_sg) {
  2285. dev_err(&pdev->dev, "could not allocate for rx_sg\n");
  2286. return -ENOMEM;
  2287. }
  2288. gi2c->adap.algo = &geni_i2c_algo;
  2289. init_completion(&gi2c->xfer);
  2290. init_completion(&gi2c->m_cancel_cmd);
  2291. platform_set_drvdata(pdev, gi2c);
  2292. i2c_set_adapdata(&gi2c->adap, gi2c);
  2293. gi2c->adap.dev.parent = gi2c->dev;
  2294. gi2c->adap.dev.of_node = pdev->dev.of_node;
  2295. strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
  2296. pm_runtime_set_suspended(gi2c->dev);
  2297. /* for levm skip auto suspend timer */
  2298. if (!gi2c->is_le_vm) {
  2299. pm_runtime_set_autosuspend_delay(gi2c->dev, I2C_AUTO_SUSPEND_DELAY);
  2300. pm_runtime_use_autosuspend(gi2c->dev);
  2301. }
  2302. pm_runtime_enable(gi2c->dev);
  2303. ret = i2c_add_adapter(&gi2c->adap);
  2304. if (ret) {
  2305. dev_err(gi2c->dev, "Add adapter failed, ret=%d\n", ret);
  2306. return ret;
  2307. }
  2308. device_create_file(gi2c->dev, &dev_attr_capture_kpi);
  2309. atomic_set(&gi2c->is_xfer_in_progress, 0);
  2310. if (gi2c->i2c_test_dev) {
  2311. /* configure Test bus to dump test bus later, only once */
  2312. test_bus_enable_per_qupv3(gi2c->wrapper_dev, gi2c->ipcl);
  2313. }
  2314. pr_info("boot_kpi: M - DRIVER GENI_I2C_%d Ready\n", gi2c->adap.nr);
  2315. dev_info(gi2c->dev, "I2C probed\n");
  2316. return 0;
  2317. }
  2318. static int geni_i2c_remove(struct platform_device *pdev)
  2319. {
  2320. struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
  2321. int i;
  2322. if (atomic_read(&gi2c->is_xfer_in_progress)) {
  2323. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2324. "%s: Xfer is in progress\n", __func__);
  2325. return -EBUSY;
  2326. }
  2327. if (!pm_runtime_status_suspended(gi2c->dev)) {
  2328. if (geni_i2c_runtime_suspend(gi2c->dev))
  2329. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2330. "%s: runtime suspend failed\n", __func__);
  2331. }
  2332. if (gi2c->se_mode == GSI_ONLY) {
  2333. if (gi2c->tx_c) {
  2334. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2335. "%s: clearing tx dma resource\n", __func__);
  2336. dma_release_channel(gi2c->tx_c);
  2337. }
  2338. if (gi2c->rx_c) {
  2339. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2340. "%s: clearing rx dma resource\n", __func__);
  2341. dma_release_channel(gi2c->rx_c);
  2342. }
  2343. }
  2344. pm_runtime_put_noidle(gi2c->dev);
  2345. pm_runtime_set_suspended(gi2c->dev);
  2346. pm_runtime_disable(gi2c->dev);
  2347. i2c_del_adapter(&gi2c->adap);
  2348. for (i = 0; i < arr_idx; i++)
  2349. gi2c_dev_dbg[i] = NULL;
  2350. arr_idx = 0;
  2351. device_remove_file(gi2c->dev, &dev_attr_capture_kpi);
  2352. if (gi2c->ipc_log_kpi)
  2353. ipc_log_context_destroy(gi2c->ipc_log_kpi);
  2354. if (gi2c->ipcl)
  2355. ipc_log_context_destroy(gi2c->ipcl);
  2356. return 0;
  2357. }
  2358. /**
2359. * geni_i2c_shutdown() - shutdown callback function for the i2c bus
  2360. * @pdev: platform device
  2361. *
  2362. * This function will be called as a part of device reboot or shutdown
  2363. *
  2364. * Return: None
  2365. */
  2366. static void geni_i2c_shutdown(struct platform_device *pdev)
  2367. {
  2368. struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
  2369. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "Enter %s:%d\n", __func__, true);
  2370. /* Make client i2c transfers start failing */
  2371. i2c_mark_adapter_suspended(&gi2c->adap);
  2372. }
  2373. static int geni_i2c_resume_early(struct device *device)
  2374. {
  2375. struct geni_i2c_dev *gi2c = dev_get_drvdata(device);
  2376. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "%s ret=%d\n", __func__, true);
  2377. /* if (pm_suspend_target_state == PM_SUSPEND_MEM) {
  2378. gi2c->se_mode = UNINITIALIZED;
  2379. gi2c->is_deep_sleep = true;
  2380. }
  2381. */
  2382. return 0;
  2383. }
  2384. static int geni_i2c_hib_resume_noirq(struct device *device)
  2385. {
  2386. struct geni_i2c_dev *gi2c = dev_get_drvdata(device);
  2387. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n", __func__);
  2388. gi2c->se_mode = UNINITIALIZED;
  2389. return 0;
  2390. }
  2391. /*
  2392. * get sync/put sync in LA-VM -> do resources on/off
  2393. * get sync/put sync in LE-VM -> do lock/unlock gpii
  2394. */
  2395. #if IS_ENABLED(CONFIG_PM)
  2396. static int geni_i2c_gpi_pause_resume(struct geni_i2c_dev *gi2c, bool is_suspend)
  2397. {
  2398. int tx_ret = 0;
2399. /* Do dma operations only for the tx channel here, as the GPI driver functions
2400. * internally take care of the rx channel as well. If we call for both channels,
2401. * the channels end up in the wrong state due to the double operations.
  2402. */
  2403. if (gi2c->tx_c) {
  2404. if (is_suspend) {
  2405. tx_ret = dmaengine_pause(gi2c->tx_c);
  2406. } else {
  2407. tx_ret = dmaengine_resume(gi2c->tx_c);
2408. /* For deep sleep we need to restore the config as done at probe,
2409. * hence the MSM_GPI_DEEP_SLEEP_INIT flag is used; in gpi_resume it
2410. * will behave like probe. After this we should set the flag back to
2411. * MSM_GPI_DEFAULT, meaning the gpi probe state is restored.
  2412. if (gi2c->is_deep_sleep)
  2413. gi2c->tx_ev.cmd = MSM_GPI_DEEP_SLEEP_INIT;
  2414. if (gi2c->is_deep_sleep) {
  2415. gi2c->tx_ev.cmd = MSM_GPI_DEFAULT;
  2416. gi2c->is_deep_sleep = false;
  2417. }
  2418. */
  2419. }
  2420. if (tx_ret) {
  2421. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2422. "%s failed: tx:%d status:%d\n",
  2423. __func__, tx_ret, is_suspend);
  2424. return -EINVAL;
  2425. }
  2426. }
  2427. return 0;
  2428. }
  2429. static int geni_i2c_runtime_suspend(struct device *dev)
  2430. {
  2431. int ret = 0;
  2432. struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
  2433. unsigned long long start_time;
  2434. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  2435. gi2c->i2c_kpi);
  2436. if (gi2c->se_mode == FIFO_SE_DMA)
  2437. disable_irq(gi2c->irq);
  2438. if (gi2c->se_mode == GSI_ONLY) {
  2439. if (!gi2c->is_le_vm) {
  2440. ret = geni_i2c_gpi_pause_resume(gi2c, true);
  2441. if (ret) {
  2442. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  2443. "%s: ret:%d\n", __func__, ret);
  2444. return ret;
  2445. }
  2446. }
  2447. }
  2448. if (gi2c->skip_bw_vote) {
  2449. if (gi2c->is_shared) {
  2450. /* Do not unconfigure GPIOs if shared se */
  2451. geni_se_common_clks_off(gi2c->i2c_rsc.clk,
  2452. gi2c->m_ahb_clk, gi2c->s_ahb_clk);
  2453. } else if (!gi2c->is_le_vm) {
  2454. geni_se_resources_off(&gi2c->i2c_rsc);
  2455. }
  2456. goto skip_bw_vote;
  2457. }
  2458. if (gi2c->is_le_vm && gi2c->first_xfer_done) {
  2459. geni_i2c_unlock_bus(gi2c);
  2460. if (gi2c->se_mode == GSI_ONLY) {
  2461. ret = geni_i2c_gpi_pause_resume(gi2c, true);
  2462. if (ret) {
  2463. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  2464. "%s: ret:%d\n", __func__, ret);
  2465. return ret;
  2466. }
  2467. }
  2468. }
  2469. else if (gi2c->is_shared) {
  2470. /* Do not unconfigure GPIOs if shared se */
  2471. geni_se_common_clks_off(gi2c->i2c_rsc.clk, gi2c->m_ahb_clk, gi2c->s_ahb_clk);
  2472. ret = geni_icc_disable(&gi2c->i2c_rsc);
  2473. if (ret)
  2474. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  2475. "%s failing at geni_icc_disable ret=%d\n", __func__, ret);
  2476. } else if (!gi2c->is_le_vm) {
  2477. geni_se_resources_off(&gi2c->i2c_rsc);
  2478. ret = geni_icc_disable(&gi2c->i2c_rsc);
  2479. if (ret)
  2480. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  2481. "%s failing at geni_icc_disable ret=%d\n", __func__, ret);
  2482. }
  2483. skip_bw_vote:
  2484. if (gi2c->is_i2c_hub)
  2485. clk_disable_unprepare(gi2c->core_clk);
  2486. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "%s ret=%d\n", __func__, ret);
  2487. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  2488. gi2c->i2c_kpi, start_time, 0, 0);
  2489. return 0;
  2490. }
  2491. static int geni_i2c_runtime_resume(struct device *dev)
  2492. {
  2493. int ret = 0;
  2494. struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
  2495. unsigned long long start_time;
  2496. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  2497. gi2c->i2c_kpi);
  2498. if (!gi2c->ipcl) {
  2499. char ipc_name[I2C_NAME_SIZE];
  2500. snprintf(ipc_name, I2C_NAME_SIZE, "%s", dev_name(gi2c->dev));
  2501. gi2c->ipcl = ipc_log_context_create(2, ipc_name, 0);
  2502. }
  2503. if (!gi2c->is_le_vm) {
  2504. if (gi2c->skip_bw_vote)
  2505. goto skip_bw_vote;
  2506. ret = geni_icc_enable(&gi2c->i2c_rsc);
  2507. if (ret) {
  2508. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  2509. "%s failing at geni icc enable ret=%d\n", __func__, ret);
  2510. return ret;
  2511. }
  2512. ret = geni_icc_set_bw(&gi2c->i2c_rsc);
  2513. if (ret) {
  2514. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2515. "%s failing at icc set bw ret=%d\n", __func__, ret);
  2516. return ret;
  2517. }
  2518. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  2519. "%s: GENI_TO_CORE:%d CPU_TO_GENI:%d GENI_TO_DDR:%d\n",
  2520. __func__, gi2c->i2c_rsc.icc_paths[GENI_TO_CORE].avg_bw,
  2521. gi2c->i2c_rsc.icc_paths[CPU_TO_GENI].avg_bw,
  2522. gi2c->i2c_rsc.icc_paths[GENI_TO_DDR].avg_bw);
  2523. skip_bw_vote:
  2524. if (gi2c->is_i2c_hub) {
  2525. ret = clk_prepare_enable(gi2c->core_clk);
  2526. if (ret) {
  2527. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  2528. "%s failing at core clk prepare enable ret=%d\n", __func__, ret);
  2529. return ret;
  2530. }
  2531. }
  2532. /* Do not control clk/gpio/icb for LE-VM */
  2533. ret = geni_se_resources_on(&gi2c->i2c_rsc);
  2534. if (ret)
  2535. return ret;
  2536. ret = geni_i2c_prepare(gi2c);
  2537. if (ret) {
  2538. dev_err(gi2c->dev, "I2C prepare failed: %d\n", ret);
  2539. return ret;
  2540. }
  2541. geni_write_reg(0x7f, gi2c->base, GENI_OUTPUT_CTRL);
  2542. /*
  2543. * Added 10 us delay to settle the write of the register as per
  2544. * HW team recommendation
  2545. */
  2546. udelay(10);
  2547. if (gi2c->se_mode == FIFO_SE_DMA)
  2548. enable_irq(gi2c->irq);
  2549. if (gi2c->se_mode == GSI_ONLY) {
  2550. ret = geni_i2c_gpi_pause_resume(gi2c, false);
  2551. if (ret) {
  2552. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  2553. "%s: ret:%d\n", __func__, ret);
  2554. return ret;
  2555. }
  2556. }
  2557. } else if (gi2c->is_le_vm && gi2c->first_xfer_done) {
  2558. /*
2559. * For le-vm, resume operations are done during the
2560. * first xfer. Probe-sequencing issues between the client
2561. * and the i2c-master driver cause i2c_resume to be invoked
2562. * multiple times, which leads to unclocked access. To avoid
2563. * this, the operations below are done in i2c_xfer the very
2564. * first time; after the first xfer, the logic below is
2565. * used on subsequent resumes.
  2566. */
  2567. ret = geni_i2c_prepare(gi2c);
  2568. if (ret) {
  2569. dev_err(gi2c->dev, "I2C prepare failed:%d\n", ret);
  2570. return ret;
  2571. }
  2572. if (gi2c->se_mode == GSI_ONLY) {
  2573. ret = geni_i2c_gpi_pause_resume(gi2c, false);
  2574. if (ret) {
  2575. I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
  2576. "%s: ret:%d\n", __func__, ret);
  2577. return ret;
  2578. }
  2579. }
  2580. ret = geni_i2c_lock_bus(gi2c);
  2581. if (ret) {
  2582. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2583. "%s failed: %d\n", __func__, ret);
  2584. return ret;
  2585. }
  2586. }
  2587. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "%s ret=%d\n", __func__, ret);
  2588. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  2589. gi2c->i2c_kpi, start_time, 0, 0);
  2590. return 0;
  2591. }
  2592. static int geni_i2c_suspend_late(struct device *device)
  2593. {
  2594. struct geni_i2c_dev *gi2c = dev_get_drvdata(device);
  2595. int ret;
  2596. unsigned long long start_time;
  2597. start_time = geni_capture_start_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  2598. gi2c->i2c_kpi);
  2599. if (atomic_read(&gi2c->is_xfer_in_progress)) {
  2600. if (!pm_runtime_status_suspended(gi2c->dev)) {
  2601. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2602. ":%s: runtime PM is active\n", __func__);
  2603. return -EBUSY;
  2604. }
  2605. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2606. "%s System suspend not allowed while xfer in progress\n",
  2607. __func__);
  2608. return -EBUSY;
  2609. }
  2610. /* Make sure no transactions are pending */
  2611. ret = i2c_trylock_bus(&gi2c->adap, I2C_LOCK_SEGMENT);
  2612. if (!ret) {
  2613. I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
  2614. "late I2C transaction request\n");
  2615. return -EBUSY;
  2616. }
  2617. if (!pm_runtime_status_suspended(device)) {
  2618. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
  2619. "%s: Force suspend\n", __func__);
  2620. geni_i2c_runtime_suspend(device);
  2621. pm_runtime_disable(device);
  2622. pm_runtime_set_suspended(device);
  2623. pm_runtime_enable(device);
  2624. }
  2625. i2c_unlock_bus(&gi2c->adap, I2C_LOCK_SEGMENT);
  2626. I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev, "%s ret=%d\n", __func__, ret);
  2627. geni_capture_stop_time(&gi2c->i2c_rsc, gi2c->ipc_log_kpi, __func__,
  2628. gi2c->i2c_kpi, start_time, 0, 0);
  2629. return 0;
  2630. }
  2631. #else
  2632. static int geni_i2c_runtime_suspend(struct device *dev)
  2633. {
  2634. return 0;
  2635. }
  2636. static int geni_i2c_runtime_resume(struct device *dev)
  2637. {
  2638. return 0;
  2639. }
  2640. static int geni_i2c_suspend_late(struct device *device)
  2641. {
  2642. return 0;
  2643. }
  2644. #endif
  2645. static const struct dev_pm_ops geni_i2c_pm_ops = {
  2646. .suspend_late = geni_i2c_suspend_late,
  2647. .resume_early = geni_i2c_resume_early,
  2648. .runtime_suspend = geni_i2c_runtime_suspend,
  2649. .runtime_resume = geni_i2c_runtime_resume,
  2650. .freeze = geni_i2c_suspend_late,
  2651. .restore = geni_i2c_hib_resume_noirq,
  2652. .thaw = geni_i2c_hib_resume_noirq,
  2653. };
  2654. static const struct of_device_id geni_i2c_dt_match[] = {
  2655. { .compatible = "qcom,i2c-geni" },
  2656. {}
  2657. };
  2658. MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
  2659. static struct platform_driver geni_i2c_driver = {
  2660. .probe = geni_i2c_probe,
  2661. .remove = geni_i2c_remove,
  2662. .shutdown = geni_i2c_shutdown,
  2663. .driver = {
  2664. .name = "i2c_geni",
  2665. .pm = &geni_i2c_pm_ops,
  2666. .of_match_table = geni_i2c_dt_match,
  2667. },
  2668. };
  2669. static int __init i2c_dev_init(void)
  2670. {
  2671. return platform_driver_register(&geni_i2c_driver);
  2672. }
  2673. static void __exit i2c_dev_exit(void)
  2674. {
  2675. platform_driver_unregister(&geni_i2c_driver);
  2676. }
  2677. module_init(i2c_dev_init);
  2678. module_exit(i2c_dev_exit);
  2679. MODULE_LICENSE("GPL");
  2680. MODULE_ALIAS("platform:i2c_geni");