ipa_mpm.c 89 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/dma-mapping.h>
  6. #include <linux/list.h>
  7. #include <linux/slab.h>
  8. #include <linux/device.h>
  9. #include <linux/module.h>
  10. #include <linux/mhi.h>
  11. #include <linux/msm_gsi.h>
  12. #include <linux/delay.h>
  13. #include <linux/log2.h>
  14. #include <linux/gfp.h>
  15. #include "../ipa_common_i.h"
  16. #include "ipa_i.h"
#define IPA_MPM_DRV_NAME "ipa_mpm"

/*
 * Debug log: goes to dmesg (pr_debug) and to BOTH the default and the
 * low-priority IPA IPC log buffers.
 */
#define IPA_MPM_DBG(fmt, args...) \
	do { \
		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/*
 * Low-verbosity debug log: dmesg plus ONLY the low-priority IPC buffer
 * (keeps chatty paths out of the main IPC log).
 */
#define IPA_MPM_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Error log: pr_err plus both IPC log buffers. */
#define IPA_MPM_ERR(fmt, args...) \
	do { \
		pr_err(IPA_MPM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IPA_MPM_FUNC_ENTRY() \
	IPA_MPM_DBG("ENTRY\n")
#define IPA_MPM_FUNC_EXIT() \
	IPA_MPM_DBG("EXIT\n")

/* Number of MHIP channel pairs managed (matches mhi_driver_match_table). */
#define IPA_MPM_MAX_MHIP_CHAN 3

/* Transfer/event ring depth in descriptors; ring length equals it. */
#define IPA_MPM_NUM_RING_DESC 6
#define IPA_MPM_RING_LEN IPA_MPM_NUM_RING_DESC

/* Host-side MHI channel IDs for the UL/DL pair. */
#define IPA_MPM_MHI_HOST_UL_CHANNEL 4
#define IPA_MPM_MHI_HOST_DL_CHANNEL 5

/* Tethering aggregation limits (time presumably in usec: 10000 == 10 ms). */
#define TETH_AGGR_TIME_LIMIT 10000 /* 10ms */
#define TETH_AGGR_BYTE_LIMIT 24
#define TETH_AGGR_DL_BYTE_LIMIT 16

/* Size of each TRE data buffer; also reused as the IOVA page granule. */
#define TRE_BUFF_SIZE 32768

/* HOLB (head-of-line blocking) timer enable/disable values. */
#define IPA_HOLB_TMR_EN 0x1
#define IPA_HOLB_TMR_DIS 0x0

#define RNDIS_IPA_DFLT_RT_HDL 0

/* Polling parameters used when draining/stopping channels. */
#define IPA_POLL_FOR_EMPTINESS_NUM 50
#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200

#define IPA_MHIP_HOLB_TMO 31 /* value to match granularity on ipa HW 4.5 */

/* Arguments for flow-control add/delete requests. */
#define IPA_MPM_FLOW_CTRL_ADD 1
#define IPA_MPM_FLOW_CTRL_DELETE 0
/* MHIP ring-element types placed in mhi_p_desc.re_type. */
enum mhip_re_type {
	MHIP_RE_XFER = 0x2,
	MHIP_RE_NOP = 0x4,
};

/* Index of each MHIP channel pair; also indexes md[] and ipa_mpm_pipes[]. */
enum ipa_mpm_mhi_ch_id_type {
	IPA_MPM_MHIP_CH_ID_0,
	IPA_MPM_MHIP_CH_ID_1,
	IPA_MPM_MHIP_CH_ID_2,
	IPA_MPM_MHIP_CH_ID_MAX,
};

/* DMA direction relative to host IPA (mirrors enum dma_data_direction). */
enum ipa_mpm_dma_data_direction {
	DMA_HIPA_BIDIRECTIONAL = 0,
	DMA_TO_HIPA = 1,
	DMA_FROM_HIPA = 2,
	DMA_HIPA_NONE = 3,
};

/* Tethering client behind the MHIP pipes. */
enum ipa_mpm_ipa_teth_client_type {
	IPA_MPM_MHIP_USB,
	IPA_MPM_MHIP_WIFI,
};

enum ipa_mpm_mhip_client_type {
	IPA_MPM_MHIP_INIT,
	/* USB RMNET CLIENT */
	IPA_MPM_MHIP_USB_RMNET,
	/* USB RNDIS / WIFI CLIENT */
	IPA_MPM_MHIP_TETH,
	/* USB DPL CLIENT */
	IPA_MPM_MHIP_USB_DPL,
	IPA_MPM_MHIP_NONE,
};

/* Vote direction for PCIe/IPA clock vote helpers. */
enum ipa_mpm_clk_vote_type {
	CLK_ON,
	CLK_OFF,
};

/* Result codes for MHIP channel start/stop operations. */
enum mhip_status_type {
	MHIP_STATUS_SUCCESS,
	MHIP_STATUS_NO_OP,
	MHIP_STATUS_FAIL,
	MHIP_STATUS_BAD_STATE,
	MHIP_STATUS_EP_NOT_FOUND,
	MHIP_STATUS_EP_NOT_READY,
};

/* Which SMMU context bank a mapping targets. */
enum mhip_smmu_domain_type {
	MHIP_SMMU_DOMAIN_IPA,
	MHIP_SMMU_DOMAIN_PCIE,
	MHIP_SMMU_DOMAIN_NONE,
};

enum ipa_mpm_start_stop_type {
	MPM_MHIP_STOP,
	MPM_MHIP_START,
};

/* each pair of UL/DL channels are defined below */
static const struct mhi_device_id mhi_driver_match_table[] = {
	{ .chan = "IP_HW_MHIP_0" }, /* for rndis/Wifi teth pipes */
	{ .chan = "IP_HW_MHIP_1" }, /* for MHIP rmnet */
	{ .chan = "IP_HW_ADPL" }, /* ADPL/ODL DL pipe */
};

/*
 * Human-readable client name per channel ID; order follows the match
 * table above (CH_ID_0 -> TETH, CH_ID_1 -> RMNET, CH_ID_2 -> DPL).
 */
static const char *ipa_mpm_mhip_chan_str[IPA_MPM_MHIP_CH_ID_MAX] = {
	__stringify(IPA_MPM_MHIP_TETH),
	__stringify(IPA_MPM_MHIP_USB_RMNET),
	__stringify(IPA_MPM_MHIP_USB_DPL),
};
/*
 * MHI PRIME GSI Descriptor format that Host IPA uses.
 */
struct __packed mhi_p_desc {
	uint64_t buffer_ptr;	/* IOVA of the data buffer */
	uint16_t buff_len;	/* buffer length in bytes */
	uint16_t resvd1;
	uint16_t chain : 1;	/* descriptor chained to the next one */
	uint16_t resvd4 : 7;
	uint16_t ieob : 1;	/* interrupt on end-of-block */
	uint16_t ieot : 1;	/* interrupt on end-of-transfer */
	uint16_t bei : 1;	/* block event interrupt */
	uint16_t sct : 1;
	uint16_t resvd3 : 4;
	uint8_t re_type;	/* ring-element type, see enum mhip_re_type */
	uint8_t resvd2;
};

/*
 * MHI PRIME Channel Context and Event Context Array
 * Information that is sent to Device IPA.
 */
struct ipa_mpm_channel_context_type {
	u32 chstate : 8;	/* channel state */
	u32 reserved1 : 24;
	u32 chtype;
	u32 erindex;		/* associated event-ring index */
	u64 rbase;		/* ring base address */
	u64 rlen;		/* ring length in bytes */
	u64 reserved2;
	u64 reserved3;
} __packed;

struct ipa_mpm_event_context_type {
	u32 reserved1 : 8;
	u32 update_rp_modc : 8;
	u32 update_rp_intmodt : 16;	/* interrupt moderation timer */
	u32 ertype;
	u32 update_rp_addr;	/* address to write read-pointer updates to */
	u64 rbase;
	u64 rlen;
	u32 buff_size : 16;
	u32 reserved2 : 16;
	u32 reserved3;
	u64 reserved4;
} __packed;

/* IPA client plus the endpoint config applied to that client's pipe. */
struct ipa_mpm_pipes_info_type {
	enum ipa_client_type ipa_client;
	struct ipa_ep_cfg ep_cfg;
};

/* A UL/DL pipe pair and the MHIP client it serves. */
struct ipa_mpm_channel_type {
	struct ipa_mpm_pipes_info_type dl_cons;
	struct ipa_mpm_pipes_info_type ul_prod;
	enum ipa_mpm_mhip_client_type mhip_client;
};
/* Per-channel-ID pipe configuration, populated at init time. */
static struct ipa_mpm_channel_type ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_MAX];

/* For configuring IPA_CLIENT_MHI_PRIME_TETH_CONS */
static struct ipa_ep_cfg mhip_dl_teth_ep_cfg = {
	.mode = {
		.mode = IPA_BASIC,
		.dst = IPA_CLIENT_MHI_PRIME_TETH_CONS,
	},
	.hdr = {
		.hdr_len = 4,
		.hdr_ofst_metadata_valid = 1,
		.hdr_ofst_metadata = 1,
		.hdr_ofst_pkt_size_valid = 1,
		.hdr_ofst_pkt_size = 2,
	},
	.hdr_ext = {
		.hdr_total_len_or_pad_valid = true,
		.hdr_payload_len_inc_padding = true,
	},
	/* DL direction de-aggregates QMAP frames coming from the device */
	.aggr = {
		.aggr_en = IPA_ENABLE_DEAGGR,
		.aggr = IPA_QCMAP,
		.aggr_byte_limit = TETH_AGGR_DL_BYTE_LIMIT,
		.aggr_time_limit = TETH_AGGR_TIME_LIMIT,
	},
};

static struct ipa_ep_cfg mhip_ul_teth_ep_cfg = {
	.mode = {
		.mode = IPA_BASIC,
		.dst = IPA_CLIENT_MHI_PRIME_TETH_PROD,
	},
	.hdr = {
		.hdr_len = 4,
		.hdr_ofst_metadata_valid = 1,
		.hdr_ofst_metadata = 0,
		.hdr_ofst_pkt_size_valid = 1,
		.hdr_ofst_pkt_size = 2,
	},
	.hdr_ext = {
		.hdr_total_len_or_pad_valid = true,
		.hdr_payload_len_inc_padding = true,
	},
	/* UL direction aggregates packets into QMAP frames */
	.aggr = {
		.aggr_en = IPA_ENABLE_AGGR,
		.aggr = IPA_QCMAP,
		.aggr_byte_limit = TETH_AGGR_BYTE_LIMIT,
		.aggr_time_limit = TETH_AGGR_TIME_LIMIT,
	},
};

/* WARNING!! Temporary for rndis intgration only */
/* For configuring IPA_CLIENT_MHIP_RMNET_PROD */
static struct ipa_ep_cfg mhip_dl_rmnet_ep_cfg = {
	.mode = {
		.mode = IPA_DMA,
		.dst = IPA_CLIENT_USB_CONS,
	},
};

/* For configuring IPA_CLIENT_MHIP_RMNET_CONS */
/*
 * NOTE(review): dst is IPA_CLIENT_USB_CONS here as well, same as the DL
 * config above — looks intentional for the temporary rndis integration,
 * but confirm the UL destination is really USB_CONS.
 */
static struct ipa_ep_cfg mhip_ul_rmnet_ep_cfg = {
	.mode = {
		.mode = IPA_DMA,
		.dst = IPA_CLIENT_USB_CONS,
	},
};

/* For configuring IPA_CLIENT_MHIP_DPL_PROD using USB*/
static struct ipa_ep_cfg mhip_dl_dpl_ep_cfg = {
	.mode = {
		.mode = IPA_DMA,
		.dst = IPA_CLIENT_USB_DPL_CONS,
	},
};
/* One carved-out IOVA region: base address and size. */
struct ipa_mpm_iova_addr {
	dma_addr_t base;
	unsigned int size;
};

/* Device-level info discovered at probe time. */
struct ipa_mpm_dev_info {
	struct platform_device *pdev;
	struct device *dev;
	bool ipa_smmu_enabled;
	bool pcie_smmu_enabled;
	struct ipa_mpm_iova_addr ctrl;	/* control IOVA range */
	struct ipa_mpm_iova_addr data;	/* data IOVA range */
	u32 chdb_base;			/* channel doorbell base */
	u32 erdb_base;			/* event-ring doorbell base */
	bool is_cache_coherent;
};

/* Event-ring properties shared with the device. */
struct ipa_mpm_event_props {
	u16 id;
	phys_addr_t device_db;		/* device-side doorbell PA */
	struct ipa_mpm_event_context_type ev_ctx;
};

/* Channel properties shared with the device. */
struct ipa_mpm_channel_props {
	u16 id;
	phys_addr_t device_db;		/* device-side doorbell PA */
	struct ipa_mpm_channel_context_type ch_ctx;
};

/* Lifecycle state of a GSI channel owned by MPM. */
enum ipa_mpm_gsi_state {
	GSI_ERR,
	GSI_INIT,
	GSI_ALLOCATED,
	GSI_STARTED,
	GSI_STOPPED,
};

/* Last known state of the remote (device-side) MHIP channels. */
enum ipa_mpm_remote_state {
	MPM_MHIP_REMOTE_STOP,
	MPM_MHIP_REMOTE_START,
	MPM_MHIP_REMOTE_ERR,
};

/* Per-direction channel bookkeeping: props, GSI state, doorbell IOVAs. */
struct ipa_mpm_channel {
	struct ipa_mpm_channel_props chan_props;
	struct ipa_mpm_event_props evt_props;
	enum ipa_mpm_gsi_state gsi_state;
	dma_addr_t db_host_iova;
	dma_addr_t db_device_iova;
};

/* Tethering bring-up progress for a probe ID. */
enum ipa_mpm_teth_state {
	IPA_MPM_TETH_INIT = 0,
	IPA_MPM_TETH_INPROGRESS,
	IPA_MPM_TETH_CONNECTED,
};

enum ipa_mpm_mhip_chan {
	IPA_MPM_MHIP_CHAN_UL,
	IPA_MPM_MHIP_CHAN_DL,
	IPA_MPM_MHIP_CHAN_BOTH,
};

/* Per-probe clock vote counters (PCIe and IPA). */
struct ipa_mpm_clk_cnt_type {
	atomic_t pcie_clk_cnt;
	atomic_t ipa_clk_cnt;
};

/* Rings and buffers allocated by the host for a producer direction. */
struct producer_rings {
	struct mhi_p_desc *tr_va;	/* transfer ring VA */
	struct mhi_p_desc *er_va;	/* event ring VA */
	void *tr_buff_va[IPA_MPM_RING_LEN];	/* per-TRE buffer VAs */
	dma_addr_t tr_pa;		/* transfer ring IOVA/PA */
	dma_addr_t er_pa;		/* event ring IOVA/PA */
	dma_addr_t tr_buff_c_iova[IPA_MPM_RING_LEN];
	/*
	 * The iova generated for AP CB,
	 * used only for dma_map_single to flush the cache.
	 */
	dma_addr_t ap_iova_er;
	dma_addr_t ap_iova_tr;
	dma_addr_t ap_iova_buff[IPA_MPM_RING_LEN];
};

/* Per-MHI-device (per probe ID) state. */
struct ipa_mpm_mhi_driver {
	struct mhi_device *mhi_dev;
	struct producer_rings ul_prod_ring;
	struct producer_rings dl_prod_ring;
	struct ipa_mpm_channel ul_prod;
	struct ipa_mpm_channel dl_cons;
	enum ipa_mpm_mhip_client_type mhip_client;
	enum ipa_mpm_teth_state teth_state;
	bool init_complete;
	/* General MPM mutex to protect concurrent update of MPM GSI states */
	struct mutex mutex;
	/*
	 * Mutex to protect mhi_dev update/ access, for concurrency such as
	 * 5G SSR and USB disconnect/connect.
	 */
	struct mutex mhi_mutex;
	bool in_lpm;
	struct ipa_mpm_clk_cnt_type clk_cnt;
	enum ipa_mpm_remote_state remote_state;
};

/* Driver-wide singleton context (see ipa_mpm_ctx below). */
struct ipa_mpm_context {
	struct ipa_mpm_dev_info dev_info;
	struct ipa_mpm_mhi_driver md[IPA_MPM_MAX_MHIP_CHAN];
	struct mutex mutex;
	atomic_t probe_cnt;
	atomic_t pcie_clk_total_cnt;
	atomic_t ipa_clk_total_cnt;
	atomic_t flow_ctrl_mask;
	atomic_t adpl_over_usb_available;
	struct device *parent_pdev;
	struct ipa_smmu_cb_ctx carved_smmu_cb;	/* carved-out IOVA CB */
	struct device *mhi_parent_dev;		/* PCIe parent, for its SMMU */
};
#define IPA_MPM_DESC_SIZE (sizeof(struct mhi_p_desc))
#define IPA_MPM_RING_TOTAL_SIZE (IPA_MPM_RING_LEN * IPA_MPM_DESC_SIZE)
/* WA: Make the IPA_MPM_PAGE_SIZE from 16k (next power of ring size) to
 * 32k. This is to make sure IOMMU map happens for the same size
 * for all TR/ER and doorbells.
 */
#define IPA_MPM_PAGE_SIZE TRE_BUFF_SIZE

/* Driver-wide singleton, allocated at probe. */
static struct ipa_mpm_context *ipa_mpm_ctx;
/* Platform device saved by the IPA-ready callback for deferred probe. */
static struct platform_device *m_pdev;

/* Forward declarations for the mhi_driver ops and internal helpers. */
static int ipa_mpm_mhi_probe_cb(struct mhi_device *,
	const struct mhi_device_id *);
static void ipa_mpm_mhi_remove_cb(struct mhi_device *);
static void ipa_mpm_mhi_status_cb(struct mhi_device *, enum MHI_CB);
static void ipa_mpm_change_teth_state(int probe_id,
	enum ipa_mpm_teth_state ip_state);
static void ipa_mpm_change_gsi_state(int probe_id,
	enum ipa_mpm_mhip_chan mhip_chan,
	enum ipa_mpm_gsi_state next_state);
static int ipa_mpm_probe(struct platform_device *pdev);
static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
	int probe_id, bool is_force, bool *is_acted);
static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
	int probe_id);
static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
	enum ipa_mpm_mhip_chan mhip_chan,
	int probe_id,
	enum ipa_mpm_start_stop_type start_stop);
static int ipa_mpm_start_mhip_holb_tmo(u32 clnt_hdl);

/* MHI bus driver registration: one probe per match-table entry. */
static struct mhi_driver mhi_driver = {
	.id_table = mhi_driver_match_table,
	.probe = ipa_mpm_mhi_probe_cb,
	.remove = ipa_mpm_mhi_remove_cb,
	.status_cb = ipa_mpm_mhi_status_cb,
	.driver = {
		.name = IPA_MPM_DRV_NAME,
		.owner = THIS_MODULE,
	},
};
/*
 * Deferred probe worker: runs ipa_mpm_probe() once IPA core is ready.
 * Scheduled from the IPA-ready callback; m_pdev was stashed there.
 */
static void ipa_mpm_ipa3_delayed_probe(struct work_struct *work)
{
	/* return value intentionally ignored; probe logs its own errors */
	(void)ipa_mpm_probe(m_pdev);
}
static DECLARE_WORK(ipa_mpm_ipa3_scheduled_probe, ipa_mpm_ipa3_delayed_probe);
  400. static void ipa_mpm_ipa3_ready_cb(void *user_data)
  401. {
  402. struct platform_device *pdev = (struct platform_device *)(user_data);
  403. m_pdev = pdev;
  404. IPA_MPM_DBG("IPA ready callback has been triggered\n");
  405. schedule_work(&ipa_mpm_ipa3_scheduled_probe);
  406. }
/* GSI event-ring error callback: treated as fatal — log and assert. */
static void ipa_mpm_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *err_data)
{
	IPA_MPM_ERR("GSI EVT RING ERROR, not expected..\n");
	ipa_assert();
}
/* GSI channel error callback: treated as fatal — log and assert. */
static void ipa_mpm_gsi_chan_err_cb(struct gsi_chan_err_notify *err_data)
{
	IPA_MPM_ERR("GSI CHAN ERROR, not expected..\n");
	ipa_assert();
}
  417. static int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
  418. enum ipa_client_type dst_pipe, bool reset)
  419. {
  420. int result = 0;
  421. struct ipa_ep_cfg ep_cfg = { { 0 } };
  422. IPA_MPM_FUNC_ENTRY();
  423. IPA_MPM_DBG("DMA from %d to %d reset=%d\n", src_pipe, dst_pipe, reset);
  424. /* Reset to basic if reset = 1, otherwise set to DMA */
  425. if (reset)
  426. ep_cfg.mode.mode = IPA_BASIC;
  427. else
  428. ep_cfg.mode.mode = IPA_DMA;
  429. ep_cfg.mode.dst = dst_pipe;
  430. ep_cfg.seq.set_dynamic = true;
  431. result = ipa_cfg_ep(ipa_get_ep_mapping(src_pipe), &ep_cfg);
  432. IPA_MPM_FUNC_EXIT();
  433. return result;
  434. }
  435. static int ipa_mpm_start_mhip_holb_tmo(u32 clnt_hdl)
  436. {
  437. struct ipa_ep_cfg_holb holb_cfg;
  438. memset(&holb_cfg, 0, sizeof(holb_cfg));
  439. holb_cfg.en = IPA_HOLB_TMR_EN;
  440. /* 31 ms timer, which is less than tag timeout */
  441. holb_cfg.tmr_val = IPA_MHIP_HOLB_TMO;
  442. return ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
  443. }
/**
 * ipa_mpm_smmu_map() - SMMU maps ring and the buffer pointer.
 * @va_addr: virtual address that needs to be mapped
 * @sz: size of the address to be mapped
 * @dir: ipa_mpm_dma_data_direction
 * @ap_cb_iova: iova for AP context bank
 *
 * This function SMMU maps both ring and the buffer pointer.
 * The ring pointers will be aligned to ring size and
 * the buffer pointers should be aligned to buffer size.
 *
 * On any failure this function calls ipa_assert() and does not return
 * a usable error code to the caller.
 *
 * Returns: iova of the mapped address
 */
static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
	int sz,
	int dir,
	dma_addr_t *ap_cb_iova)
{
	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
	phys_addr_t phys_addr;
	dma_addr_t iova;
	int smmu_enabled;
	unsigned long iova_p;
	phys_addr_t pa_p;
	u32 size_p;
	int prot = IOMMU_READ | IOMMU_WRITE;
	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
	/* next free slot in the carved range, aligned to the 32k granule */
	unsigned long carved_iova = roundup(cb->next_addr, IPA_MPM_PAGE_SIZE);
	int ret = 0;

	/* check cache coherent */
	if (ipa_mpm_ctx->dev_info.is_cache_coherent) {
		IPA_MPM_DBG_LOW("enable cache coherent\n");
		prot |= IOMMU_CACHE;
	}
	if (carved_iova >= cb->va_end) {
		IPA_MPM_ERR("running out of carved_iova %lx\n", carved_iova);
		ipa_assert();
	}
	/*
	 * Both Host IPA and PCIE SMMU should be enabled or disabled
	 * for proceed.
	 * If SMMU Enabled => iova == pa
	 * If SMMU Disabled => iova == iommu mapped iova
	 * dma_map_single ensures cache is flushed and the memory is not
	 * touched again until dma_unmap_single() is called
	 */
	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
	if (smmu_enabled) {
		/* Map the phys addr to both PCIE and IPA AP CB
		 * from the carved out common iova range.
		 */
		ipa_smmu_domain = ipa3_get_smmu_domain();
		if (!ipa_smmu_domain) {
			IPA_MPM_ERR("invalid IPA smmu domain\n");
			ipa_assert();
		}
		if (!ipa_mpm_ctx->mhi_parent_dev) {
			IPA_MPM_ERR("invalid PCIE SMMU domain\n");
			ipa_assert();
		}
		phys_addr = virt_to_phys((void *) va_addr);
		IPA_SMMU_ROUND_TO_PAGE(carved_iova, phys_addr, sz,
			iova_p, pa_p, size_p);
		/* Flush the cache with dma_map_single for IPA AP CB */
		*ap_cb_iova = dma_map_single(ipa3_ctx->pdev, va_addr,
			size_p, dir);
		if (dma_mapping_error(ipa3_ctx->pdev, *ap_cb_iova)) {
			IPA_MPM_ERR("dma_map_single failure for entry\n");
			goto fail_dma_mapping;
		}
		/* same carved IOVA is mapped into BOTH context banks */
		ret = ipa3_iommu_map(ipa_smmu_domain, iova_p,
			pa_p, size_p, prot);
		if (ret) {
			IPA_MPM_ERR("IPA IOMMU returned failure, ret = %d\n",
				ret);
			ipa_assert();
		}
		pcie_smmu_domain = iommu_get_domain_for_dev(
			ipa_mpm_ctx->mhi_parent_dev);
		if (!pcie_smmu_domain) {
			IPA_MPM_ERR("invalid pcie smmu domain\n");
			ipa_assert();
		}
		ret = iommu_map(pcie_smmu_domain, iova_p, pa_p, size_p, prot);
		if (ret) {
			IPA_MPM_ERR("PCIe IOMMU returned failure, ret = %d\n",
				ret);
			ipa_assert();
		}
		/* advance the carve-out allocator past this mapping */
		cb->next_addr = iova_p + size_p;
		iova = iova_p;
	} else {
		/*
		 * NOTE(review): this path maps IPA_MPM_RING_TOTAL_SIZE
		 * rather than @sz — confirm all non-SMMU callers really
		 * pass ring-sized regions.
		 */
		iova = dma_map_single(ipa3_ctx->pdev, va_addr,
			IPA_MPM_RING_TOTAL_SIZE, dir);
		if (dma_mapping_error(ipa3_ctx->pdev, iova)) {
			IPA_MPM_ERR("dma_map_single failure for entry\n");
			goto fail_dma_mapping;
		}
		*ap_cb_iova = iova;
	}
	return iova;

fail_dma_mapping:
	iova = 0;
	ipa_assert();
	return iova;
}
  551. /**
  552. * ipa_mpm_smmu_unmap() - SMMU unmaps ring and the buffer pointer.
  553. * @va_addr: virtual address that needs to be mapped
  554. * @sz: size of the address to be mapped
  555. * @dir: ipa_mpm_dma_data_direction
  556. * @ap_cb_iova: iova for AP context bank
  557. *
  558. * This function SMMU unmaps both ring and the buffer pointer.
  559. * The ring pointers will be aligned to ring size and
  560. * the buffer pointers should be aligned to buffer size.
  561. *
  562. * Return: none
  563. */
  564. static void ipa_mpm_smmu_unmap(dma_addr_t carved_iova, int sz, int dir,
  565. dma_addr_t ap_cb_iova)
  566. {
  567. unsigned long iova_p;
  568. unsigned long pa_p;
  569. u32 size_p = 0;
  570. struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
  571. struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
  572. int smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
  573. ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
  574. if (carved_iova <= 0) {
  575. IPA_MPM_ERR("carved_iova is zero/negative\n");
  576. WARN_ON(1);
  577. return;
  578. }
  579. if (smmu_enabled) {
  580. ipa_smmu_domain = ipa3_get_smmu_domain();
  581. if (!ipa_smmu_domain) {
  582. IPA_MPM_ERR("invalid IPA smmu domain\n");
  583. ipa_assert();
  584. }
  585. if (!ipa_mpm_ctx->mhi_parent_dev) {
  586. IPA_MPM_ERR("invalid PCIE SMMU domain\n");
  587. ipa_assert();
  588. }
  589. IPA_SMMU_ROUND_TO_PAGE(carved_iova, carved_iova, sz,
  590. iova_p, pa_p, size_p);
  591. pcie_smmu_domain = iommu_get_domain_for_dev(
  592. ipa_mpm_ctx->mhi_parent_dev);
  593. if (pcie_smmu_domain) {
  594. iommu_unmap(pcie_smmu_domain, iova_p, size_p);
  595. } else {
  596. IPA_MPM_ERR("invalid PCIE SMMU domain\n");
  597. ipa_assert();
  598. }
  599. iommu_unmap(ipa_smmu_domain, iova_p, size_p);
  600. cb->next_addr -= size_p;
  601. dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
  602. IPA_MPM_RING_TOTAL_SIZE, dir);
  603. } else {
  604. dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
  605. IPA_MPM_RING_TOTAL_SIZE, dir);
  606. }
  607. }
  608. static u32 ipa_mpm_smmu_map_doorbell(enum mhip_smmu_domain_type smmu_domain,
  609. u32 pa_addr)
  610. {
  611. /*
  612. * Doorbells are already in PA, map these to
  613. * PCIE/IPA doman if SMMUs are enabled.
  614. */
  615. struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
  616. int smmu_enabled;
  617. unsigned long iova_p;
  618. phys_addr_t pa_p;
  619. u32 size_p;
  620. int ret = 0;
  621. int prot = IOMMU_READ | IOMMU_WRITE;
  622. struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
  623. unsigned long carved_iova = roundup(cb->next_addr, IPA_MPM_PAGE_SIZE);
  624. u32 iova = 0;
  625. u64 offset = 0;
  626. /* check cache coherent */
  627. if (ipa_mpm_ctx->dev_info.is_cache_coherent) {
  628. IPA_MPM_DBG(" enable cache coherent\n");
  629. prot |= IOMMU_CACHE;
  630. }
  631. if (carved_iova >= cb->va_end) {
  632. IPA_MPM_ERR("running out of carved_iova %lx\n", carved_iova);
  633. ipa_assert();
  634. }
  635. smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
  636. ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
  637. if (smmu_enabled) {
  638. IPA_SMMU_ROUND_TO_PAGE(carved_iova, pa_addr, IPA_MPM_PAGE_SIZE,
  639. iova_p, pa_p, size_p);
  640. if (smmu_domain == MHIP_SMMU_DOMAIN_IPA) {
  641. ipa_smmu_domain = ipa3_get_smmu_domain();
  642. if (!ipa_smmu_domain) {
  643. IPA_MPM_ERR("invalid IPA smmu domain\n");
  644. ipa_assert();
  645. }
  646. ret = ipa3_iommu_map(ipa_smmu_domain,
  647. iova_p, pa_p, size_p, prot);
  648. if (ret) {
  649. IPA_MPM_ERR("IPA doorbell mapping failed\n");
  650. ipa_assert();
  651. }
  652. offset = pa_addr - pa_p;
  653. } else if (smmu_domain == MHIP_SMMU_DOMAIN_PCIE) {
  654. pcie_smmu_domain = iommu_get_domain_for_dev(
  655. ipa_mpm_ctx->mhi_parent_dev);
  656. if (!pcie_smmu_domain) {
  657. IPA_MPM_ERR("invalid IPA smmu domain\n");
  658. ipa_assert();
  659. }
  660. ret = iommu_map(pcie_smmu_domain,
  661. iova_p, pa_p, size_p, prot);
  662. if (ret) {
  663. IPA_MPM_ERR("PCIe doorbell mapping failed\n");
  664. ipa_assert();
  665. }
  666. offset = pa_addr - pa_p;
  667. }
  668. iova = iova_p + offset;
  669. cb->next_addr = iova_p + IPA_MPM_PAGE_SIZE;
  670. } else {
  671. iova = pa_addr;
  672. }
  673. return iova;
  674. }
  675. static void ipa_mpm_smmu_unmap_doorbell(enum mhip_smmu_domain_type smmu_domain,
  676. dma_addr_t iova)
  677. {
  678. /*
  679. * Doorbells are already in PA, map these to
  680. * PCIE/IPA doman if SMMUs are enabled.
  681. */
  682. struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
  683. int smmu_enabled;
  684. unsigned long iova_p;
  685. phys_addr_t pa_p;
  686. u32 size_p;
  687. struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
  688. smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
  689. ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
  690. if (smmu_enabled) {
  691. IPA_SMMU_ROUND_TO_PAGE(iova, iova, IPA_MPM_PAGE_SIZE,
  692. iova_p, pa_p, size_p);
  693. if (smmu_domain == MHIP_SMMU_DOMAIN_IPA) {
  694. ipa_smmu_domain = ipa3_get_smmu_domain();
  695. if (ipa_smmu_domain) {
  696. iommu_unmap(ipa_smmu_domain, iova_p, size_p);
  697. } else {
  698. IPA_MPM_ERR("invalid IPA smmu domain\n");
  699. ipa_assert();
  700. }
  701. } else if (smmu_domain == MHIP_SMMU_DOMAIN_PCIE) {
  702. pcie_smmu_domain = iommu_get_domain_for_dev(
  703. ipa_mpm_ctx->mhi_parent_dev);
  704. if (pcie_smmu_domain) {
  705. iommu_unmap(pcie_smmu_domain, iova_p, size_p);
  706. } else {
  707. IPA_MPM_ERR("invalid PCIE smmu domain\n");
  708. ipa_assert();
  709. }
  710. cb->next_addr -= IPA_MPM_PAGE_SIZE;
  711. }
  712. }
  713. }
  714. static int get_idx_from_id(const struct mhi_device_id *id)
  715. {
  716. return (id - mhi_driver_match_table);
  717. }
  718. static void get_ipa3_client(int id,
  719. enum ipa_client_type *ul_prod,
  720. enum ipa_client_type *dl_cons)
  721. {
  722. IPA_MPM_FUNC_ENTRY();
  723. if (id >= IPA_MPM_MHIP_CH_ID_MAX) {
  724. *ul_prod = IPA_CLIENT_MAX;
  725. *dl_cons = IPA_CLIENT_MAX;
  726. } else {
  727. *ul_prod = ipa_mpm_pipes[id].ul_prod.ipa_client;
  728. *dl_cons = ipa_mpm_pipes[id].dl_cons.ipa_client;
  729. }
  730. IPA_MPM_FUNC_EXIT();
  731. }
/*
 * ipa_mpm_connect_mhip_gsi_pipe() - allocate, map and start one MHIP GSI pipe.
 * @mhip_client: IPA client type (PROD -> host DL path, CONS -> host UL path)
 * @mhi_idx: MHIP channel index (IPA_MPM_MHIP_CH_ID_0 .. _MAX-1)
 * @out_params: filled by ipa3_request_gsi_channel() with the channel info
 *
 * Allocates event/transfer rings (one page each) and per-TRE buffers,
 * SMMU-maps them, requests and starts a GSI channel, and records the
 * resulting addresses into ipa_mpm_ctx->md[mhi_idx]. On any failure
 * after entry validation, asserts (ipa_assert()) and returns -EFAULT.
 *
 * Returns 0 on success (also when the EP was already allocated),
 * -EFAULT on failure.
 */
static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
	int mhi_idx, struct ipa_req_chan_out_params *out_params)
{
	int ipa_ep_idx;
	int res;
	struct mhi_p_desc *er_ring_va, *tr_ring_va;
	void *buff_va;
	dma_addr_t er_carved_iova, tr_carved_iova;
	dma_addr_t ap_cb_tr_iova, ap_cb_er_iova, ap_cb_buff_iova;
	struct ipa_request_gsi_channel_params gsi_params;
	int dir;
	int i, k;
	int result;
	struct ipa3_ep_context *ep;

	/* Validate client and channel index before touching any state */
	if (mhip_client == IPA_CLIENT_MAX)
		goto fail_gen;
	if ((mhi_idx < IPA_MPM_MHIP_CH_ID_0) ||
		(mhi_idx >= IPA_MPM_MHIP_CH_ID_MAX))
		goto fail_gen;
	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
		IPA_MPM_ERR("fail to find channel EP.\n");
		goto fail_gen;
	}
	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid == 1) {
		/* EP already set up -- treat as success, nothing to do */
		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
		return 0;
	}
	IPA_MPM_DBG("connecting client %d (ep: %d)\n", mhip_client, ipa_ep_idx);
	IPA_MPM_FUNC_ENTRY();

	/* Each ring is carved from a single page below */
	if (IPA_MPM_RING_TOTAL_SIZE > PAGE_SIZE) {
		IPA_MPM_ERR("Ring Size / allocation mismatch\n");
		ipa_assert();
	}
	/* Only ring need alignment, separate from buffer */
	er_ring_va = (struct mhi_p_desc *) get_zeroed_page(GFP_KERNEL);
	if (!er_ring_va)
		goto fail_evt_alloc;
	tr_ring_va = (struct mhi_p_desc *) get_zeroed_page(GFP_KERNEL);
	if (!tr_ring_va)
		goto fail_tr_alloc;
	/* First TRE is a NOP placeholder; data TREs start at index 1 */
	tr_ring_va[0].re_type = MHIP_RE_NOP;
	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
		DMA_TO_HIPA : DMA_FROM_HIPA;

	/* allocate transfer ring elements */
	for (i = 1, k = 1; i < IPA_MPM_RING_LEN; i++, k++) {
		buff_va = kzalloc(TRE_BUFF_SIZE, GFP_KERNEL);
		if (!buff_va)
			goto fail_buff_alloc;
		tr_ring_va[i].buffer_ptr =
			ipa_mpm_smmu_map(buff_va, TRE_BUFF_SIZE, dir,
				&ap_cb_buff_iova);
		if (!tr_ring_va[i].buffer_ptr)
			goto fail_smmu_map_ring;
		tr_ring_va[i].buff_len = TRE_BUFF_SIZE;
		tr_ring_va[i].chain = 0;
		tr_ring_va[i].ieob = 0;
		tr_ring_va[i].ieot = 0;
		tr_ring_va[i].bei = 0;
		tr_ring_va[i].sct = 0;
		tr_ring_va[i].re_type = MHIP_RE_XFER;
		/* Record VA/iova pairs so ipa_mpm_clean_mhip_chan() can
		 * unmap and free them later.
		 */
		if (IPA_CLIENT_IS_PROD(mhip_client)) {
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_va[k] =
				buff_va;
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[k]
				= tr_ring_va[i].buffer_ptr;
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[k] =
				ap_cb_buff_iova;
		} else {
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_va[k] =
				buff_va;
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[k]
				= tr_ring_va[i].buffer_ptr;
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[k] =
				ap_cb_buff_iova;
		}
	}

	/* Map the rings themselves (page-aligned) */
	tr_carved_iova = ipa_mpm_smmu_map(tr_ring_va, PAGE_SIZE, dir,
		&ap_cb_tr_iova);
	if (!tr_carved_iova)
		goto fail_smmu_map_ring;
	er_carved_iova = ipa_mpm_smmu_map(er_ring_va, PAGE_SIZE, dir,
		&ap_cb_er_iova);
	if (!er_carved_iova)
		goto fail_smmu_map_ring;

	/* Store Producer channel rings */
	if (IPA_CLIENT_IS_PROD(mhip_client)) {
		/* Device UL */
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = er_ring_va;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = tr_ring_va;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa = er_carved_iova;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa = tr_carved_iova;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr =
			ap_cb_tr_iova;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er =
			ap_cb_er_iova;
	} else {
		/* Host UL */
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = er_ring_va;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = tr_ring_va;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = er_carved_iova;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = tr_carved_iova;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr =
			ap_cb_tr_iova;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er =
			ap_cb_er_iova;
	}

	memset(&gsi_params, 0, sizeof(struct ipa_request_gsi_channel_params));
	if (IPA_CLIENT_IS_PROD(mhip_client))
		gsi_params.ipa_ep_cfg =
			ipa_mpm_pipes[mhi_idx].dl_cons.ep_cfg;
	else
		gsi_params.ipa_ep_cfg =
			ipa_mpm_pipes[mhi_idx].ul_prod.ep_cfg;
	gsi_params.client = mhip_client;
	gsi_params.skip_ep_cfg = false;

	/*
	 * RP update address = Device channel DB address
	 * CLIENT_PROD -> Host DL
	 * CLIENT_CONS -> Host UL
	 */
	if (IPA_CLIENT_IS_PROD(mhip_client)) {
		gsi_params.evt_ring_params.rp_update_addr =
			ipa_mpm_smmu_map_doorbell(
			MHIP_SMMU_DOMAIN_IPA,
			ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.device_db);
		if (gsi_params.evt_ring_params.rp_update_addr == 0)
			goto fail_smmu_map_db;
		ipa_mpm_ctx->md[mhi_idx].dl_cons.db_host_iova =
			gsi_params.evt_ring_params.rp_update_addr;
		/*
		 * NOTE(review): for PROD the event ring gets tr_pa and the
		 * channel gets er_pa, the reverse of the UL branch below --
		 * this criss-cross mirrors the host/device ring perspective
		 * swap also seen in the device-context setup at the end;
		 * confirm against the MHIP spec before "fixing".
		 */
		gsi_params.evt_ring_params.ring_base_addr =
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
		gsi_params.chan_params.ring_base_addr =
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
	} else {
		gsi_params.evt_ring_params.rp_update_addr =
			ipa_mpm_smmu_map_doorbell(
			MHIP_SMMU_DOMAIN_IPA,
			ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.device_db);
		if (gsi_params.evt_ring_params.rp_update_addr == 0)
			goto fail_smmu_map_db;
		ipa_mpm_ctx->md[mhi_idx].ul_prod.db_host_iova =
			gsi_params.evt_ring_params.rp_update_addr;
		gsi_params.evt_ring_params.ring_base_addr =
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
		gsi_params.chan_params.ring_base_addr =
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
	}

	/* Fill Event ring params */
	gsi_params.evt_ring_params.intf = GSI_EVT_CHTYPE_MHIP_EV;
	gsi_params.evt_ring_params.intr = GSI_INTR_MSI;
	gsi_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B;
	gsi_params.evt_ring_params.ring_len =
		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
	gsi_params.evt_ring_params.ring_base_vaddr = NULL;
	gsi_params.evt_ring_params.int_modt = 0;
	gsi_params.evt_ring_params.int_modc = 0;
	gsi_params.evt_ring_params.intvec = 0;
	gsi_params.evt_ring_params.msi_addr = 0;
	gsi_params.evt_ring_params.exclusive = true;
	gsi_params.evt_ring_params.err_cb = ipa_mpm_gsi_evt_ring_err_cb;
	gsi_params.evt_ring_params.user_data = NULL;

	/* Evt Scratch Params */
	/* Disable the Moderation for ringing doorbells */
	gsi_params.evt_scratch.mhip.rp_mod_threshold = 1;
	gsi_params.evt_scratch.mhip.rp_mod_timer = 0;
	gsi_params.evt_scratch.mhip.rp_mod_counter = 0;
	gsi_params.evt_scratch.mhip.rp_mod_timer_id = 0;
	gsi_params.evt_scratch.mhip.rp_mod_timer_running = 0;
	gsi_params.evt_scratch.mhip.fixed_buffer_sz = TRE_BUFF_SIZE;

	if (IPA_CLIENT_IS_PROD(mhip_client))
		gsi_params.evt_scratch.mhip.rp_mod_threshold = 4;

	/* Channel Params */
	gsi_params.chan_params.prot = GSI_CHAN_PROT_MHIP;
	gsi_params.chan_params.dir = IPA_CLIENT_IS_PROD(mhip_client) ?
		GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
	/* chan_id is set in ipa3_request_gsi_channel() */
	gsi_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
	gsi_params.chan_params.ring_len =
		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
	gsi_params.chan_params.ring_base_vaddr = NULL;
	gsi_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE;
	gsi_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
	gsi_params.chan_params.low_weight = 1;
	gsi_params.chan_params.xfer_cb = NULL;
	gsi_params.chan_params.err_cb = ipa_mpm_gsi_chan_err_cb;
	gsi_params.chan_params.chan_user_data = NULL;

	/* Channel scratch */
	gsi_params.chan_scratch.mhip.assert_bit_40 = 0;
	gsi_params.chan_scratch.mhip.host_channel = 1;

	res = ipa3_request_gsi_channel(&gsi_params, out_params);
	if (res) {
		IPA_MPM_ERR("failed to allocate GSI channel res=%d\n", res);
		goto fail_alloc_channel;
	}

	if (IPA_CLIENT_IS_CONS(mhip_client)) {
		/*
		 * Enable HOLB timer one time after bootup/SSR.
		 * The HOLB timeout drops the packets on MHIP if
		 * there is a stall on MHIP TX pipe greater than
		 * configured timeout.
		 */
		result = ipa_mpm_start_mhip_holb_tmo(ipa_ep_idx);
		if (result) {
			IPA_MPM_ERR("HOLB config failed for %d, fail = %d\n",
				ipa_ep_idx, result);
			goto fail_alloc_channel;
		}
	}

	/* Track GSI state transitions for debug/cleanup */
	if (IPA_CLIENT_IS_PROD(mhip_client))
		ipa_mpm_change_gsi_state(mhi_idx,
			IPA_MPM_MHIP_CHAN_DL,
			GSI_ALLOCATED);
	else
		ipa_mpm_change_gsi_state(mhi_idx,
			IPA_MPM_MHIP_CHAN_UL,
			GSI_ALLOCATED);
	result = ipa3_start_gsi_channel(ipa_ep_idx);
	if (result) {
		IPA_MPM_ERR("start MHIP channel %d failed\n", mhip_client);
		if (IPA_CLIENT_IS_PROD(mhip_client))
			ipa_mpm_change_gsi_state(mhi_idx,
				IPA_MPM_MHIP_CHAN_DL, GSI_ERR);
		else
			ipa_mpm_change_gsi_state(mhi_idx,
				IPA_MPM_MHIP_CHAN_UL, GSI_ERR);
		goto fail_start_channel;
	}
	if (IPA_CLIENT_IS_PROD(mhip_client))
		ipa_mpm_change_gsi_state(mhi_idx,
			IPA_MPM_MHIP_CHAN_DL, GSI_STARTED);
	else
		ipa_mpm_change_gsi_state(mhi_idx,
			IPA_MPM_MHIP_CHAN_UL, GSI_STARTED);

	/* Fill in the Device Context params */
	if (IPA_CLIENT_IS_PROD(mhip_client)) {
		/* This is the DL channel :: Device -> Host */
		ipa_mpm_ctx->md[mhi_idx].dl_cons.evt_props.ev_ctx.rbase =
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
		ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.ch_ctx.rbase =
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
	} else {
		ipa_mpm_ctx->md[mhi_idx].ul_prod.evt_props.ev_ctx.rbase =
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
		ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.ch_ctx.rbase =
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
	}
	IPA_MPM_FUNC_EXIT();

	return 0;

	/* All failure paths below assert; labels unwind GSI setup only --
	 * ring pages and TRE buffers are not freed here (ipa_assert()
	 * does not return in production builds).
	 */
fail_start_channel:
	ipa3_disable_data_path(ipa_ep_idx);
	ipa3_stop_gsi_channel(ipa_ep_idx);
fail_alloc_channel:
	ipa3_release_gsi_channel(ipa_ep_idx);
fail_smmu_map_db:
fail_smmu_map_ring:
fail_tr_alloc:
fail_evt_alloc:
fail_buff_alloc:
	ipa_assert();
fail_gen:
	return -EFAULT;
}
  996. static void ipa_mpm_clean_mhip_chan(int mhi_idx,
  997. enum ipa_client_type mhip_client)
  998. {
  999. int dir;
  1000. int i;
  1001. int ipa_ep_idx;
  1002. int result;
  1003. IPA_MPM_FUNC_ENTRY();
  1004. if (mhip_client == IPA_CLIENT_MAX)
  1005. return;
  1006. if ((mhi_idx < IPA_MPM_MHIP_CH_ID_0) ||
  1007. (mhi_idx >= IPA_MPM_MHIP_CH_ID_MAX))
  1008. return;
  1009. dir = IPA_CLIENT_IS_PROD(mhip_client) ?
  1010. DMA_TO_HIPA : DMA_FROM_HIPA;
  1011. ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
  1012. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
  1013. IPA_MPM_ERR("fail to find channel EP.\n");
  1014. return;
  1015. }
  1016. /* For the uplink channels, enable HOLB. */
  1017. if (IPA_CLIENT_IS_CONS(mhip_client))
  1018. ipa3_disable_data_path(ipa_ep_idx);
  1019. /* Release channel */
  1020. result = ipa3_stop_gsi_channel(ipa_ep_idx);
  1021. if (result) {
  1022. IPA_MPM_ERR("Stop channel for MHIP_Client = %d failed\n",
  1023. mhip_client);
  1024. goto fail_chan;
  1025. }
  1026. result = ipa3_reset_gsi_channel(ipa_ep_idx);
  1027. if (result) {
  1028. IPA_MPM_ERR("Reset channel for MHIP_Client = %d failed\n",
  1029. mhip_client);
  1030. goto fail_chan;
  1031. }
  1032. result = ipa3_reset_gsi_event_ring(ipa_ep_idx);
  1033. if (result) {
  1034. IPA_MPM_ERR("Reset ev ring for MHIP_Client = %d failed\n",
  1035. mhip_client);
  1036. goto fail_chan;
  1037. }
  1038. result = ipa3_release_gsi_channel(ipa_ep_idx);
  1039. if (result) {
  1040. IPA_MPM_ERR("Release tr ring for MHIP_Client = %d failed\n",
  1041. mhip_client);
  1042. if (IPA_CLIENT_IS_PROD(mhip_client))
  1043. ipa_mpm_change_gsi_state(mhi_idx,
  1044. IPA_MPM_MHIP_CHAN_DL, GSI_ERR);
  1045. else
  1046. ipa_mpm_change_gsi_state(mhi_idx,
  1047. IPA_MPM_MHIP_CHAN_UL, GSI_ERR);
  1048. goto fail_chan;
  1049. }
  1050. if (IPA_CLIENT_IS_PROD(mhip_client))
  1051. ipa_mpm_change_gsi_state(mhi_idx,
  1052. IPA_MPM_MHIP_CHAN_DL, GSI_INIT);
  1053. else
  1054. ipa_mpm_change_gsi_state(mhi_idx,
  1055. IPA_MPM_MHIP_CHAN_UL, GSI_INIT);
  1056. memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
  1057. /* Unmap Doorbells */
  1058. if (IPA_CLIENT_IS_PROD(mhip_client)) {
  1059. ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_PCIE,
  1060. ipa_mpm_ctx->md[mhi_idx].dl_cons.db_device_iova);
  1061. ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_IPA,
  1062. ipa_mpm_ctx->md[mhi_idx].dl_cons.db_host_iova);
  1063. ipa_mpm_ctx->md[mhi_idx].dl_cons.db_host_iova = 0;
  1064. ipa_mpm_ctx->md[mhi_idx].dl_cons.db_device_iova = 0;
  1065. } else {
  1066. ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_PCIE,
  1067. ipa_mpm_ctx->md[mhi_idx].ul_prod.db_device_iova);
  1068. ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_IPA,
  1069. ipa_mpm_ctx->md[mhi_idx].ul_prod.db_host_iova);
  1070. ipa_mpm_ctx->md[mhi_idx].ul_prod.db_host_iova = 0;
  1071. ipa_mpm_ctx->md[mhi_idx].ul_prod.db_device_iova = 0;
  1072. }
  1073. /* deallocate/Unmap transfer ring buffers */
  1074. for (i = 1; i < IPA_MPM_RING_LEN; i++) {
  1075. if (IPA_CLIENT_IS_PROD(mhip_client)) {
  1076. ipa_mpm_smmu_unmap(
  1077. (dma_addr_t)
  1078. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[i],
  1079. TRE_BUFF_SIZE, dir,
  1080. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]);
  1081. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[i]
  1082. = 0;
  1083. kfree(
  1084. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_va[i]);
  1085. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_va[i]
  1086. = NULL;
  1087. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]
  1088. = 0;
  1089. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[i]
  1090. = 0;
  1091. } else {
  1092. ipa_mpm_smmu_unmap(
  1093. (dma_addr_t)
  1094. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[i],
  1095. TRE_BUFF_SIZE, dir,
  1096. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
  1097. );
  1098. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[i]
  1099. = 0;
  1100. kfree(
  1101. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_va[i]);
  1102. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_va[i]
  1103. = NULL;
  1104. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
  1105. = 0;
  1106. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[i]
  1107. = 0;
  1108. }
  1109. }
  1110. /* deallocate/Unmap rings */
  1111. if (IPA_CLIENT_IS_PROD(mhip_client)) {
  1112. ipa_mpm_smmu_unmap(
  1113. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa,
  1114. IPA_MPM_PAGE_SIZE, dir,
  1115. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er);
  1116. ipa_mpm_smmu_unmap(
  1117. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa,
  1118. IPA_MPM_PAGE_SIZE, dir,
  1119. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
  1120. if (ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va) {
  1121. free_page((unsigned long)
  1122. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va);
  1123. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = NULL;
  1124. }
  1125. if (ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va) {
  1126. free_page((unsigned long)
  1127. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va);
  1128. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = NULL;
  1129. }
  1130. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er = 0;
  1131. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr = 0;
  1132. } else {
  1133. ipa_mpm_smmu_unmap(
  1134. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa,
  1135. IPA_MPM_PAGE_SIZE, dir,
  1136. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
  1137. ipa_mpm_smmu_unmap(
  1138. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa,
  1139. IPA_MPM_PAGE_SIZE, dir,
  1140. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er);
  1141. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = 0;
  1142. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = 0;
  1143. if (ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va) {
  1144. free_page((unsigned long)
  1145. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va);
  1146. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = NULL;
  1147. }
  1148. if (ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va) {
  1149. free_page((unsigned long)
  1150. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va);
  1151. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = NULL;
  1152. }
  1153. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er = 0;
  1154. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr = 0;
  1155. }
  1156. IPA_MPM_FUNC_EXIT();
  1157. return;
  1158. fail_chan:
  1159. ipa_assert();
  1160. }
  1161. /* round addresses for closest page per SMMU requirements */
  1162. static inline void ipa_mpm_smmu_round_to_page(uint64_t iova, uint64_t pa,
  1163. uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p)
  1164. {
  1165. *iova_p = rounddown(iova, PAGE_SIZE);
  1166. *pa_p = rounddown(pa, PAGE_SIZE);
  1167. *size_p = roundup(size + pa - *pa_p, PAGE_SIZE);
  1168. }
  1169. static int __ipa_mpm_configure_mhi_device(struct ipa_mpm_channel *ch,
  1170. int mhi_idx, int dir)
  1171. {
  1172. struct mhi_buf ch_config[2];
  1173. int ret;
  1174. IPA_MPM_FUNC_ENTRY();
  1175. if (ch == NULL) {
  1176. IPA_MPM_ERR("ch config is NULL\n");
  1177. return -EINVAL;
  1178. }
  1179. /* Populate CCA */
  1180. ch_config[0].buf = &ch->chan_props.ch_ctx;
  1181. ch_config[0].len = sizeof(ch->chan_props.ch_ctx);
  1182. ch_config[0].name = "CCA";
  1183. /* populate ECA */
  1184. ch_config[1].buf = &ch->evt_props.ev_ctx;
  1185. ch_config[1].len = sizeof(ch->evt_props.ev_ctx);
  1186. ch_config[1].name = "ECA";
  1187. IPA_MPM_DBG("Configuring MHI PRIME device for mhi_idx %d\n", mhi_idx);
  1188. ret = mhi_device_configure(ipa_mpm_ctx->md[mhi_idx].mhi_dev, dir,
  1189. ch_config, 2);
  1190. if (ret) {
  1191. IPA_MPM_ERR("mhi_device_configure failed\n");
  1192. return -EINVAL;
  1193. }
  1194. IPA_MPM_FUNC_EXIT();
  1195. return 0;
  1196. }
  1197. static void ipa_mpm_mhip_shutdown(int mhip_idx)
  1198. {
  1199. enum ipa_client_type ul_prod_chan, dl_cons_chan;
  1200. IPA_MPM_FUNC_ENTRY();
  1201. get_ipa3_client(mhip_idx, &ul_prod_chan, &dl_cons_chan);
  1202. if (mhip_idx != IPA_MPM_MHIP_CH_ID_2)
  1203. /* For DPL, stop only DL channel */
  1204. ipa_mpm_clean_mhip_chan(mhip_idx, ul_prod_chan);
  1205. ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);
  1206. if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
  1207. ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
  1208. /* while in modem shutdown scenarios such as SSR, no explicit
  1209. * PCIe vote is needed.
  1210. */
  1211. ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
  1212. }
  1213. mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
  1214. ipa_mpm_ctx->md[mhip_idx].mhi_dev = NULL;
  1215. mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
  1216. IPA_MPM_FUNC_EXIT();
  1217. }
/**
 * ipa_mpm_vote_unvote_pcie_clk() - Vote/Unvote PCIe clock per probe_id
 * @vote: CLK_ON to vote for the PCIe clock, CLK_OFF to unvote
 * @probe_id: MHI probe_id per client
 * @is_force: Forcibly casts vote - should be true only in probe
 * @is_acted: Output param - indicates whether the clock was actually
 * voted/unvoted; checked only when voting for clocks
 *
 * Return value: 0 on success, negative error code on failure.
 */
  1228. static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
  1229. int probe_id,
  1230. bool is_force,
  1231. bool *is_acted)
  1232. {
  1233. int result = 0;
  1234. if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
  1235. IPA_MPM_ERR("probe_id not found\n");
  1236. return -EINVAL;
  1237. }
  1238. if (vote > CLK_OFF) {
  1239. IPA_MPM_ERR("Invalid vote\n");
  1240. return -EINVAL;
  1241. }
  1242. if (!is_acted) {
  1243. IPA_MPM_ERR("Invalid clk_vote ptr\n");
  1244. return -EFAULT;
  1245. }
  1246. mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
  1247. if (ipa_mpm_ctx->md[probe_id].mhi_dev == NULL) {
  1248. IPA_MPM_ERR("MHI not initialized yet\n");
  1249. *is_acted = false;
  1250. mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
  1251. return 0;
  1252. }
  1253. if (!ipa_mpm_ctx->md[probe_id].init_complete &&
  1254. !is_force) {
  1255. /*
  1256. * SSR might be in progress, dont have to vote/unvote for
  1257. * IPA clocks as it will be taken care in remove_cb/subsequent
  1258. * probe.
  1259. */
  1260. IPA_MPM_DBG("SSR in progress, return\n");
  1261. *is_acted = false;
  1262. mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
  1263. return 0;
  1264. }
  1265. mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
  1266. IPA_MPM_DBG("PCIe clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
  1267. vote, probe_id,
  1268. atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt));
  1269. if (vote == CLK_ON) {
  1270. result = mhi_device_get_sync(
  1271. ipa_mpm_ctx->md[probe_id].mhi_dev,
  1272. MHI_VOTE_BUS | MHI_VOTE_DEVICE);
  1273. if (result) {
  1274. IPA_MPM_ERR("mhi_sync_get failed for probe_id %d\n",
  1275. result, probe_id);
  1276. *is_acted = false;
  1277. return result;
  1278. }
  1279. IPA_MPM_DBG("probe_id %d PCIE clock now ON\n", probe_id);
  1280. atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
  1281. atomic_inc(&ipa_mpm_ctx->pcie_clk_total_cnt);
  1282. } else {
  1283. if ((atomic_read(
  1284. &ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt)
  1285. == 0)) {
  1286. IPA_MPM_DBG("probe_id %d PCIE clock already devoted\n",
  1287. probe_id);
  1288. WARN_ON(1);
  1289. *is_acted = true;
  1290. return 0;
  1291. }
  1292. mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev,
  1293. MHI_VOTE_BUS | MHI_VOTE_DEVICE);
  1294. IPA_MPM_DBG("probe_id %d PCIE clock off\n", probe_id);
  1295. atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
  1296. atomic_dec(&ipa_mpm_ctx->pcie_clk_total_cnt);
  1297. }
  1298. *is_acted = true;
  1299. return result;
  1300. }
  1301. /*
  1302. * Turning on/OFF IPA Clock is done only once- for all clients
  1303. */
  1304. static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
  1305. int probe_id)
  1306. {
  1307. if (vote > CLK_OFF)
  1308. return;
  1309. IPA_MPM_DBG("IPA clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
  1310. vote, probe_id,
  1311. atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt));
  1312. if (vote == CLK_ON) {
  1313. IPA_ACTIVE_CLIENTS_INC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
  1314. IPA_MPM_DBG("IPA clock now ON for probe_id %d\n", probe_id);
  1315. atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
  1316. atomic_inc(&ipa_mpm_ctx->ipa_clk_total_cnt);
  1317. } else {
  1318. if ((atomic_read
  1319. (&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt)
  1320. == 0)) {
  1321. IPA_MPM_DBG("probe_id %d IPA clock count < 0\n",
  1322. probe_id);
  1323. WARN_ON(1);
  1324. return;
  1325. }
  1326. IPA_ACTIVE_CLIENTS_DEC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
  1327. IPA_MPM_DBG("probe_id %d IPA clock off\n", probe_id);
  1328. atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
  1329. atomic_dec(&ipa_mpm_ctx->ipa_clk_total_cnt);
  1330. }
  1331. }
/**
 * ipa_mpm_start_stop_remote_mhip_chan() - Start/Stop remote device side
 * MHIP channels.
 * @probe_id: MHI probe_id per client
 * @start_stop: Start/Stop the remote channels
 * @is_force: Forcibly start/stop the remote channels -
 * should be true only in probe
 * Return value: 0 if success or error value.
 */
static int ipa_mpm_start_stop_remote_mhip_chan(
	int probe_id,
	enum ipa_mpm_start_stop_type start_stop,
	bool is_force)
{
	int ret = 0;
	struct mhi_device *mhi_dev = ipa_mpm_ctx->md[probe_id].mhi_dev;

	/* Sanity check to make sure Remote channels can be started.
	 * If probe in progress, mhi_prepare_for_transfer will start
	 * the remote channels so no need to start it from here.
	 */
	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
	if (!ipa_mpm_ctx->md[probe_id].init_complete && !is_force) {
		IPA_MPM_ERR("MHI not initialized yet, probe in progress\n");
		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
		return ret;
	}

	/* For error state, expect modem SSR to recover from error */
	if (ipa_mpm_ctx->md[probe_id].remote_state == MPM_MHIP_REMOTE_ERR) {
		IPA_MPM_ERR("Remote channels in err state for %d\n", probe_id);
		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
		return -EFAULT;
	}
	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);

	/*
	 * NOTE(review): remote_state is read below without mhi_mutex held
	 * (the lock is only retaken to write it after the MHI call);
	 * a concurrent caller could race the state check -- confirm
	 * callers serialize, or that a stale read here is harmless.
	 */
	if (start_stop == MPM_MHIP_START) {
		if (ipa_mpm_ctx->md[probe_id].remote_state ==
			MPM_MHIP_REMOTE_START) {
			/* Already started: nothing to do, return 0 */
			IPA_MPM_DBG("Remote channel already started for %d\n",
				probe_id);
		} else {
			/* Resume the device-side transfer, then record the
			 * outcome (START on success, ERR on failure) under
			 * the mutex.
			 */
			ret = mhi_resume_transfer(mhi_dev);
			mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
			if (ret)
				ipa_mpm_ctx->md[probe_id].remote_state =
					MPM_MHIP_REMOTE_ERR;
			else
				ipa_mpm_ctx->md[probe_id].remote_state =
					MPM_MHIP_REMOTE_START;
			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
		}
	} else {
		if (ipa_mpm_ctx->md[probe_id].remote_state ==
			MPM_MHIP_REMOTE_STOP) {
			/* Already stopped: nothing to do, return 0 */
			IPA_MPM_DBG("Remote channel already stopped for %d\n",
				probe_id);
		} else {
			/* Pause the device-side transfer; mirror of the
			 * start path above.
			 */
			ret = mhi_pause_transfer(mhi_dev);
			mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
			if (ret)
				ipa_mpm_ctx->md[probe_id].remote_state =
					MPM_MHIP_REMOTE_ERR;
			else
				ipa_mpm_ctx->md[probe_id].remote_state =
					MPM_MHIP_REMOTE_STOP;
			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
		}
	}
	return ret;
}
/*
 * ipa_mpm_start_stop_mhip_chan() - start or stop the host-side GSI channel
 * backing an MHIP pipe.
 * @mhip_chan: which host channel to act on (UL, DL or BOTH).
 * @probe_id: MHIP channel-pair index (must be < IPA_MPM_MHIP_CH_ID_MAX).
 * @start_stop: MPM_MHIP_START or MPM_MHIP_STOP.
 *
 * Returns MHIP_STATUS_SUCCESS when the channel state was changed,
 * MHIP_STATUS_NO_OP when it was already in the requested state,
 * MHIP_STATUS_EP_NOT_READY / MHIP_STATUS_EP_NOT_FOUND when the endpoint
 * or GSI channel is not usable yet, and MHIP_STATUS_FAIL on GSI errors
 * (which also disable the data path and assert).
 */
static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
	enum ipa_mpm_mhip_chan mhip_chan,
	int probe_id,
	enum ipa_mpm_start_stop_type start_stop)
{
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	bool is_start;
	enum ipa_client_type ul_chan, dl_chan;
	u32 source_pipe_bitmask = 0;
	enum gsi_status gsi_res = GSI_STATUS_SUCCESS;
	int result;

	IPA_MPM_FUNC_ENTRY();

	/* Validate channel selector and probe index before touching state */
	if (mhip_chan > IPA_MPM_MHIP_CHAN_BOTH) {
		IPA_MPM_ERR("MHI not initialized yet\n");
		return MHIP_STATUS_FAIL;
	}
	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
		IPA_MPM_ERR("MHI not initialized yet\n");
		return MHIP_STATUS_FAIL;
	}
	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_BOTH) {
		/*
		 * NOTE(review): for CHAN_BOTH the UL mapping is immediately
		 * overwritten by the DL mapping, so only the DL endpoint is
		 * used for the operations below — confirm this is intended.
		 */
		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
	}
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
		IPA_MPM_ERR("fail to get EP# for idx %d\n", ipa_ep_idx);
		return MHIP_STATUS_EP_NOT_FOUND;
	}
	/* Probe must have completed before GSI channels may be touched */
	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
	if (!ipa_mpm_ctx->md[probe_id].init_complete) {
		IPA_MPM_ERR("MHIP probe %d not initialized\n", probe_id);
		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
		return MHIP_STATUS_EP_NOT_READY;
	}
	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
	ep = &ipa3_ctx->ep[ipa_ep_idx];
	/* The GSI channel must be at least allocated to start or stop it */
	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
		IPA_MPM_DBG("current GSI state = %d, action = %d\n",
			ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state,
			start_stop);
		if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state <
			GSI_ALLOCATED) {
			IPA_MPM_ERR("GSI chan is not allocated yet\n");
			return MHIP_STATUS_EP_NOT_READY;
		}
	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
		IPA_MPM_DBG("current GSI state = %d, action = %d\n",
			ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state,
			start_stop);
		if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state <
			GSI_ALLOCATED) {
			IPA_MPM_ERR("GSI chan is not allocated yet\n");
			return MHIP_STATUS_EP_NOT_READY;
		}
	}
	is_start = (start_stop == MPM_MHIP_START) ? true : false;
	if (is_start) {
		/* Starting an already-started channel is a no-op */
		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
			if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state ==
				GSI_STARTED) {
				IPA_MPM_ERR("GSI chan is already started\n");
				return MHIP_STATUS_NO_OP;
			}
		}
		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
			if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state ==
				GSI_STARTED) {
				IPA_MPM_ERR("GSI chan is already started\n");
				return MHIP_STATUS_NO_OP;
			}
		}
		/* Start GSI channel */
		gsi_res = ipa3_start_gsi_channel(ipa_ep_idx);
		if (gsi_res != GSI_STATUS_SUCCESS) {
			IPA_MPM_ERR("Error starting channel: err = %d\n",
				gsi_res);
			goto gsi_chan_fail;
		} else {
			ipa_mpm_change_gsi_state(probe_id, mhip_chan,
				GSI_STARTED);
		}
	} else {
		/* Stop path: only a STARTED channel can actually be stopped */
		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
			if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state ==
				GSI_STOPPED) {
				IPA_MPM_ERR("GSI chan is already stopped\n");
				return MHIP_STATUS_NO_OP;
			} else if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state
				!= GSI_STARTED) {
				IPA_MPM_ERR("GSI chan isn't already started\n");
				return MHIP_STATUS_NO_OP;
			}
		}
		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
			if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state ==
				GSI_STOPPED) {
				IPA_MPM_ERR("GSI chan is already stopped\n");
				return MHIP_STATUS_NO_OP;
			} else if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state
				!= GSI_STARTED) {
				IPA_MPM_ERR("GSI chan isn't already started\n");
				return MHIP_STATUS_NO_OP;
			}
		}
		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
			/*
			 * NOTE(review): source_pipe_bitmask is computed here
			 * but not used further in this function — presumably
			 * intended for a force-clear request; confirm.
			 */
			source_pipe_bitmask = 1 <<
				ipa3_get_ep_mapping(ep->client);
			/* First Stop UL GSI channel before unvote PCIe clock */
			result = ipa3_stop_gsi_channel(ipa_ep_idx);
			if (result) {
				IPA_MPM_ERR("UL chan stop failed\n");
				goto gsi_chan_fail;
			} else {
				ipa_mpm_change_gsi_state(probe_id, mhip_chan,
					GSI_STOPPED);
			}
		}
		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
			result = ipa3_stop_gsi_channel(ipa_ep_idx);
			if (result) {
				IPA_MPM_ERR("Fail to stop DL channel\n");
				goto gsi_chan_fail;
			} else {
				ipa_mpm_change_gsi_state(probe_id, mhip_chan,
					GSI_STOPPED);
			}
		}
	}
	IPA_MPM_FUNC_EXIT();
	return MHIP_STATUS_SUCCESS;

gsi_chan_fail:
	/* GSI error: tear down the data path and record the error state */
	ipa3_disable_data_path(ipa_ep_idx);
	ipa_mpm_change_gsi_state(probe_id, mhip_chan, GSI_ERR);
	ipa_assert();
	return MHIP_STATUS_FAIL;
}
  1543. int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
  1544. {
  1545. int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
  1546. int i;
  1547. static enum mhip_status_type status;
  1548. int ret = 0;
  1549. enum ipa_mpm_mhip_client_type mhip_client = IPA_MPM_MHIP_TETH;
  1550. bool is_acted = true;
  1551. const struct ipa_gsi_ep_config *ep_cfg;
  1552. uint32_t flow_ctrl_mask = 0;
  1553. if (!state)
  1554. return -EPERM;
  1555. if (!ipa3_is_mhip_offload_enabled())
  1556. return -EPERM;
  1557. for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
  1558. if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
  1559. probe_id = i;
  1560. break;
  1561. }
  1562. }
  1563. if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
  1564. IPA_MPM_ERR("Unknown probe_id\n");
  1565. return -EPERM;
  1566. }
  1567. IPA_MPM_DBG("WAN backhaul available for probe_id = %d\n", probe_id);
  1568. if (state->up) {
  1569. /* Start UL MHIP channel for offloading tethering connection */
  1570. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id,
  1571. false, &is_acted);
  1572. if (ret) {
  1573. IPA_MPM_ERR("Err %d cloking on PCIe clk %d\n", ret);
  1574. return ret;
  1575. }
  1576. /*
  1577. * Make sure to start Device side channels before
  1578. * starting Host side UL channels. This is to make
  1579. * sure device side access host side only after
  1580. * Host IPA gets voted.
  1581. */
  1582. ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
  1583. MPM_MHIP_START,
  1584. false);
  1585. if (ret) {
  1586. /*
  1587. * This can fail only when modem is in SSR state.
  1588. * Eventually there would be a remove callback,
  1589. * so return a failure.
  1590. */
  1591. IPA_MPM_ERR("MHIP remote chan start fail = %d\n", ret);
  1592. if (is_acted)
  1593. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
  1594. probe_id,
  1595. false,
  1596. &is_acted);
  1597. return ret;
  1598. }
  1599. IPA_MPM_DBG("MHIP remote channels are started\n");
  1600. /*
  1601. * Update flow control monitoring end point info.
  1602. * This info will be used to set delay on the end points upon
  1603. * hitting RED water mark.
  1604. */
  1605. ep_cfg = ipa3_get_gsi_ep_info(IPA_CLIENT_WLAN2_PROD);
  1606. if (!ep_cfg)
  1607. IPA_MPM_ERR("ep = %d not allocated yet\n",
  1608. IPA_CLIENT_WLAN2_PROD);
  1609. else
  1610. flow_ctrl_mask |= 1 << (ep_cfg->ipa_gsi_chan_num);
  1611. ep_cfg = ipa3_get_gsi_ep_info(IPA_CLIENT_USB_PROD);
  1612. if (!ep_cfg)
  1613. IPA_MPM_ERR("ep = %d not allocated yet\n",
  1614. IPA_CLIENT_USB_PROD);
  1615. else
  1616. flow_ctrl_mask |= 1 << (ep_cfg->ipa_gsi_chan_num);
  1617. atomic_set(&ipa_mpm_ctx->flow_ctrl_mask, flow_ctrl_mask);
  1618. ret = ipa3_uc_send_update_flow_control(flow_ctrl_mask,
  1619. IPA_MPM_FLOW_CTRL_ADD);
  1620. if (ret)
  1621. IPA_MPM_ERR("Err = %d setting uc flow control\n", ret);
  1622. status = ipa_mpm_start_stop_mhip_chan(
  1623. IPA_MPM_MHIP_CHAN_UL, probe_id, MPM_MHIP_START);
  1624. switch (status) {
  1625. case MHIP_STATUS_SUCCESS:
  1626. ipa_mpm_ctx->md[probe_id].teth_state =
  1627. IPA_MPM_TETH_CONNECTED;
  1628. break;
  1629. case MHIP_STATUS_EP_NOT_READY:
  1630. case MHIP_STATUS_NO_OP:
  1631. IPA_MPM_DBG("UL chan already start, status = %d\n",
  1632. status);
  1633. if (is_acted) {
  1634. return ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
  1635. probe_id,
  1636. false,
  1637. &is_acted);
  1638. }
  1639. break;
  1640. case MHIP_STATUS_FAIL:
  1641. case MHIP_STATUS_BAD_STATE:
  1642. case MHIP_STATUS_EP_NOT_FOUND:
  1643. IPA_MPM_ERR("UL chan start err =%d\n", status);
  1644. if (is_acted)
  1645. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  1646. false, &is_acted);
  1647. ipa_assert();
  1648. return -EFAULT;
  1649. default:
  1650. IPA_MPM_ERR("Err not found\n");
  1651. if (is_acted)
  1652. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  1653. false, &is_acted);
  1654. ret = -EFAULT;
  1655. break;
  1656. }
  1657. ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
  1658. } else {
  1659. /*
  1660. * Update flow control monitoring end point info.
  1661. * This info will be used to reset delay on the end points.
  1662. */
  1663. flow_ctrl_mask =
  1664. atomic_read(&ipa_mpm_ctx->flow_ctrl_mask);
  1665. ret = ipa3_uc_send_update_flow_control(flow_ctrl_mask,
  1666. IPA_MPM_FLOW_CTRL_DELETE);
  1667. flow_ctrl_mask = 0;
  1668. atomic_set(&ipa_mpm_ctx->flow_ctrl_mask, 0);
  1669. if (ret) {
  1670. IPA_MPM_ERR("Err = %d resetting uc flow control\n",
  1671. ret);
  1672. ipa_assert();
  1673. }
  1674. /*
  1675. * Make sure to stop Device side channels before
  1676. * stopping Host side UL channels. This is to make
  1677. * sure device side doesn't access host IPA after
  1678. * Host IPA gets devoted.
  1679. */
  1680. ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
  1681. MPM_MHIP_STOP,
  1682. false);
  1683. if (ret) {
  1684. /*
  1685. * This can fail only when modem is in SSR state.
  1686. * Eventually there would be a remove callback,
  1687. * so return a failure.
  1688. */
  1689. IPA_MPM_ERR("MHIP remote chan stop fail = %d\n", ret);
  1690. return ret;
  1691. }
  1692. IPA_MPM_DBG("MHIP remote channels are stopped\n");
  1693. status = ipa_mpm_start_stop_mhip_chan(
  1694. IPA_MPM_MHIP_CHAN_UL, probe_id,
  1695. MPM_MHIP_STOP);
  1696. switch (status) {
  1697. case MHIP_STATUS_SUCCESS:
  1698. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
  1699. break;
  1700. case MHIP_STATUS_NO_OP:
  1701. case MHIP_STATUS_EP_NOT_READY:
  1702. IPA_MPM_DBG("UL chan already stop, status = %d\n",
  1703. status);
  1704. break;
  1705. case MHIP_STATUS_FAIL:
  1706. case MHIP_STATUS_BAD_STATE:
  1707. case MHIP_STATUS_EP_NOT_FOUND:
  1708. IPA_MPM_ERR("UL chan cant be stopped err =%d\n",
  1709. status);
  1710. ipa_assert();
  1711. return -EFAULT;
  1712. default:
  1713. IPA_MPM_ERR("Err not found\n");
  1714. return -EFAULT;
  1715. }
  1716. /* Stop UL MHIP channel for offloading tethering connection */
  1717. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  1718. false, &is_acted);
  1719. if (ret) {
  1720. IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n",
  1721. ret);
  1722. return ret;
  1723. }
  1724. ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
  1725. }
  1726. return ret;
  1727. }
  1728. static void ipa_mpm_change_gsi_state(int probe_id,
  1729. enum ipa_mpm_mhip_chan mhip_chan,
  1730. enum ipa_mpm_gsi_state next_state)
  1731. {
  1732. if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX)
  1733. return;
  1734. if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
  1735. mutex_lock(&ipa_mpm_ctx->md[probe_id].mutex);
  1736. ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state = next_state;
  1737. IPA_MPM_DBG("GSI next_state = %d\n",
  1738. ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state);
  1739. mutex_unlock(&ipa_mpm_ctx->md[probe_id].mutex);
  1740. }
  1741. if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
  1742. mutex_lock(&ipa_mpm_ctx->md[probe_id].mutex);
  1743. ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state = next_state;
  1744. IPA_MPM_DBG("GSI next_state = %d\n",
  1745. ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state);
  1746. mutex_unlock(&ipa_mpm_ctx->md[probe_id].mutex);
  1747. }
  1748. }
  1749. static void ipa_mpm_change_teth_state(int probe_id,
  1750. enum ipa_mpm_teth_state next_state)
  1751. {
  1752. enum ipa_mpm_teth_state curr_state;
  1753. if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
  1754. IPA_MPM_ERR("Unknown probe_id\n");
  1755. return;
  1756. }
  1757. curr_state = ipa_mpm_ctx->md[probe_id].teth_state;
  1758. IPA_MPM_DBG("curr_state = %d, ip_state = %d mhip_s\n",
  1759. curr_state, next_state);
  1760. switch (curr_state) {
  1761. case IPA_MPM_TETH_INIT:
  1762. if (next_state == IPA_MPM_TETH_CONNECTED)
  1763. next_state = IPA_MPM_TETH_INPROGRESS;
  1764. break;
  1765. case IPA_MPM_TETH_INPROGRESS:
  1766. break;
  1767. case IPA_MPM_TETH_CONNECTED:
  1768. break;
  1769. default:
  1770. IPA_MPM_ERR("No change in state\n");
  1771. break;
  1772. }
  1773. ipa_mpm_ctx->md[probe_id].teth_state = next_state;
  1774. IPA_MPM_DBG("next_state = %d\n", next_state);
  1775. }
  1776. static void ipa_mpm_read_channel(enum ipa_client_type chan)
  1777. {
  1778. struct gsi_chan_info chan_info;
  1779. int ipa_ep_idx;
  1780. struct ipa3_ep_context *ep;
  1781. int res;
  1782. ipa_ep_idx = ipa3_get_ep_mapping(chan);
  1783. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
  1784. IPAERR("failed to get idx");
  1785. return;
  1786. }
  1787. ep = &ipa3_ctx->ep[ipa_ep_idx];
  1788. IPA_MPM_DBG("Reading channel for chan %d, ep = %d, gsi_chan_hdl = %d\n",
  1789. chan, ep, ep->gsi_chan_hdl);
  1790. res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
  1791. if (res)
  1792. IPA_MPM_ERR("Reading of channel failed for ep %d\n", ep);
  1793. }
/* ipa_mpm_mhi_probe_cb is invoked once per MHI' (MHI Prime) channel pair.
 * Currently there are 4 MHI channels.
 */
/*
 * ipa_mpm_mhi_probe_cb() - MHI probe callback for one MHIP channel pair.
 * @mhi_dev: MHI device handle for this channel pair.
 * @mhi_id: device id used to derive the MPM probe index.
 *
 * Sets up the host GSI pipes and the device-side channel/event contexts,
 * calls mhi_prepare_for_transfer() (which starts the remote channels),
 * rings the initial transfer/event doorbells on both sides, and finally
 * reconciles the tethering state (stopping the UL channel again if no
 * tethering session started in the meantime). Returns 0 on success or a
 * negative errno; most hard failures also assert.
 */
static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
	const struct mhi_device_id *mhi_id)
{
	struct ipa_mpm_channel *ch;
	int ret;
	enum ipa_client_type ul_prod, dl_cons;
	int probe_id;
	struct ipa_req_chan_out_params ul_out_params, dl_out_params;
	void __iomem *db_addr;
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
	u32 wp_addr;
	int pipe_idx;
	bool is_acted = true;
	/* NOTE(review): flow_ctrl_mask is u64 but is filled from atomic_read
	 * (int) below — confirm the mask never needs more than 32 bits.
	 */
	uint64_t flow_ctrl_mask = 0;
	bool add_delete = false;

	IPA_MPM_FUNC_ENTRY();

	if (ipa_mpm_ctx == NULL) {
		IPA_MPM_ERR("ipa_mpm_ctx is NULL not expected, returning..\n");
		return -ENOMEM;
	}
	probe_id = get_idx_from_id(mhi_id);
	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
		IPA_MPM_ERR("chan=%pK is not supported for now\n", mhi_id);
		return -EPERM;
	}
	/* A repeated probe for the same id is treated as success */
	if (ipa_mpm_ctx->md[probe_id].init_complete) {
		IPA_MPM_ERR("Probe initialization already done, returning\n");
		return 0;
	}
	IPA_MPM_DBG("Received probe for id=%d\n", probe_id);

	get_ipa3_client(probe_id, &ul_prod, &dl_cons);

	/* Vote for IPA clock for first time in initialization seq.
	 * IPA clock will be devoted when MHI enters LPM
	 * PCIe clock will be voted / devoted with every channel probe
	 * we receive.
	 * ul_prod = Host -> Device
	 * dl_cons = Device -> Host
	 */
	ipa_mpm_ctx->md[probe_id].mhi_dev = mhi_dev;
	ipa_mpm_ctx->mhi_parent_dev =
		ipa_mpm_ctx->md[probe_id].mhi_dev->dev.parent;

	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
	ipa_mpm_ctx->md[probe_id].remote_state = MPM_MHIP_REMOTE_STOP;
	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id, true, &is_acted);
	if (ret) {
		IPA_MPM_ERR("Err %d voitng PCIe clocks\n", ret);
		return -EPERM;
	}
	ipa_mpm_vote_unvote_ipa_clk(CLK_ON, probe_id);
	ipa_mpm_ctx->md[probe_id].in_lpm = false;
	IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);

	/*
	 * Set up MHI' pipes for Device IPA filling in
	 * Channel Context and Event Context.
	 * These params will be sent to Device side.
	 * UL CHAN = HOST -> Device
	 * DL CHAN = Device -> HOST
	 * per channel a TRE and EV is allocated.
	 * for a UL channel -
	 * IPA HOST PROD TRE -> IPA DEVICE CONS EV
	 * IPA HOST PROD EV -> IPA DEVICE CONS TRE
	 * for a DL channel -
	 * IPA Device PROD TRE -> IPA HOST CONS EV
	 * IPA Device PROD EV -> IPA HOST CONS TRE
	 */
	if (ul_prod != IPA_CLIENT_MAX) {
		/* store UL properties */
		ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
		/* Store Channel properties */
		ch->chan_props.id = mhi_dev->ul_chan_id;
		ch->chan_props.device_db =
			ipa_mpm_ctx->dev_info.chdb_base +
			ch->chan_props.id * 8;
		/* Fill Channel Conext to be sent to Device side */
		ch->chan_props.ch_ctx.chtype =
			IPA_MPM_MHI_HOST_UL_CHANNEL;
		ch->chan_props.ch_ctx.erindex =
			mhi_dev->ul_event_id;
		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
			GSI_EVT_RING_RE_SIZE_16B;
		/* Store Event properties */
		ch->evt_props.ev_ctx.update_rp_modc = 0;
		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
		ch->evt_props.ev_ctx.ertype = 1;
		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
			GSI_EVT_RING_RE_SIZE_16B;
		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
		ch->evt_props.device_db =
			ipa_mpm_ctx->dev_info.erdb_base +
			ch->chan_props.ch_ctx.erindex * 8;
		/* connect Host GSI pipes with MHI' protocol */
		ret = ipa_mpm_connect_mhip_gsi_pipe(ul_prod,
			probe_id, &ul_out_params);
		if (ret) {
			IPA_MPM_ERR("failed connecting MPM client %d\n",
				ul_prod);
			goto fail_gsi_setup;
		}
		/* Map the host doorbell into the PCIe SMMU for the device */
		ch->evt_props.ev_ctx.update_rp_addr =
			ipa_mpm_smmu_map_doorbell(
				MHIP_SMMU_DOMAIN_PCIE,
				ul_out_params.db_reg_phs_addr_lsb);
		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
			ipa_assert();
		ipa_mpm_ctx->md[probe_id].ul_prod.db_device_iova =
			ch->evt_props.ev_ctx.update_rp_addr;

		ret = __ipa_mpm_configure_mhi_device(
			ch, probe_id, DMA_TO_HIPA);
		if (ret) {
			IPA_MPM_ERR("configure_mhi_dev fail %d\n",
				ret);
			goto fail_smmu;
		}
	}
	if (dl_cons != IPA_CLIENT_MAX) {
		/* store DL channel properties */
		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
		/* Store Channel properties */
		ch->chan_props.id = mhi_dev->dl_chan_id;
		ch->chan_props.device_db =
			ipa_mpm_ctx->dev_info.chdb_base +
			ch->chan_props.id * 8;
		/* Fill Channel Conext to be be sent to Dev side */
		ch->chan_props.ch_ctx.chstate = 1;
		ch->chan_props.ch_ctx.chtype =
			IPA_MPM_MHI_HOST_DL_CHANNEL;
		ch->chan_props.ch_ctx.erindex = mhi_dev->dl_event_id;
		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
			GSI_EVT_RING_RE_SIZE_16B;
		/* Store Event properties */
		ch->evt_props.ev_ctx.update_rp_modc = 0;
		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
		ch->evt_props.ev_ctx.ertype = 1;
		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
			GSI_EVT_RING_RE_SIZE_16B;
		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
		ch->evt_props.device_db =
			ipa_mpm_ctx->dev_info.erdb_base +
			ch->chan_props.ch_ctx.erindex * 8;
		/* connect Host GSI pipes with MHI' protocol */
		ret = ipa_mpm_connect_mhip_gsi_pipe(dl_cons,
			probe_id, &dl_out_params);
		if (ret) {
			IPA_MPM_ERR("connecting MPM client = %d failed\n",
				dl_cons);
			goto fail_gsi_setup;
		}
		ch->evt_props.ev_ctx.update_rp_addr =
			ipa_mpm_smmu_map_doorbell(
				MHIP_SMMU_DOMAIN_PCIE,
				dl_out_params.db_reg_phs_addr_lsb);
		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
			ipa_assert();
		ipa_mpm_ctx->md[probe_id].dl_cons.db_device_iova =
			ch->evt_props.ev_ctx.update_rp_addr;

		ret = __ipa_mpm_configure_mhi_device(ch, probe_id,
			DMA_FROM_HIPA);
		if (ret) {
			IPA_MPM_ERR("mpm_config_mhi_dev failed %d\n", ret);
			goto fail_smmu;
		}
	}
	ret = mhi_prepare_for_transfer(ipa_mpm_ctx->md[probe_id].mhi_dev);
	if (ret) {
		IPA_MPM_ERR("mhi_prepare_for_transfer failed %d\n", ret);
		WARN_ON(1);
		/*
		 * WA to handle prepare_for_tx failures.
		 * Though prepare for transfer fails, indicate success
		 * to MHI driver. remove_cb will be called eventually when
		 * Device side comes from where pending cleanup happens.
		 */
		mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
		atomic_inc(&ipa_mpm_ctx->probe_cnt);
		ipa_mpm_ctx->md[probe_id].init_complete = false;
		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
		IPA_MPM_FUNC_EXIT();
		return 0;
	}
	/* mhi_prepare_for_transfer translates to starting remote channels */
	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
	ipa_mpm_ctx->md[probe_id].remote_state = MPM_MHIP_REMOTE_START;
	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
	/*
	 * Ring initial channel db - Host Side UL and Device side DL channel.
	 * To ring doorbell, write "WP" into doorbell register.
	 * This WP should be set to 1 element less than ring max.
	 */

	/* Ring UL PRODUCER TRANSFER RING (HOST IPA -> DEVICE IPA) Doorbell */
	if (ul_prod != IPA_CLIENT_MAX) {
		IPA_MPM_DBG("Host UL TR PA DB = 0X%0x\n",
			ul_out_params.db_reg_phs_addr_lsb);

		db_addr = ioremap(
			(phys_addr_t)(ul_out_params.db_reg_phs_addr_lsb), 4);

		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);

		IPA_MPM_DBG("Host UL TR DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);

		iounmap(db_addr);
		ipa_mpm_read_channel(ul_prod);

		/* Ring UL PRODUCER EVENT RING (HOST IPA -> DEVICE IPA) Doorbell
		 * Ring the event DB to a value outside the
		 * ring range such that rp and wp never meet.
		 */
		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
			IPA_MPM_ERR("fail to alloc EP.\n");
			goto fail_start_channel;
		}
		ep = &ipa3_ctx->ep[ipa_ep_idx];

		IPA_MPM_DBG("for ep_idx %d , gsi_evt_ring_hdl = %ld\n",
			ipa_ep_idx, ep->gsi_evt_ring_hdl);
		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
			&evt_ring_db_addr_low, &evt_ring_db_addr_high);

		IPA_MPM_DBG("Host UL ER PA DB = 0X%0x\n",
			evt_ring_db_addr_low);

		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);

		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.er_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
		IPA_MPM_DBG("Host UL ER DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);

		iowrite32(wp_addr, db_addr);
		iounmap(db_addr);

		/* Ring DEVICE IPA DL CONSUMER Event Doorbell */
		db_addr = ioremap((phys_addr_t)
			(ipa_mpm_ctx->md[probe_id].ul_prod.evt_props.device_db),
			4);

		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);
		iounmap(db_addr);
	}

	/* Ring DL PRODUCER (DEVICE IPA -> HOST IPA) Doorbell */
	if (dl_cons != IPA_CLIENT_MAX) {
		db_addr = ioremap((phys_addr_t)
			(ipa_mpm_ctx->md[probe_id].dl_cons.chan_props.device_db),
			4);

		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);

		IPA_MPM_DBG("Device DL TR DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);

		iowrite32(wp_addr, db_addr);
		iounmap(db_addr);

		/*
		 * Ring event ring DB on Device side.
		 * ipa_mpm should ring the event DB to a value outside the
		 * ring range such that rp and wp never meet.
		 */
		db_addr =
		ioremap(
		(phys_addr_t)
		(ipa_mpm_ctx->md[probe_id].dl_cons.evt_props.device_db),
		4);

		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.er_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);
		IPA_MPM_DBG("Device UL ER DB = 0X%pK,wp_addr = 0X%0x",
			db_addr, wp_addr);
		iounmap(db_addr);

		/* Ring DL EVENT RING CONSUMER (DEVICE IPA CONSUMER) Doorbell */
		ipa_ep_idx = ipa3_get_ep_mapping(dl_cons);
		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
			IPA_MPM_ERR("fail to alloc EP.\n");
			goto fail_start_channel;
		}
		ep = &ipa3_ctx->ep[ipa_ep_idx];
		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
			&evt_ring_db_addr_low, &evt_ring_db_addr_high);
		IPA_MPM_DBG("Host DL ER PA DB = 0X%0x\n",
			evt_ring_db_addr_low);
		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);

		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
		iowrite32(wp_addr, db_addr);
		IPA_MPM_DBG("Host DL ER DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);
		iounmap(db_addr);
	}

	/* Check if TETH connection is in progress.
	 * If teth isn't started by now, then Stop UL channel.
	 */
	switch (ipa_mpm_ctx->md[probe_id].teth_state) {
	case IPA_MPM_TETH_INIT:
		/*
		 * Make sure to stop Device side channels before
		 * stopping Host side UL channels. This is to make
		 * sure Device side doesn't access host side IPA if
		 * Host IPA gets unvoted.
		 */
		ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
			MPM_MHIP_STOP, true);
		if (ret) {
			/*
			 * This can fail only when modem is in SSR.
			 * Eventually there would be a remove callback,
			 * so return a failure.
			 */
			IPA_MPM_ERR("MHIP remote chan stop fail = %d\n", ret);
			return ret;
		}
		if (ul_prod != IPA_CLIENT_MAX) {
			/* No teth started yet, disable UL channel */
			ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
				IPA_MPM_ERR("fail to alloc EP.\n");
				goto fail_stop_channel;
			}
			ret = ipa3_stop_gsi_channel(ipa_ep_idx);
			if (ret) {
				IPA_MPM_ERR("MHIP Stop channel err = %d\n",
					ret);
				goto fail_stop_channel;
			}
			ipa_mpm_change_gsi_state(probe_id,
				IPA_MPM_MHIP_CHAN_UL,
				GSI_STOPPED);
		}
		if (is_acted)
			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
				true, &is_acted);
		break;
	case IPA_MPM_TETH_INPROGRESS:
	case IPA_MPM_TETH_CONNECTED:
		IPA_MPM_DBG("UL channel is already started, continue\n");
		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);

		/* Lift the delay for rmnet USB prod pipe */
		if (probe_id == IPA_MPM_MHIP_CH_ID_1) {
			pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
			ipa3_xdci_ep_delay_rm(pipe_idx);
		}
		break;
	default:
		IPA_MPM_DBG("No op for UL channel, in teth state = %d",
			ipa_mpm_ctx->md[probe_id].teth_state);
		break;
	}

	atomic_inc(&ipa_mpm_ctx->probe_cnt);

	/* Check if ODL/USB DPL pipe is connected before probe */
	if (probe_id == IPA_MPM_MHIP_CH_ID_2) {
		if (ipa3_is_odl_connected())
			ret = ipa_mpm_set_dma_mode(
				IPA_CLIENT_MHI_PRIME_DPL_PROD,
				IPA_CLIENT_ODL_DPL_CONS, false);
		else if (atomic_read(&ipa_mpm_ctx->adpl_over_usb_available))
			ret = ipa_mpm_set_dma_mode(
				IPA_CLIENT_MHI_PRIME_DPL_PROD,
				IPA_CLIENT_USB_DPL_CONS, false);
		if (ret)
			IPA_MPM_ERR("DPL DMA to ODL/USB failed, ret = %d\n",
				ret);
	}
	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
	ipa_mpm_ctx->md[probe_id].init_complete = true;
	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
	/* Update Flow control Monitoring, only for the teth UL Prod pipes */
	if (probe_id == IPA_MPM_MHIP_CH_ID_0) {
		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
		ep = &ipa3_ctx->ep[ipa_ep_idx];
		ret = ipa3_uc_send_enable_flow_control(ep->gsi_chan_hdl,
			IPA_MPM_RING_LEN / 4);
		if (ret) {
			IPA_MPM_ERR("Err %d flow control enable\n", ret);
			goto fail_flow_control;
		}
		IPA_MPM_DBG("Flow Control enabled for %d", probe_id);
		flow_ctrl_mask = atomic_read(&ipa_mpm_ctx->flow_ctrl_mask);
		add_delete = flow_ctrl_mask > 0 ? 1 : 0;
		ret = ipa3_uc_send_update_flow_control(flow_ctrl_mask,
			add_delete);
		if (ret) {
			IPA_MPM_ERR("Err %d flow control update\n", ret);
			goto fail_flow_control;
		}
		IPA_MPM_DBG("Flow Control updated for %d", probe_id);
	}
	IPA_MPM_FUNC_EXIT();
	return 0;

fail_gsi_setup:
fail_start_channel:
fail_stop_channel:
fail_smmu:
fail_flow_control:
	/* Common unwind: drop clock votes taken earlier in this probe */
	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
		IPA_MPM_DBG("SMMU failed\n");
	if (is_acted)
		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id, true,
			&is_acted);
	ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, probe_id);
	ipa_assert();
	return ret;
}
  2192. static void ipa_mpm_init_mhip_channel_info(void)
  2193. {
  2194. /* IPA_MPM_MHIP_CH_ID_0 => MHIP TETH PIPES */
  2195. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ipa_client =
  2196. IPA_CLIENT_MHI_PRIME_TETH_PROD;
  2197. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ep_cfg =
  2198. mhip_dl_teth_ep_cfg;
  2199. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ipa_client =
  2200. IPA_CLIENT_MHI_PRIME_TETH_CONS;
  2201. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ep_cfg =
  2202. mhip_ul_teth_ep_cfg;
  2203. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].mhip_client =
  2204. IPA_MPM_MHIP_TETH;
  2205. /* IPA_MPM_MHIP_CH_ID_1 => MHIP RMNET PIPES */
  2206. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ipa_client =
  2207. IPA_CLIENT_MHI_PRIME_RMNET_PROD;
  2208. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ep_cfg =
  2209. mhip_dl_rmnet_ep_cfg;
  2210. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ipa_client =
  2211. IPA_CLIENT_MHI_PRIME_RMNET_CONS;
  2212. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ep_cfg =
  2213. mhip_ul_rmnet_ep_cfg;
  2214. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].mhip_client =
  2215. IPA_MPM_MHIP_USB_RMNET;
  2216. /* IPA_MPM_MHIP_CH_ID_2 => MHIP ADPL PIPE */
  2217. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ipa_client =
  2218. IPA_CLIENT_MHI_PRIME_DPL_PROD;
  2219. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ep_cfg =
  2220. mhip_dl_dpl_ep_cfg;
  2221. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].ul_prod.ipa_client =
  2222. IPA_CLIENT_MAX;
  2223. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].mhip_client =
  2224. IPA_MPM_MHIP_USB_DPL;
  2225. }
  2226. static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
  2227. {
  2228. int mhip_idx;
  2229. IPA_MPM_FUNC_ENTRY();
  2230. for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
  2231. if (mhi_dev == ipa_mpm_ctx->md[mhip_idx].mhi_dev)
  2232. break;
  2233. }
  2234. if (mhip_idx >= IPA_MPM_MHIP_CH_ID_MAX) {
  2235. IPA_MPM_DBG("remove_cb for mhip_idx = %d not probed before\n",
  2236. mhip_idx);
  2237. return;
  2238. }
  2239. IPA_MPM_DBG("remove_cb for mhip_idx = %d", mhip_idx);
  2240. mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
  2241. ipa_mpm_ctx->md[mhip_idx].init_complete = false;
  2242. mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
  2243. if (mhip_idx == IPA_MPM_MHIP_CH_ID_0)
  2244. ipa3_uc_send_disable_flow_control();
  2245. ipa_mpm_mhip_shutdown(mhip_idx);
  2246. atomic_dec(&ipa_mpm_ctx->probe_cnt);
  2247. if (atomic_read(&ipa_mpm_ctx->probe_cnt) == 0) {
  2248. /* Last probe done, reset Everything here */
  2249. ipa_mpm_ctx->mhi_parent_dev = NULL;
  2250. ipa_mpm_ctx->carved_smmu_cb.next_addr =
  2251. ipa_mpm_ctx->carved_smmu_cb.va_start;
  2252. atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
  2253. for (mhip_idx = 0;
  2254. mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
  2255. atomic_set(
  2256. &ipa_mpm_ctx->md[mhip_idx].clk_cnt.pcie_clk_cnt,
  2257. 0);
  2258. }
  2259. }
  2260. IPA_MPM_FUNC_EXIT();
  2261. }
/*
 * ipa_mpm_mhi_status_cb() - MHI runtime state-change callback.
 * @mhi_dev: MHI device the event is reported for.
 * @mhi_cb: event code from the MHI core.
 *
 * Handles low-power-mode transitions for the MHIP channel that owns
 * @mhi_dev: on LPM enter the DL host channel is stopped and the IPA
 * clock vote is dropped; on LPM exit the vote is restored and the
 * channel restarted. All other events are ignored or just logged.
 */
static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
	enum MHI_CB mhi_cb)
{
	int mhip_idx;
	enum mhip_status_type status;

	IPA_MPM_DBG("%d\n", mhi_cb);
	/* Map the MHI device back to its probed MHIP channel index. */
	for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
		if (mhi_dev == ipa_mpm_ctx->md[mhip_idx].mhi_dev)
			break;
	}
	if (mhip_idx >= IPA_MPM_MHIP_CH_ID_MAX) {
		IPA_MPM_DBG("ignoring secondary callbacks\n");
		return;
	}

	/* Do nothing while SSR teardown/re-probe owns this channel. */
	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
	if (!ipa_mpm_ctx->md[mhip_idx].init_complete) {
		/*
		 * SSR might be in progress, dont have to vote/unvote for
		 * IPA clocks as it will be taken care in remove_cb/subsequent
		 * probe.
		 */
		IPA_MPM_DBG("SSR in progress, return\n");
		mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
		return;
	}
	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);

	switch (mhi_cb) {
	case MHI_CB_IDLE:
		break;
	case MHI_CB_LPM_ENTER:
		/* in_lpm guards against repeated enter notifications. */
		if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
			/* Stop DL first, then drop the IPA clock vote. */
			status = ipa_mpm_start_stop_mhip_chan(
				IPA_MPM_MHIP_CHAN_DL,
				mhip_idx, MPM_MHIP_STOP);
			IPA_MPM_DBG("status = %d\n", status);
			ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
			ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
		} else {
			IPA_MPM_DBG("Already in lpm\n");
		}
		break;
	case MHI_CB_LPM_EXIT:
		if (ipa_mpm_ctx->md[mhip_idx].in_lpm) {
			/* Vote IPA clock back before restarting DL. */
			ipa_mpm_vote_unvote_ipa_clk(CLK_ON, mhip_idx);
			status = ipa_mpm_start_stop_mhip_chan(
				IPA_MPM_MHIP_CHAN_DL,
				mhip_idx, MPM_MHIP_START);
			IPA_MPM_DBG("status = %d\n", status);
			ipa_mpm_ctx->md[mhip_idx].in_lpm = false;
		} else {
			IPA_MPM_DBG("Already out of lpm\n");
		}
		break;
	case MHI_CB_EE_RDDM:
	case MHI_CB_PENDING_DATA:
	case MHI_CB_SYS_ERROR:
	case MHI_CB_FATAL_ERROR:
	case MHI_CB_EE_MISSION_MODE:
	case MHI_CB_DTR_SIGNAL:
		/* Not expected for MHIP devices; log and ignore. */
		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
		break;
	}
}
  2325. static void ipa_mpm_mhip_map_prot(enum ipa_usb_teth_prot prot,
  2326. enum ipa_mpm_mhip_client_type *mhip_client)
  2327. {
  2328. switch (prot) {
  2329. case IPA_USB_RNDIS:
  2330. *mhip_client = IPA_MPM_MHIP_TETH;
  2331. break;
  2332. case IPA_USB_RMNET:
  2333. *mhip_client = IPA_MPM_MHIP_USB_RMNET;
  2334. break;
  2335. case IPA_USB_DIAG:
  2336. *mhip_client = IPA_MPM_MHIP_USB_DPL;
  2337. break;
  2338. default:
  2339. *mhip_client = IPA_MPM_MHIP_NONE;
  2340. break;
  2341. }
  2342. IPA_MPM_DBG("Mapped xdci prot %d -> MHIP prot %d\n", prot,
  2343. *mhip_client);
  2344. }
  2345. int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
  2346. {
  2347. int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
  2348. int i;
  2349. enum ipa_mpm_mhip_client_type mhip_client;
  2350. enum mhip_status_type status;
  2351. int pipe_idx;
  2352. bool is_acted = true;
  2353. int ret = 0;
  2354. if (ipa_mpm_ctx == NULL) {
  2355. IPA_MPM_ERR("MPM not platform probed yet, returning ..\n");
  2356. return 0;
  2357. }
  2358. ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
  2359. for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
  2360. if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
  2361. probe_id = i;
  2362. break;
  2363. }
  2364. }
  2365. if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
  2366. IPA_MPM_ERR("Unknown probe_id\n");
  2367. return 0;
  2368. }
  2369. IPA_MPM_DBG("Connect xdci prot %d -> mhip_client = %d probe_id = %d\n",
  2370. xdci_teth_prot, mhip_client, probe_id);
  2371. ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
  2372. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id,
  2373. false, &is_acted);
  2374. if (ret) {
  2375. IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
  2376. return ret;
  2377. }
  2378. /*
  2379. * Make sure to start Device side channels before
  2380. * starting Host side UL channels. This is to make
  2381. * sure device side access host side IPA only when
  2382. * Host IPA gets voted.
  2383. */
  2384. ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
  2385. MPM_MHIP_START, false);
  2386. if (ret) {
  2387. /*
  2388. * This can fail only when modem is in SSR state.
  2389. * Eventually there would be a remove callback,
  2390. * so return a failure. Dont have to unvote PCIE here.
  2391. */
  2392. IPA_MPM_ERR("MHIP remote chan start fail = %d\n",
  2393. ret);
  2394. return ret;
  2395. }
  2396. IPA_MPM_DBG("MHIP remote channel start success\n");
  2397. switch (mhip_client) {
  2398. case IPA_MPM_MHIP_USB_RMNET:
  2399. ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
  2400. IPA_CLIENT_MHI_PRIME_RMNET_CONS, false);
  2401. break;
  2402. case IPA_MPM_MHIP_USB_DPL:
  2403. IPA_MPM_DBG("connecting DPL prot %d\n", mhip_client);
  2404. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
  2405. atomic_set(&ipa_mpm_ctx->adpl_over_usb_available, 1);
  2406. return 0;
  2407. default:
  2408. IPA_MPM_DBG("mhip_client = %d not processed\n", mhip_client);
  2409. if (is_acted) {
  2410. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  2411. false, &is_acted);
  2412. if (ret) {
  2413. IPA_MPM_ERR("Err unvoting PCIe clk, err = %d\n",
  2414. ret);
  2415. return ret;
  2416. }
  2417. }
  2418. return 0;
  2419. }
  2420. if (mhip_client != IPA_MPM_MHIP_USB_DPL)
  2421. /* Start UL MHIP channel for offloading teth connection */
  2422. status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
  2423. probe_id,
  2424. MPM_MHIP_START);
  2425. switch (status) {
  2426. case MHIP_STATUS_SUCCESS:
  2427. case MHIP_STATUS_NO_OP:
  2428. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
  2429. pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
  2430. /* Lift the delay for rmnet USB prod pipe */
  2431. ipa3_xdci_ep_delay_rm(pipe_idx);
  2432. if (status == MHIP_STATUS_NO_OP && is_acted) {
  2433. /* Channels already have been started,
  2434. * we can devote for pcie clocks
  2435. */
  2436. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  2437. false, &is_acted);
  2438. }
  2439. break;
  2440. case MHIP_STATUS_EP_NOT_READY:
  2441. if (is_acted)
  2442. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  2443. false, &is_acted);
  2444. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INPROGRESS);
  2445. break;
  2446. case MHIP_STATUS_FAIL:
  2447. case MHIP_STATUS_BAD_STATE:
  2448. case MHIP_STATUS_EP_NOT_FOUND:
  2449. IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
  2450. if (is_acted)
  2451. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  2452. false, &is_acted);
  2453. ret = -EFAULT;
  2454. break;
  2455. default:
  2456. if (is_acted)
  2457. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  2458. false, &is_acted);
  2459. IPA_MPM_ERR("Err not found\n");
  2460. break;
  2461. }
  2462. return ret;
  2463. }
  2464. int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
  2465. {
  2466. int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
  2467. int i;
  2468. enum ipa_mpm_mhip_client_type mhip_client;
  2469. enum mhip_status_type status;
  2470. int ret = 0;
  2471. bool is_acted = true;
  2472. if (ipa_mpm_ctx == NULL) {
  2473. IPA_MPM_ERR("MPM not platform probed, returning ..\n");
  2474. return 0;
  2475. }
  2476. ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
  2477. for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
  2478. if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
  2479. probe_id = i;
  2480. break;
  2481. }
  2482. }
  2483. if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
  2484. IPA_MPM_ERR("Invalid probe_id\n");
  2485. return 0;
  2486. }
  2487. IPA_MPM_ERR("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
  2488. xdci_teth_prot, mhip_client, probe_id);
  2489. /*
  2490. * Make sure to stop Device side channels before
  2491. * stopping Host side UL channels. This is to make
  2492. * sure device side doesn't access host side IPA if
  2493. * Host IPA gets unvoted.
  2494. */
  2495. ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
  2496. MPM_MHIP_STOP, false);
  2497. if (ret) {
  2498. /*
  2499. * This can fail only when modem is in SSR state.
  2500. * Eventually there would be a remove callback,
  2501. * so return a failure.
  2502. */
  2503. IPA_MPM_ERR("MHIP remote chan stop fail = %d\n", ret);
  2504. return ret;
  2505. }
  2506. IPA_MPM_DBG("MHIP remote channels are stopped\n");
  2507. switch (mhip_client) {
  2508. case IPA_MPM_MHIP_USB_RMNET:
  2509. ret = ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
  2510. IPA_CLIENT_APPS_LAN_CONS, true);
  2511. if (ret) {
  2512. IPA_MPM_ERR("failed to reset dma mode\n");
  2513. return ret;
  2514. }
  2515. break;
  2516. case IPA_MPM_MHIP_TETH:
  2517. IPA_MPM_DBG("Rndis Disconnect, wait for wan_state ioctl\n");
  2518. return 0;
  2519. case IPA_MPM_MHIP_USB_DPL:
  2520. IPA_MPM_DBG("Teth Disconnecting for DPL\n");
  2521. /* change teth state only if ODL is disconnected */
  2522. if (!ipa3_is_odl_connected()) {
  2523. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
  2524. ipa_mpm_ctx->md[probe_id].mhip_client =
  2525. IPA_MPM_MHIP_NONE;
  2526. }
  2527. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  2528. false, &is_acted);
  2529. if (ret)
  2530. IPA_MPM_ERR("Error clking off PCIe clk err%d\n", ret);
  2531. atomic_set(&ipa_mpm_ctx->adpl_over_usb_available, 0);
  2532. return ret;
  2533. default:
  2534. IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
  2535. return 0;
  2536. }
  2537. status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
  2538. probe_id, MPM_MHIP_STOP);
  2539. switch (status) {
  2540. case MHIP_STATUS_SUCCESS:
  2541. case MHIP_STATUS_NO_OP:
  2542. case MHIP_STATUS_EP_NOT_READY:
  2543. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
  2544. break;
  2545. case MHIP_STATUS_FAIL:
  2546. case MHIP_STATUS_BAD_STATE:
  2547. case MHIP_STATUS_EP_NOT_FOUND:
  2548. IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
  2549. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  2550. false, &is_acted);
  2551. return -EFAULT;
  2552. default:
  2553. IPA_MPM_ERR("Err not found\n");
  2554. break;
  2555. }
  2556. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
  2557. false, &is_acted);
  2558. if (ret) {
  2559. IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n", ret);
  2560. return ret;
  2561. }
  2562. ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
  2563. return ret;
  2564. }
  2565. static int ipa_mpm_populate_smmu_info(struct platform_device *pdev)
  2566. {
  2567. struct ipa_smmu_in_params smmu_in;
  2568. struct ipa_smmu_out_params smmu_out;
  2569. u32 carved_iova_ap_mapping[2];
  2570. struct ipa_smmu_cb_ctx *cb;
  2571. struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
  2572. int ret = 0;
  2573. if (ipa_mpm_ctx->carved_smmu_cb.valid) {
  2574. IPA_MPM_DBG("SMMU Context allocated, returning ..\n");
  2575. return ret;
  2576. }
  2577. cb = &ipa_mpm_ctx->carved_smmu_cb;
  2578. /* get IPA SMMU enabled status */
  2579. smmu_in.smmu_client = IPA_SMMU_AP_CLIENT;
  2580. if (ipa_get_smmu_params(&smmu_in, &smmu_out))
  2581. ipa_mpm_ctx->dev_info.ipa_smmu_enabled = false;
  2582. else
  2583. ipa_mpm_ctx->dev_info.ipa_smmu_enabled =
  2584. smmu_out.smmu_enable;
  2585. /* get cache_coherent enable or not */
  2586. ipa_mpm_ctx->dev_info.is_cache_coherent = ap_cb->is_cache_coherent;
  2587. if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iova-mapping",
  2588. carved_iova_ap_mapping, 2)) {
  2589. IPA_MPM_ERR("failed to read of_node %s\n",
  2590. "qcom,mpm-iova-mapping");
  2591. return -EINVAL;
  2592. }
  2593. ipa_mpm_ctx->dev_info.pcie_smmu_enabled = true;
  2594. if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled !=
  2595. ipa_mpm_ctx->dev_info.pcie_smmu_enabled) {
  2596. IPA_MPM_DBG("PCIE/IPA SMMU config mismatch\n");
  2597. return -EINVAL;
  2598. }
  2599. cb->va_start = carved_iova_ap_mapping[0];
  2600. cb->va_size = carved_iova_ap_mapping[1];
  2601. cb->va_end = cb->va_start + cb->va_size;
  2602. if (cb->va_end >= ap_cb->va_start) {
  2603. IPA_MPM_ERR("MPM iommu and AP overlap addr 0x%lx\n",
  2604. cb->va_start);
  2605. ipa_assert();
  2606. return -EFAULT;
  2607. }
  2608. cb->dev = ipa_mpm_ctx->dev_info.dev;
  2609. cb->valid = true;
  2610. cb->next_addr = cb->va_start;
  2611. if (dma_set_mask_and_coherent(ipa_mpm_ctx->dev_info.dev,
  2612. DMA_BIT_MASK(64))) {
  2613. IPA_MPM_ERR("setting DMA mask to 64 failed.\n");
  2614. return -EINVAL;
  2615. }
  2616. return ret;
  2617. }
/*
 * ipa_mpm_probe() - Platform probe for the IPA MPM driver.
 * @pdev: MPM platform device.
 *
 * Allocates the global MPM context, reads MHI doorbell bases from DT,
 * populates SMMU info and registers the MHI driver. If IPA core is not
 * ready yet, defers by registering a ready callback instead.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EFAULT on any
 * other probe failure.
 */
static int ipa_mpm_probe(struct platform_device *pdev)
{
	int ret = 0;
	int i = 0;
	int idx = 0;

	IPA_MPM_FUNC_ENTRY();

	if (ipa_mpm_ctx) {
		IPA_MPM_DBG("MPM is already probed, returning\n");
		return 0;
	}

	ret = ipa_register_ipa_ready_cb(ipa_mpm_ipa3_ready_cb, (void *)pdev);
	/*
	 * If we received -EEXIST, IPA has initialized. So we need
	 * to continue the probing process.
	 * NOTE(review): any non-zero return — not only -EEXIST — falls
	 * through to the probe below; confirm other error codes cannot
	 * be returned here.
	 */
	if (!ret) {
		/* Callback registered: probe resumes via ready_cb. */
		IPA_MPM_DBG("IPA not ready yet, registering callback\n");
		return ret;
	}
	IPA_MPM_DBG("IPA is ready, continue with probe\n");

	ipa_mpm_ctx = kzalloc(sizeof(*ipa_mpm_ctx), GFP_KERNEL);
	if (!ipa_mpm_ctx)
		return -ENOMEM;

	/* Per-channel locks: teth state (mutex) and MHI state (mhi_mutex). */
	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
		mutex_init(&ipa_mpm_ctx->md[i].mutex);
		mutex_init(&ipa_mpm_ctx->md[i].mhi_mutex);
	}

	ipa_mpm_ctx->dev_info.pdev = pdev;
	ipa_mpm_ctx->dev_info.dev = &pdev->dev;

	ipa_mpm_init_mhip_channel_info();

	/* MHI channel/event-ring doorbell bases from DT. */
	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base",
		&ipa_mpm_ctx->dev_info.chdb_base)) {
		IPA_MPM_ERR("failed to read qcom,mhi-chdb-base\n");
		goto fail_probe;
	}
	IPA_MPM_DBG("chdb-base=0x%x\n", ipa_mpm_ctx->dev_info.chdb_base);

	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base",
		&ipa_mpm_ctx->dev_info.erdb_base)) {
		IPA_MPM_ERR("failed to read qcom,mhi-erdb-base\n");
		goto fail_probe;
	}
	IPA_MPM_DBG("erdb-base=0x%x\n", ipa_mpm_ctx->dev_info.erdb_base);

	ret = ipa_mpm_populate_smmu_info(pdev);
	if (ret) {
		IPA_MPM_DBG("SMMU Config failed\n");
		goto fail_probe;
	}

	atomic_set(&ipa_mpm_ctx->ipa_clk_total_cnt, 0);
	atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
	atomic_set(&ipa_mpm_ctx->flow_ctrl_mask, 0);

	for (idx = 0; idx < IPA_MPM_MHIP_CH_ID_MAX; idx++) {
		ipa_mpm_ctx->md[idx].ul_prod.gsi_state = GSI_INIT;
		ipa_mpm_ctx->md[idx].dl_cons.gsi_state = GSI_INIT;
		atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.ipa_clk_cnt, 0);
		atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.pcie_clk_cnt, 0);
	}

	ret = mhi_driver_register(&mhi_driver);
	if (ret) {
		IPA_MPM_ERR("mhi_driver_register failed %d\n", ret);
		goto fail_probe;
	}
	IPA_MPM_FUNC_EXIT();
	return 0;

fail_probe:
	/* NOTE(review): collapses all probe errors to -EFAULT. */
	kfree(ipa_mpm_ctx);
	ipa_mpm_ctx = NULL;
	return -EFAULT;
}
  2686. static int ipa_mpm_remove(struct platform_device *pdev)
  2687. {
  2688. IPA_MPM_FUNC_ENTRY();
  2689. mhi_driver_unregister(&mhi_driver);
  2690. IPA_MPM_FUNC_EXIT();
  2691. return 0;
  2692. }
/* Device-tree match table: binds this driver to "qcom,ipa-mpm" nodes. */
static const struct of_device_id ipa_mpm_dt_match[] = {
	{ .compatible = "qcom,ipa-mpm" },
	{},
};
MODULE_DEVICE_TABLE(of, ipa_mpm_dt_match);

/* Platform driver glue; probe/remove drive MHI driver registration. */
static struct platform_driver ipa_ipa_mpm_driver = {
	.driver = {
		.name = "ipa_mpm",
		.of_match_table = ipa_mpm_dt_match,
	},
	.probe = ipa_mpm_probe,
	.remove = ipa_mpm_remove,
};
/**
 * ipa_mpm_init() - Registers ipa_mpm as a platform device for a APQ
 *
 * This function is called after bootup for APQ device.
 * ipa_mpm will register itself as a platform device, and probe
 * function will get called.
 *
 * Return: 0 on success, negative errno from platform_driver_register()
 * on failure.
 */
static int __init ipa_mpm_init(void)
{
	IPA_MPM_DBG("register ipa_mpm platform device\n");
	return platform_driver_register(&ipa_ipa_mpm_driver);
}
/* Module exit: unregister the ipa_mpm platform driver. */
static void __exit ipa_mpm_exit(void)
{
	IPA_MPM_DBG("unregister ipa_mpm platform device\n");
	platform_driver_unregister(&ipa_ipa_mpm_driver);
}
  2725. /**
  2726. * ipa3_is_mhip_offload_enabled() - check if IPA MPM module was initialized
  2727. * successfully. If it is initialized, MHIP is enabled for teth
  2728. *
  2729. * Return value: 1 for yes; 0 for no
  2730. */
  2731. int ipa3_is_mhip_offload_enabled(void)
  2732. {
  2733. if (ipa_mpm_ctx == NULL)
  2734. return 0;
  2735. else
  2736. return 1;
  2737. }
  2738. int ipa_mpm_panic_handler(char *buf, int size)
  2739. {
  2740. int i;
  2741. int cnt = 0;
  2742. cnt = scnprintf(buf, size,
  2743. "\n---- MHIP Active Clients Table ----\n");
  2744. cnt += scnprintf(buf + cnt, size - cnt,
  2745. "Total PCIe active clients count: %d\n",
  2746. atomic_read(&ipa_mpm_ctx->pcie_clk_total_cnt));
  2747. cnt += scnprintf(buf + cnt, size - cnt,
  2748. "Total IPA active clients count: %d\n",
  2749. atomic_read(&ipa_mpm_ctx->ipa_clk_total_cnt));
  2750. for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
  2751. cnt += scnprintf(buf + cnt, size - cnt,
  2752. "client id: %d ipa vote cnt: %d pcie vote cnt\n", i,
  2753. atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.ipa_clk_cnt),
  2754. atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.pcie_clk_cnt));
  2755. }
  2756. return cnt;
  2757. }
  2758. /**
  2759. * ipa3_get_mhip_gsi_stats() - Query MHIP gsi stats from uc
  2760. * @stats: [inout] stats blob from client populated by driver
  2761. *
  2762. * Returns: 0 on success, negative on failure
  2763. *
  2764. * @note Cannot be called from atomic context
  2765. *
  2766. */
  2767. int ipa3_get_mhip_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
  2768. {
  2769. int i;
  2770. if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
  2771. IPAERR("bad parms NULL mhip_gsi_stats_mmio\n");
  2772. return -EINVAL;
  2773. }
  2774. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  2775. for (i = 0; i < MAX_MHIP_CHANNELS; i++) {
  2776. stats->ring[i].ringFull = ioread32(
  2777. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2778. + i * IPA3_UC_DEBUG_STATS_OFF +
  2779. IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
  2780. stats->ring[i].ringEmpty = ioread32(
  2781. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2782. + i * IPA3_UC_DEBUG_STATS_OFF +
  2783. IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
  2784. stats->ring[i].ringUsageHigh = ioread32(
  2785. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2786. + i * IPA3_UC_DEBUG_STATS_OFF +
  2787. IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
  2788. stats->ring[i].ringUsageLow = ioread32(
  2789. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2790. + i * IPA3_UC_DEBUG_STATS_OFF +
  2791. IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
  2792. stats->ring[i].RingUtilCount = ioread32(
  2793. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2794. + i * IPA3_UC_DEBUG_STATS_OFF +
  2795. IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
  2796. }
  2797. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  2798. return 0;
  2799. }
  2800. /**
  2801. * ipa3_mpm_enable_adpl_over_odl() - Enable or disable ADPL over ODL
  2802. * @enable: true for enable, false for disable
  2803. *
  2804. * Returns: 0 on success, negative on failure
  2805. *
  2806. */
  2807. int ipa3_mpm_enable_adpl_over_odl(bool enable)
  2808. {
  2809. int ret;
  2810. bool is_acted = true;
  2811. IPA_MPM_FUNC_ENTRY();
  2812. if (!ipa3_is_mhip_offload_enabled()) {
  2813. IPA_MPM_ERR("mpm ctx is NULL\n");
  2814. return -EPERM;
  2815. }
  2816. if (enable) {
  2817. /* inc clk count and set DMA to ODL */
  2818. IPA_MPM_DBG("mpm enabling ADPL over ODL\n");
  2819. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON,
  2820. IPA_MPM_MHIP_CH_ID_2, false, &is_acted);
  2821. if (ret) {
  2822. IPA_MPM_ERR("Err %d cloking on PCIe clk\n", ret);
  2823. return ret;
  2824. }
  2825. ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
  2826. IPA_CLIENT_ODL_DPL_CONS, false);
  2827. if (ret) {
  2828. IPA_MPM_ERR("MPM failed to set dma mode to ODL\n");
  2829. if (is_acted)
  2830. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
  2831. IPA_MPM_MHIP_CH_ID_2,
  2832. false,
  2833. &is_acted);
  2834. return ret;
  2835. }
  2836. ipa_mpm_change_teth_state(IPA_MPM_MHIP_CH_ID_2,
  2837. IPA_MPM_TETH_CONNECTED);
  2838. } else {
  2839. /* dec clk count and set DMA to USB */
  2840. IPA_MPM_DBG("mpm disabling ADPL over ODL\n");
  2841. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
  2842. IPA_MPM_MHIP_CH_ID_2,
  2843. false,
  2844. &is_acted);
  2845. if (ret) {
  2846. IPA_MPM_ERR("Err %d cloking off PCIe clk\n",
  2847. ret);
  2848. return ret;
  2849. }
  2850. ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
  2851. IPA_CLIENT_USB_DPL_CONS, false);
  2852. if (ret) {
  2853. IPA_MPM_ERR("MPM failed to set dma mode to USB\n");
  2854. if (ipa_mpm_vote_unvote_pcie_clk(CLK_ON,
  2855. IPA_MPM_MHIP_CH_ID_2,
  2856. false,
  2857. &is_acted))
  2858. IPA_MPM_ERR("Err clocking on pcie\n");
  2859. return ret;
  2860. }
  2861. /* If USB is not available then reset teth state */
  2862. if (atomic_read(&ipa_mpm_ctx->adpl_over_usb_available)) {
  2863. IPA_MPM_DBG("mpm enabling ADPL over USB\n");
  2864. } else {
  2865. ipa_mpm_change_teth_state(IPA_MPM_MHIP_CH_ID_2,
  2866. IPA_MPM_TETH_INIT);
  2867. IPA_MPM_DBG("USB disconnected. ADPL on standby\n");
  2868. }
  2869. }
  2870. IPA_MPM_FUNC_EXIT();
  2871. return ret;
  2872. }
/* Register late in boot; ipa_mpm_probe re-checks IPA core readiness. */
late_initcall(ipa_mpm_init);
module_exit(ipa_mpm_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Proxy Manager Driver");