ipa_mpm.c 76 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/dma-mapping.h>
  6. #include <linux/list.h>
  7. #include <linux/slab.h>
  8. #include <linux/device.h>
  9. #include <linux/module.h>
  10. #include <linux/mhi.h>
  11. #include <linux/msm_gsi.h>
  12. #include <linux/delay.h>
  13. #include <linux/log2.h>
  14. #include "../ipa_common_i.h"
  15. #include "ipa_i.h"
#define IPA_MPM_DRV_NAME "ipa_mpm"

/*
 * Debug log: printed via pr_debug and copied to BOTH the normal and
 * low-priority IPC logging buffers.
 */
#define IPA_MPM_DBG(fmt, args...) \
	do { \
		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/*
 * Low-priority debug log: pr_debug plus only the low-priority IPC
 * buffer (for chatty paths).
 */
#define IPA_MPM_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/*
 * Error log: pr_err plus both IPC logging buffers.
 */
#define IPA_MPM_ERR(fmt, args...) \
	do { \
		pr_err(IPA_MPM_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Function entry/exit tracing helpers */
#define IPA_MPM_FUNC_ENTRY() \
	IPA_MPM_DBG("ENTRY\n")
#define IPA_MPM_FUNC_EXIT() \
	IPA_MPM_DBG("EXIT\n")
/* Number of UL/DL MHIP channel pairs managed by this driver */
#define IPA_MPM_MAX_MHIP_CHAN 3

/* Total descriptors allocated per ring */
#define IPA_MPM_NUM_RING_DESC 0x400
/* Usable ring length; keeps a few descriptors of headroom */
#define IPA_MPM_RING_LEN (IPA_MPM_NUM_RING_DESC - 10)

/* Host-side MHI channel ids for the UL/DL pair */
#define IPA_MPM_MHI_HOST_UL_CHANNEL 4
#define IPA_MPM_MHI_HOST_DL_CHANNEL 5

/* Tethering aggregation parameters */
#define TETH_AGGR_TIME_LIMIT 10000 /* 10ms */
#define TETH_AGGR_BYTE_LIMIT 24
#define TETH_AGGR_DL_BYTE_LIMIT 16

/* Size of each TRE data buffer in bytes (32 KB) */
#define TRE_BUFF_SIZE 32768

/* HOLB (head-of-line blocking) timer enable/disable values */
#define IPA_HOLB_TMR_EN 0x1
#define IPA_HOLB_TMR_DIS 0x0

#define RNDIS_IPA_DFLT_RT_HDL 0

/* Polling parameters for pipe-emptiness / channel-stop-in-progress */
#define IPA_POLL_FOR_EMPTINESS_NUM 50
#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
/* Ring-element types written into the mhi_p_desc re_type field */
enum mhip_re_type {
	MHIP_RE_XFER = 0x2,
	MHIP_RE_NOP = 0x4,
};

/* Index of each UL/DL MHIP channel pair; order matches
 * mhi_driver_match_table below.
 */
enum ipa_mpm_mhi_ch_id_type {
	IPA_MPM_MHIP_CH_ID_0,
	IPA_MPM_MHIP_CH_ID_1,
	IPA_MPM_MHIP_CH_ID_2,
	IPA_MPM_MHIP_CH_ID_MAX,
};

/* DMA direction expressed relative to the host IPA
 * (values mirror enum dma_data_direction).
 */
enum ipa_mpm_dma_data_direction {
	DMA_HIPA_BIDIRECTIONAL = 0,
	DMA_TO_HIPA = 1,
	DMA_FROM_HIPA = 2,
	DMA_HIPA_NONE = 3,
};

/* Tethered client type on the host side */
enum ipa_mpm_ipa_teth_client_type {
	IPA_MPM_MHIP_USB,
	IPA_MPM_MHIP_WIFI,
};

/* Logical MHIP client bound to a channel pair */
enum ipa_mpm_mhip_client_type {
	IPA_MPM_MHIP_INIT,
	/* USB RMNET CLIENT */
	IPA_MPM_MHIP_USB_RMNET,
	/* USB RNDIS / WIFI CLIENT */
	IPA_MPM_MHIP_TETH,
	/* USB DPL CLIENT */
	IPA_MPM_MHIP_USB_DPL,
	IPA_MPM_MHIP_NONE,
};

/* Clock vote / unvote request selector */
enum ipa_mpm_clk_vote_type {
	CLK_ON,
	CLK_OFF,
};

/* Result codes for MHIP channel start/stop operations */
enum mhip_status_type {
	MHIP_STATUS_SUCCESS,
	MHIP_STATUS_NO_OP,
	MHIP_STATUS_FAIL,
	MHIP_STATUS_BAD_STATE,
	MHIP_STATUS_EP_NOT_FOUND,
	MHIP_STATUS_EP_NOT_READY,
};

/* Which SMMU context bank a ring/doorbell should be mapped into */
enum mhip_smmu_domain_type {
	MHIP_SMMU_DOMAIN_IPA,
	MHIP_SMMU_DOMAIN_PCIE,
	MHIP_SMMU_DOMAIN_NONE,
};

/* Start/stop selector for channel and data-path operations */
enum ipa_mpm_start_stop_type {
	MPM_MHIP_STOP,
	MPM_MHIP_START,
};
/* each pair of UL/DL channels are defined below */
static const struct mhi_device_id mhi_driver_match_table[] = {
	{ .chan = "IP_HW_MHIP_0" }, /* for rndis/Wifi teth pipes */
	{ .chan = "IP_HW_MHIP_1" }, /* for MHIP rmnet */
	{ .chan = "IP_HW_ADPL" }, /* ADPL/ODL DL pipe */
};

/* Printable client name per channel index; order must stay in sync
 * with mhi_driver_match_table (index 0 = TETH, 1 = RMNET, 2 = DPL).
 */
static const char *ipa_mpm_mhip_chan_str[IPA_MPM_MHIP_CH_ID_MAX] = {
	__stringify(IPA_MPM_MHIP_TETH),
	__stringify(IPA_MPM_MHIP_USB_RMNET),
	__stringify(IPA_MPM_MHIP_USB_DPL),
};
/*
 * MHI PRIME GSI Descriptor format that Host IPA uses.
 */
struct __packed mhi_p_desc {
	uint64_t buffer_ptr;	/* iova of the data buffer */
	uint16_t buff_len;	/* buffer length in bytes */
	uint16_t resvd1;
	uint16_t chain : 1;	/* TRE chained with the next one */
	uint16_t resvd4 : 7;
	uint16_t ieob : 1;	/* interrupt on end of block */
	uint16_t ieot : 1;	/* interrupt on end of transfer */
	uint16_t bei : 1;	/* block event interrupt */
	uint16_t sct : 1;
	uint16_t resvd3 : 4;
	uint8_t re_type;	/* enum mhip_re_type */
	uint8_t resvd2;
};

/*
 * MHI PRIME Channel Context and Event Context Array
 * Information that is sent to Device IPA.
 */
struct ipa_mpm_channel_context_type {
	u32 chstate : 8;	/* channel state */
	u32 reserved1 : 24;
	u32 chtype;		/* channel type/direction */
	u32 erindex;		/* event ring index serving this channel */
	u64 rbase;		/* transfer ring base address */
	u64 rlen;		/* transfer ring length in bytes */
	u64 reserved2;
	u64 reserved3;
} __packed;

struct ipa_mpm_event_context_type {
	u32 reserved1 : 8;
	u32 update_rp_modc : 8;		/* read-ptr update moderation count */
	u32 update_rp_intmodt : 16;	/* read-ptr update interrupt mod timer */
	u32 ertype;			/* event ring type */
	u32 update_rp_addr;		/* address to write read-ptr updates to */
	u64 rbase;			/* event ring base address */
	u64 rlen;			/* event ring length in bytes */
	u32 buff_size : 16;
	u32 reserved2 : 16;
	u32 reserved3;
	u64 reserved4;
} __packed;

/* IPA client and its endpoint configuration for one pipe */
struct ipa_mpm_pipes_info_type {
	enum ipa_client_type ipa_client;
	struct ipa_ep_cfg ep_cfg;
};

/* One UL/DL pipe pair plus the MHIP client it serves */
struct ipa_mpm_channel_type {
	struct ipa_mpm_pipes_info_type dl_cons;
	struct ipa_mpm_pipes_info_type ul_prod;
	enum ipa_mpm_mhip_client_type mhip_client;
};

/* Static pipe configuration table, indexed by channel id */
static struct ipa_mpm_channel_type ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_MAX];
/* For configuring IPA_CLIENT_MHI_PRIME_TETH_CONS */
static struct ipa_ep_cfg mhip_dl_teth_ep_cfg = {
	.mode = {
		.mode = IPA_BASIC,
		.dst = IPA_CLIENT_MHI_PRIME_TETH_CONS,
	},
	.hdr = {
		.hdr_len = 4,
		.hdr_ofst_metadata_valid = 1,
		.hdr_ofst_metadata = 1,
		.hdr_ofst_pkt_size_valid = 1,
		.hdr_ofst_pkt_size = 2,
	},
	.hdr_ext = {
		.hdr_total_len_or_pad_valid = true,
		.hdr_payload_len_inc_padding = true,
	},
	/* DL side de-aggregates QMAP frames coming from the device */
	.aggr = {
		.aggr_en = IPA_ENABLE_DEAGGR,
		.aggr = IPA_QCMAP,
		.aggr_byte_limit = TETH_AGGR_DL_BYTE_LIMIT,
		.aggr_time_limit = TETH_AGGR_TIME_LIMIT,
	},
};

static struct ipa_ep_cfg mhip_ul_teth_ep_cfg = {
	.mode = {
		.mode = IPA_BASIC,
		.dst = IPA_CLIENT_MHI_PRIME_TETH_PROD,
	},
	.hdr = {
		.hdr_len = 4,
		.hdr_ofst_metadata_valid = 1,
		.hdr_ofst_metadata = 0,
		.hdr_ofst_pkt_size_valid = 1,
		.hdr_ofst_pkt_size = 2,
	},
	.hdr_ext = {
		.hdr_total_len_or_pad_valid = true,
		.hdr_payload_len_inc_padding = true,
	},
	/* UL side aggregates packets into QMAP frames toward the device */
	.aggr = {
		.aggr_en = IPA_ENABLE_AGGR,
		.aggr = IPA_QCMAP,
		.aggr_byte_limit = TETH_AGGR_BYTE_LIMIT,
		.aggr_time_limit = TETH_AGGR_TIME_LIMIT,
	},
};

/* WARNING!! Temporary for rndis intgration only */
/* For configuring IPA_CLIENT_MHIP_RMNET_PROD */
static struct ipa_ep_cfg mhip_dl_rmnet_ep_cfg = {
	.mode = {
		.mode = IPA_DMA,
		.dst = IPA_CLIENT_USB_CONS,
	},
};

/* For configuring IPA_CLIENT_MHIP_RMNET_CONS */
static struct ipa_ep_cfg mhip_ul_rmnet_ep_cfg = {
	.mode = {
		.mode = IPA_DMA,
		/* NOTE(review): same dst as the DL config above - looks
		 * intentional for DMA passthrough, but confirm UL should
		 * also target IPA_CLIENT_USB_CONS.
		 */
		.dst = IPA_CLIENT_USB_CONS,
	},
};

/* For configuring IPA_CLIENT_MHIP_DPL_PROD */
static struct ipa_ep_cfg mhip_dl_dpl_ep_cfg = {
	.mode = {
		.mode = IPA_DMA,
		.dst = IPA_CLIENT_USB_DPL_CONS,
	},
};
/* A base/size pair describing one carved-out iova region */
struct ipa_mpm_iova_addr {
	dma_addr_t base;
	unsigned int size;
};

/* Device-level info discovered at probe time */
struct ipa_mpm_dev_info {
	struct platform_device *pdev;
	struct device *dev;
	bool ipa_smmu_enabled;		/* host IPA SMMU present */
	bool pcie_smmu_enabled;		/* PCIe SMMU present */
	struct ipa_mpm_iova_addr ctrl;	/* control iova region */
	struct ipa_mpm_iova_addr data;	/* data iova region */
	u32 chdb_base;			/* channel doorbell base */
	u32 erdb_base;			/* event ring doorbell base */
	bool is_cache_coherent;		/* add IOMMU_CACHE to mappings */
};

/* Event ring id, its doorbell PA and the context sent to the device */
struct ipa_mpm_event_props {
	u16 id;
	phys_addr_t device_db;
	struct ipa_mpm_event_context_type ev_ctx;
};

/* Channel id, its doorbell PA and the context sent to the device */
struct ipa_mpm_channel_props {
	u16 id;
	phys_addr_t device_db;
	struct ipa_mpm_channel_context_type ch_ctx;
};

/* Lifecycle state of a GSI channel */
enum ipa_mpm_gsi_state {
	GSI_ERR,
	GSI_INIT,
	GSI_ALLOCATED,
	GSI_STARTED,
	GSI_STOPPED,
};

/* One MHIP channel: properties, GSI state and mapped doorbell iovas */
struct ipa_mpm_channel {
	struct ipa_mpm_channel_props chan_props;
	struct ipa_mpm_event_props evt_props;
	enum ipa_mpm_gsi_state gsi_state;
	dma_addr_t db_host_iova;
	dma_addr_t db_device_iova;
};

/* Tethering bring-up state machine */
enum ipa_mpm_teth_state {
	IPA_MPM_TETH_INIT = 0,
	IPA_MPM_TETH_INPROGRESS,
	IPA_MPM_TETH_CONNECTED,
};

/* Direction selector for per-channel operations */
enum ipa_mpm_mhip_chan {
	IPA_MPM_MHIP_CHAN_UL,
	IPA_MPM_MHIP_CHAN_DL,
	IPA_MPM_MHIP_CHAN_BOTH,
};

/* Per-probe clock vote counters */
struct ipa_mpm_clk_cnt_type {
	atomic_t pcie_clk_cnt;
	atomic_t ipa_clk_cnt;
};

/* Transfer/event rings and their buffers for one producer direction */
struct producer_rings {
	struct mhi_p_desc *tr_va;	/* transfer ring VA */
	struct mhi_p_desc *er_va;	/* event ring VA */
	void *tre_buff_va[IPA_MPM_RING_LEN];	/* per-TRE buffer VAs */
	dma_addr_t tr_pa;		/* transfer ring iova */
	dma_addr_t er_pa;		/* event ring iova */
	dma_addr_t tre_buff_iova[IPA_MPM_RING_LEN];
	/*
	 * The iova generated for AP CB,
	 * used only for dma_map_single to flush the cache.
	 */
	dma_addr_t ap_iova_er;
	dma_addr_t ap_iova_tr;
	dma_addr_t ap_iova_buff[IPA_MPM_RING_LEN];
};

/* Per-MHI-device state for one channel pair */
struct ipa_mpm_mhi_driver {
	struct mhi_device *mhi_dev;
	struct producer_rings ul_prod_ring;
	struct producer_rings dl_prod_ring;
	struct ipa_mpm_channel ul_prod;
	struct ipa_mpm_channel dl_cons;
	enum ipa_mpm_mhip_client_type mhip_client;
	enum ipa_mpm_teth_state teth_state;
	struct mutex mutex;		/* protects this entry's state */
	bool init_complete;
	struct mutex lpm_mutex;		/* protects in_lpm */
	bool in_lpm;			/* device in low-power mode */
	struct ipa_mpm_clk_cnt_type clk_cnt;
};

/* Top-level MPM context (singleton, see ipa_mpm_ctx) */
struct ipa_mpm_context {
	struct ipa_mpm_dev_info dev_info;
	struct ipa_mpm_mhi_driver md[IPA_MPM_MAX_MHIP_CHAN];
	struct mutex mutex;		/* protects context-wide state */
	atomic_t probe_cnt;
	atomic_t pcie_clk_total_cnt;
	atomic_t ipa_clk_total_cnt;
	struct device *parent_pdev;
	struct ipa_smmu_cb_ctx carved_smmu_cb;	/* carved iova allocator */
	struct device *mhi_parent_dev;	/* device owning the PCIe SMMU */
};
#define IPA_MPM_DESC_SIZE (sizeof(struct mhi_p_desc))
#define IPA_MPM_RING_TOTAL_SIZE (IPA_MPM_RING_LEN * IPA_MPM_DESC_SIZE)
/* WA: Make the IPA_MPM_PAGE_SIZE from 16k (next power of ring size) to
 * 32k. This is to make sure IOMMU map happens for the same size
 * for all TR/ER and doorbells.
 */
#define IPA_MPM_PAGE_SIZE TRE_BUFF_SIZE

/* Singleton MPM context, allocated at probe */
static struct ipa_mpm_context *ipa_mpm_ctx;
/* Platform device cached for the deferred (IPA-ready) probe */
static struct platform_device *m_pdev;

/* Forward declarations */
static int ipa_mpm_mhi_probe_cb(struct mhi_device *,
	const struct mhi_device_id *);
static void ipa_mpm_mhi_remove_cb(struct mhi_device *);
static void ipa_mpm_mhi_status_cb(struct mhi_device *, enum MHI_CB);
static void ipa_mpm_change_teth_state(int probe_id,
	enum ipa_mpm_teth_state ip_state);
static void ipa_mpm_change_gsi_state(int probe_id,
	enum ipa_mpm_mhip_chan mhip_chan,
	enum ipa_mpm_gsi_state next_state);
static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
	enum ipa_mpm_start_stop_type start);
static int ipa_mpm_probe(struct platform_device *pdev);
static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
	int probe_id);
static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
	int probe_id);
static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
	enum ipa_mpm_mhip_chan mhip_chan,
	int probe_id,
	enum ipa_mpm_start_stop_type start_stop);

/* MHI bus driver registration: one probe per match-table entry */
static struct mhi_driver mhi_driver = {
	.id_table = mhi_driver_match_table,
	.probe = ipa_mpm_mhi_probe_cb,
	.remove = ipa_mpm_mhi_remove_cb,
	.status_cb = ipa_mpm_mhi_status_cb,
	.driver = {
		.name = IPA_MPM_DRV_NAME,
		.owner = THIS_MODULE,
	},
};
  379. static void ipa_mpm_ipa3_delayed_probe(struct work_struct *work)
  380. {
  381. (void)ipa_mpm_probe(m_pdev);
  382. }
  383. static DECLARE_WORK(ipa_mpm_ipa3_scheduled_probe, ipa_mpm_ipa3_delayed_probe);
  384. static void ipa_mpm_ipa3_ready_cb(void *user_data)
  385. {
  386. struct platform_device *pdev = (struct platform_device *)(user_data);
  387. m_pdev = pdev;
  388. IPA_MPM_DBG("IPA ready callback has been triggered\n");
  389. schedule_work(&ipa_mpm_ipa3_scheduled_probe);
  390. }
/* GSI event-ring error callback: should never fire; assert if it does. */
static void ipa_mpm_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *err_data)
{
	IPA_MPM_ERR("GSI EVT RING ERROR, not expected..\n");
	ipa_assert();
}
/* GSI channel error callback: should never fire; assert if it does. */
static void ipa_mpm_gsi_chan_err_cb(struct gsi_chan_err_notify *err_data)
{
	IPA_MPM_ERR("GSI CHAN ERROR, not expected..\n");
	ipa_assert();
}
/**
 * ipa_mpm_smmu_map() - SMMU maps ring and the buffer pointer.
 * @va_addr: virtual address that needs to be mapped
 * @sz: size of the address to be mapped
 * @dir: ipa_mpm_dma_data_direction
 * @ap_cb_iova: iova for AP context bank
 *
 * This function SMMU maps both ring and the buffer pointer.
 * The ring pointers will be aligned to ring size and
 * the buffer pointers should be aligned to buffer size.
 *
 * Returns: iova of the mapped address
 */
static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
	int sz,
	int dir,
	dma_addr_t *ap_cb_iova)
{
	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
	phys_addr_t phys_addr;
	dma_addr_t iova;
	int smmu_enabled;
	unsigned long iova_p;
	phys_addr_t pa_p;
	u32 size_p;
	int prot = IOMMU_READ | IOMMU_WRITE;
	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
	/* Next free slot in the shared carved-out iova range,
	 * rounded up to IPA_MPM_PAGE_SIZE alignment.
	 */
	unsigned long carved_iova = roundup(cb->next_addr, IPA_MPM_PAGE_SIZE);
	int ret = 0;

	/* check cache coherent */
	if (ipa_mpm_ctx->dev_info.is_cache_coherent) {
		IPA_MPM_DBG(" enable cache coherent\n");
		prot |= IOMMU_CACHE;
	}

	if (carved_iova >= cb->va_end) {
		IPA_MPM_ERR("running out of carved_iova %lx\n", carved_iova);
		ipa_assert();
	}

	/*
	 * Both Host IPA and PCIE SMMU should be enabled or disabled
	 * for proceed.
	 * If SMMU Enabled => iova == pa
	 * If SMMU Disabled => iova == iommu mapped iova
	 * dma_map_single ensures cache is flushed and the memory is not
	 * touched again until dma_unmap_single() is called
	 */
	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;

	if (smmu_enabled) {
		/* Map the phys addr to both PCIE and IPA AP CB
		 * from the carved out common iova range.
		 */
		ipa_smmu_domain = ipa3_get_smmu_domain();
		if (!ipa_smmu_domain) {
			IPA_MPM_ERR("invalid IPA smmu domain\n");
			ipa_assert();
		}

		if (!ipa_mpm_ctx->mhi_parent_dev) {
			IPA_MPM_ERR("invalid PCIE SMMU domain\n");
			ipa_assert();
		}

		/* NOTE(review): assumes va_addr is linearly-mapped
		 * (kmalloc'd) memory; virt_to_phys() is not valid for
		 * vmalloc addresses - confirm all callers.
		 */
		phys_addr = virt_to_phys((void *) va_addr);

		IPA_SMMU_ROUND_TO_PAGE(carved_iova, phys_addr, sz,
			iova_p, pa_p, size_p);

		/* Flush the cache with dma_map_single for IPA AP CB */
		/* NOTE(review): always maps IPA_MPM_RING_TOTAL_SIZE here
		 * regardless of @sz, and the result is not checked with
		 * dma_mapping_error() - confirm intended.
		 */
		*ap_cb_iova = dma_map_single(ipa3_ctx->pdev, va_addr,
			IPA_MPM_RING_TOTAL_SIZE, dir);
		ret = ipa3_iommu_map(ipa_smmu_domain, iova_p,
			pa_p, size_p, prot);
		if (ret) {
			IPA_MPM_ERR("IPA IOMMU returned failure, ret = %d\n",
				ret);
			ipa_assert();
		}

		/* Mirror the same iova->pa mapping into the PCIe SMMU so
		 * the device sees the same address as host IPA.
		 */
		pcie_smmu_domain = iommu_get_domain_for_dev(
			ipa_mpm_ctx->mhi_parent_dev);
		if (!pcie_smmu_domain) {
			IPA_MPM_ERR("invalid pcie smmu domain\n");
			ipa_assert();
		}
		ret = iommu_map(pcie_smmu_domain, iova_p, pa_p, size_p, prot);
		if (ret) {
			IPA_MPM_ERR("PCIe IOMMU returned failure, ret = %d\n",
				ret);
			ipa_assert();
		}

		iova = iova_p;
		/* Advance the carved-iova allocator past this mapping */
		cb->next_addr = iova_p + size_p;
	} else {
		/* No SMMU: a plain streaming DMA mapping is sufficient */
		iova = dma_map_single(ipa3_ctx->pdev, va_addr,
			IPA_MPM_RING_TOTAL_SIZE, dir);
		*ap_cb_iova = iova;
	}
	return iova;
}
  496. /**
  497. * ipa_mpm_smmu_unmap() - SMMU unmaps ring and the buffer pointer.
  498. * @va_addr: virtual address that needs to be mapped
  499. * @sz: size of the address to be mapped
  500. * @dir: ipa_mpm_dma_data_direction
  501. * @ap_cb_iova: iova for AP context bank
  502. *
  503. * This function SMMU unmaps both ring and the buffer pointer.
  504. * The ring pointers will be aligned to ring size and
  505. * the buffer pointers should be aligned to buffer size.
  506. *
  507. * Return: none
  508. */
  509. static void ipa_mpm_smmu_unmap(dma_addr_t carved_iova, int sz, int dir,
  510. dma_addr_t ap_cb_iova)
  511. {
  512. unsigned long iova_p;
  513. unsigned long pa_p;
  514. u32 size_p = 0;
  515. struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
  516. struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
  517. int smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
  518. ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
  519. if (carved_iova <= 0) {
  520. IPA_MPM_ERR("carved_iova is zero/negative\n");
  521. WARN_ON(1);
  522. return;
  523. }
  524. if (smmu_enabled) {
  525. ipa_smmu_domain = ipa3_get_smmu_domain();
  526. if (!ipa_smmu_domain) {
  527. IPA_MPM_ERR("invalid IPA smmu domain\n");
  528. ipa_assert();
  529. }
  530. if (!ipa_mpm_ctx->mhi_parent_dev) {
  531. IPA_MPM_ERR("invalid PCIE SMMU domain\n");
  532. ipa_assert();
  533. }
  534. IPA_SMMU_ROUND_TO_PAGE(carved_iova, carved_iova, sz,
  535. iova_p, pa_p, size_p);
  536. pcie_smmu_domain = iommu_get_domain_for_dev(
  537. ipa_mpm_ctx->mhi_parent_dev);
  538. if (pcie_smmu_domain) {
  539. iommu_unmap(pcie_smmu_domain, iova_p, size_p);
  540. } else {
  541. IPA_MPM_ERR("invalid PCIE SMMU domain\n");
  542. ipa_assert();
  543. }
  544. iommu_unmap(ipa_smmu_domain, iova_p, size_p);
  545. cb->next_addr -= size_p;
  546. dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
  547. IPA_MPM_RING_TOTAL_SIZE, dir);
  548. } else {
  549. dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
  550. IPA_MPM_RING_TOTAL_SIZE, dir);
  551. }
  552. }
  553. static u32 ipa_mpm_smmu_map_doorbell(enum mhip_smmu_domain_type smmu_domain,
  554. u32 pa_addr)
  555. {
  556. /*
  557. * Doorbells are already in PA, map these to
  558. * PCIE/IPA doman if SMMUs are enabled.
  559. */
  560. struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
  561. int smmu_enabled;
  562. unsigned long iova_p;
  563. phys_addr_t pa_p;
  564. u32 size_p;
  565. int ret = 0;
  566. int prot = IOMMU_READ | IOMMU_WRITE;
  567. struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
  568. unsigned long carved_iova = roundup(cb->next_addr, IPA_MPM_PAGE_SIZE);
  569. u32 iova = 0;
  570. u64 offset = 0;
  571. /* check cache coherent */
  572. if (ipa_mpm_ctx->dev_info.is_cache_coherent) {
  573. IPA_MPM_DBG(" enable cache coherent\n");
  574. prot |= IOMMU_CACHE;
  575. }
  576. if (carved_iova >= cb->va_end) {
  577. IPA_MPM_ERR("running out of carved_iova %lx\n", carved_iova);
  578. ipa_assert();
  579. }
  580. smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
  581. ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
  582. if (smmu_enabled) {
  583. IPA_SMMU_ROUND_TO_PAGE(carved_iova, pa_addr, IPA_MPM_PAGE_SIZE,
  584. iova_p, pa_p, size_p);
  585. if (smmu_domain == MHIP_SMMU_DOMAIN_IPA) {
  586. ipa_smmu_domain = ipa3_get_smmu_domain();
  587. if (!ipa_smmu_domain) {
  588. IPA_MPM_ERR("invalid IPA smmu domain\n");
  589. ipa_assert();
  590. }
  591. ret = ipa3_iommu_map(ipa_smmu_domain,
  592. iova_p, pa_p, size_p, prot);
  593. if (ret) {
  594. IPA_MPM_ERR("IPA doorbell mapping failed\n");
  595. ipa_assert();
  596. }
  597. offset = pa_addr - pa_p;
  598. } else if (smmu_domain == MHIP_SMMU_DOMAIN_PCIE) {
  599. pcie_smmu_domain = iommu_get_domain_for_dev(
  600. ipa_mpm_ctx->mhi_parent_dev);
  601. if (!pcie_smmu_domain) {
  602. IPA_MPM_ERR("invalid IPA smmu domain\n");
  603. ipa_assert();
  604. }
  605. ret = iommu_map(pcie_smmu_domain,
  606. iova_p, pa_p, size_p, prot);
  607. if (ret) {
  608. IPA_MPM_ERR("PCIe doorbell mapping failed\n");
  609. ipa_assert();
  610. }
  611. offset = pa_addr - pa_p;
  612. }
  613. iova = iova_p + offset;
  614. cb->next_addr = iova_p + IPA_MPM_PAGE_SIZE;
  615. } else {
  616. iova = pa_addr;
  617. }
  618. return iova;
  619. }
/*
 * ipa_mpm_smmu_unmap_doorbell() - unmap a doorbell iova from one SMMU
 * domain. No-op when SMMUs are disabled (the "iova" was a plain PA).
 */
static void ipa_mpm_smmu_unmap_doorbell(enum mhip_smmu_domain_type smmu_domain,
	dma_addr_t iova)
{
	/*
	 * Doorbells are already in PA, map these to
	 * PCIE/IPA doman if SMMUs are enabled.
	 */
	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
	int smmu_enabled;
	unsigned long iova_p;
	phys_addr_t pa_p;
	u32 size_p;
	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;

	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;

	if (smmu_enabled) {
		IPA_SMMU_ROUND_TO_PAGE(iova, iova, IPA_MPM_PAGE_SIZE,
			iova_p, pa_p, size_p);
		if (smmu_domain == MHIP_SMMU_DOMAIN_IPA) {
			ipa_smmu_domain = ipa3_get_smmu_domain();
			if (ipa_smmu_domain) {
				iommu_unmap(ipa_smmu_domain, iova_p, size_p);
			} else {
				IPA_MPM_ERR("invalid IPA smmu domain\n");
				ipa_assert();
			}
		} else if (smmu_domain == MHIP_SMMU_DOMAIN_PCIE) {
			pcie_smmu_domain = iommu_get_domain_for_dev(
				ipa_mpm_ctx->mhi_parent_dev);
			if (pcie_smmu_domain) {
				iommu_unmap(pcie_smmu_domain, iova_p, size_p);
			} else {
				IPA_MPM_ERR("invalid PCIE smmu domain\n");
				ipa_assert();
			}
			/* NOTE(review): the carved-iova allocator is only
			 * rewound on the PCIE branch, not the IPA branch -
			 * confirm this asymmetry is intentional.
			 */
			cb->next_addr -= IPA_MPM_PAGE_SIZE;
		}
	}
}
  659. static int get_idx_from_id(const struct mhi_device_id *id)
  660. {
  661. return (id - mhi_driver_match_table);
  662. }
  663. static void get_ipa3_client(int id,
  664. enum ipa_client_type *ul_prod,
  665. enum ipa_client_type *dl_cons)
  666. {
  667. IPA_MPM_FUNC_ENTRY();
  668. if (id >= IPA_MPM_MHIP_CH_ID_MAX) {
  669. *ul_prod = IPA_CLIENT_MAX;
  670. *dl_cons = IPA_CLIENT_MAX;
  671. } else {
  672. *ul_prod = ipa_mpm_pipes[id].ul_prod.ipa_client;
  673. *dl_cons = ipa_mpm_pipes[id].dl_cons.ipa_client;
  674. }
  675. IPA_MPM_FUNC_EXIT();
  676. }
/*
 * Allocate and start one MHIP GSI pipe for @mhip_client on MHI channel
 * @mhi_idx: allocates the event ring, transfer ring and per-TRE data
 * buffers, SMMU-maps them, maps the device doorbell, requests and starts
 * the GSI channel, and records everything in ipa_mpm_ctx->md[mhi_idx].
 *
 * @mhip_client: IPA client for this pipe; PROD => device->host (DL data
 *               path state kept in dl_prod_ring/dl_cons), otherwise UL
 *               (ul_prod_ring/ul_prod).
 * @mhi_idx:     MHIP channel index, must be in
 *               [IPA_MPM_MHIP_CH_ID_0, IPA_MPM_MHIP_CH_ID_MAX).
 * @out_params:  filled by ipa3_request_gsi_channel().
 *
 * Returns 0 on success (also when the EP is already allocated) or
 * -EFAULT on parameter errors. All mid-setup failures funnel into
 * ipa_assert() — the intermediate allocations are not individually
 * unwound.
 */
static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
	int mhi_idx, struct ipa_req_chan_out_params *out_params)
{
	int ipa_ep_idx;
	int res;
	struct mhi_p_desc *ev_ring;
	struct mhi_p_desc *tr_ring;
	int tr_ring_sz, ev_ring_sz;
	dma_addr_t ev_ring_iova, tr_ring_iova;
	dma_addr_t ap_cb_iova;
	dma_addr_t ap_cb_er_iova;
	struct ipa_request_gsi_channel_params gsi_params;
	int dir;
	int i;
	void *buff;
	int result;
	int k;
	struct ipa3_ep_context *ep;

	if (mhip_client == IPA_CLIENT_MAX)
		goto fail_gen;
	if ((mhi_idx < IPA_MPM_MHIP_CH_ID_0) ||
		(mhi_idx >= IPA_MPM_MHIP_CH_ID_MAX))
		goto fail_gen;
	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
		IPA_MPM_ERR("fail to find channel EP.\n");
		goto fail_gen;
	}
	ep = &ipa3_ctx->ep[ipa_ep_idx];
	/* Already connected — treat as success, nothing to (re)build. */
	if (ep->valid == 1) {
		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
		return 0;
	}
	IPA_MPM_DBG("connecting client %d (ep: %d)\n", mhip_client, ipa_ep_idx);
	IPA_MPM_FUNC_ENTRY();

	/* Event and transfer rings are each one IPA_MPM_RING_TOTAL_SIZE
	 * allocation of mhi_p_desc elements.
	 */
	ev_ring_sz = IPA_MPM_RING_TOTAL_SIZE;
	ev_ring = kzalloc(ev_ring_sz, GFP_KERNEL);
	if (!ev_ring)
		goto fail_evt_alloc;
	tr_ring_sz = IPA_MPM_RING_TOTAL_SIZE;
	tr_ring = kzalloc(tr_ring_sz, GFP_KERNEL);
	if (!tr_ring)
		goto fail_tr_alloc;
	/* Element 0 is a NOP; data TREs occupy indices 1..RING_LEN-1. */
	tr_ring[0].re_type = MHIP_RE_NOP;

	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
		DMA_TO_HIPA : DMA_FROM_HIPA;
	/* allocate transfer ring elements */
	for (i = 1, k = 1; i < IPA_MPM_RING_LEN; i++, k++) {
		buff = kzalloc(TRE_BUFF_SIZE, GFP_KERNEL);
		if (!buff)
			goto fail_buff_alloc;
		tr_ring[i].buffer_ptr =
			ipa_mpm_smmu_map(buff, TRE_BUFF_SIZE, dir,
				&ap_cb_iova);
		if (!tr_ring[i].buffer_ptr)
			goto fail_smmu_map_ring;
		/* Keep VA + iova so ipa_mpm_clean_mhip_chan() can
		 * unmap and free each buffer later.
		 */
		if (IPA_CLIENT_IS_PROD(mhip_client)) {
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_va[k] =
				buff;
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_iova[k] =
				tr_ring[i].buffer_ptr;
		} else {
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_va[k] =
				buff;
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_iova[k] =
				tr_ring[i].buffer_ptr;
		}
		tr_ring[i].buff_len = TRE_BUFF_SIZE;
		tr_ring[i].chain = 0;
		tr_ring[i].ieob = 0;
		tr_ring[i].ieot = 0;
		tr_ring[i].bei = 0;
		tr_ring[i].sct = 0;
		tr_ring[i].re_type = MHIP_RE_XFER;
		if (IPA_CLIENT_IS_PROD(mhip_client))
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[k] =
				ap_cb_iova;
		else
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[k] =
				ap_cb_iova;
	}
	/* Map the rings themselves; ap_cb_iova/ap_cb_er_iova receive the
	 * AP context-bank iovas of the TR and ER respectively.
	 */
	tr_ring_iova = ipa_mpm_smmu_map(tr_ring, IPA_MPM_PAGE_SIZE, dir,
		&ap_cb_iova);
	if (!tr_ring_iova)
		goto fail_smmu_map_ring;
	ev_ring_iova = ipa_mpm_smmu_map(ev_ring, IPA_MPM_PAGE_SIZE, dir,
		&ap_cb_er_iova);
	if (!ev_ring_iova)
		goto fail_smmu_map_ring;
	/* Store Producer channel rings */
	if (IPA_CLIENT_IS_PROD(mhip_client)) {
		/* Device UL */
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = ev_ring;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = tr_ring;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa = ev_ring_iova;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa = tr_ring_iova;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr =
			ap_cb_iova;
		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er =
			ap_cb_er_iova;
	} else {
		/* Host UL */
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = ev_ring;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = tr_ring;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = ev_ring_iova;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = tr_ring_iova;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr =
			ap_cb_iova;
		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er =
			ap_cb_er_iova;
	}

	memset(&gsi_params, 0, sizeof(struct ipa_request_gsi_channel_params));
	if (IPA_CLIENT_IS_PROD(mhip_client))
		gsi_params.ipa_ep_cfg =
			ipa_mpm_pipes[mhi_idx].dl_cons.ep_cfg;
	else
		gsi_params.ipa_ep_cfg =
			ipa_mpm_pipes[mhi_idx].ul_prod.ep_cfg;
	gsi_params.client = mhip_client;
	gsi_params.skip_ep_cfg = false;
	/*
	 * RP update address = Device channel DB address
	 * CLIENT_PROD -> Host DL
	 * CLIENT_CONS -> Host UL
	 */
	if (IPA_CLIENT_IS_PROD(mhip_client)) {
		gsi_params.evt_ring_params.rp_update_addr =
			ipa_mpm_smmu_map_doorbell(
				MHIP_SMMU_DOMAIN_IPA,
				ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.device_db);
		if (gsi_params.evt_ring_params.rp_update_addr == 0)
			goto fail_smmu_map_db;
		ipa_mpm_ctx->md[mhi_idx].dl_cons.db_host_iova =
			gsi_params.evt_ring_params.rp_update_addr;
		/* NOTE(review): here the event ring base is tr_pa and the
		 * channel base is er_pa — reversed relative to the CONS
		 * branch below. Looks intentional for MHIP host/device ring
		 * ownership, but confirm against the MHIP spec.
		 */
		gsi_params.evt_ring_params.ring_base_addr =
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
		gsi_params.chan_params.ring_base_addr =
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
	} else {
		gsi_params.evt_ring_params.rp_update_addr =
			ipa_mpm_smmu_map_doorbell(
				MHIP_SMMU_DOMAIN_IPA,
				ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.device_db);
		if (gsi_params.evt_ring_params.rp_update_addr == 0)
			goto fail_smmu_map_db;
		ipa_mpm_ctx->md[mhi_idx].ul_prod.db_host_iova =
			gsi_params.evt_ring_params.rp_update_addr;
		gsi_params.evt_ring_params.ring_base_addr =
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
		gsi_params.chan_params.ring_base_addr =
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
	}
	/* Fill Event ring params */
	gsi_params.evt_ring_params.intf = GSI_EVT_CHTYPE_MHIP_EV;
	gsi_params.evt_ring_params.intr = GSI_INTR_MSI;
	gsi_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B;
	gsi_params.evt_ring_params.ring_len =
		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
	gsi_params.evt_ring_params.ring_base_vaddr = NULL;
	gsi_params.evt_ring_params.int_modt = 0;
	gsi_params.evt_ring_params.int_modc = 0;
	gsi_params.evt_ring_params.intvec = 0;
	gsi_params.evt_ring_params.msi_addr = 0;
	gsi_params.evt_ring_params.exclusive = true;
	gsi_params.evt_ring_params.err_cb = ipa_mpm_gsi_evt_ring_err_cb;
	gsi_params.evt_ring_params.user_data = NULL;
	/* Evt Scratch Params */
	/* Disable the Moderation for ringing doorbells */
	gsi_params.evt_scratch.mhip.rp_mod_threshold = 1;
	gsi_params.evt_scratch.mhip.rp_mod_timer = 0;
	gsi_params.evt_scratch.mhip.rp_mod_counter = 0;
	gsi_params.evt_scratch.mhip.rp_mod_timer_id = 0;
	gsi_params.evt_scratch.mhip.rp_mod_timer_running = 0;
	gsi_params.evt_scratch.mhip.fixed_buffer_sz = TRE_BUFF_SIZE;
	/* Producers get a higher doorbell moderation threshold. */
	if (IPA_CLIENT_IS_PROD(mhip_client))
		gsi_params.evt_scratch.mhip.rp_mod_threshold = 4;
	/* Channel Params */
	gsi_params.chan_params.prot = GSI_CHAN_PROT_MHIP;
	gsi_params.chan_params.dir = IPA_CLIENT_IS_PROD(mhip_client) ?
		GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
	/* chan_id is set in ipa3_request_gsi_channel() */
	gsi_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
	gsi_params.chan_params.ring_len =
		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
	gsi_params.chan_params.ring_base_vaddr = NULL;
	gsi_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE;
	gsi_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
	gsi_params.chan_params.low_weight = 1;
	gsi_params.chan_params.xfer_cb = NULL;
	gsi_params.chan_params.err_cb = ipa_mpm_gsi_chan_err_cb;
	gsi_params.chan_params.chan_user_data = NULL;
	/* Channel scratch */
	gsi_params.chan_scratch.mhip.assert_bit_40 = 0;
	gsi_params.chan_scratch.mhip.host_channel = 1;
	res = ipa3_request_gsi_channel(&gsi_params, out_params);
	if (res) {
		IPA_MPM_ERR("failed to allocate GSI channel res=%d\n", res);
		goto fail_alloc_channel;
	}
	/* Track GSI state per direction: PROD maps to the DL channel,
	 * CONS to the UL channel.
	 */
	if (IPA_CLIENT_IS_PROD(mhip_client))
		ipa_mpm_change_gsi_state(mhi_idx,
			IPA_MPM_MHIP_CHAN_DL,
			GSI_ALLOCATED);
	else
		ipa_mpm_change_gsi_state(mhi_idx,
			IPA_MPM_MHIP_CHAN_UL,
			GSI_ALLOCATED);
	result = ipa3_start_gsi_channel(ipa_ep_idx);
	if (result) {
		IPA_MPM_ERR("start MHIP channel %d failed\n", mhip_client);
		if (IPA_CLIENT_IS_PROD(mhip_client))
			ipa_mpm_change_gsi_state(mhi_idx,
				IPA_MPM_MHIP_CHAN_DL, GSI_ERR);
		else
			ipa_mpm_change_gsi_state(mhi_idx,
				IPA_MPM_MHIP_CHAN_UL, GSI_ERR);
		goto fail_start_channel;
	}
	if (IPA_CLIENT_IS_PROD(mhip_client))
		ipa_mpm_change_gsi_state(mhi_idx,
			IPA_MPM_MHIP_CHAN_DL, GSI_STARTED);
	else
		ipa_mpm_change_gsi_state(mhi_idx,
			IPA_MPM_MHIP_CHAN_UL, GSI_STARTED);
	/* Fill in the Device Context params */
	if (IPA_CLIENT_IS_PROD(mhip_client)) {
		/* This is the DL channel :: Device -> Host */
		ipa_mpm_ctx->md[mhi_idx].dl_cons.evt_props.ev_ctx.rbase =
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
		ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.ch_ctx.rbase =
			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
	} else {
		/* NOTE(review): UL device context assigns ev rbase = tr_pa
		 * and ch rbase = er_pa (swapped vs. the DL branch) —
		 * mirrors the GSI param swap above; confirm intentional.
		 */
		ipa_mpm_ctx->md[mhi_idx].ul_prod.evt_props.ev_ctx.rbase =
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
		ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.ch_ctx.rbase =
			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
	}
	IPA_MPM_FUNC_EXIT();
	return 0;

/* All failure labels below fall through to ipa_assert(); intermediate
 * allocations/mappings are intentionally not unwound here.
 */
fail_start_channel:
	ipa3_disable_data_path(ipa_ep_idx);
	ipa3_stop_gsi_channel(ipa_ep_idx);
fail_alloc_channel:
	ipa3_release_gsi_channel(ipa_ep_idx);
fail_smmu_map_db:
fail_smmu_map_ring:
fail_tr_alloc:
fail_evt_alloc:
fail_buff_alloc:
	ipa_assert();
fail_gen:
	return -EFAULT;
}
  930. static void ipa_mpm_clean_mhip_chan(int mhi_idx,
  931. enum ipa_client_type mhip_client)
  932. {
  933. int dir;
  934. int i;
  935. int ipa_ep_idx;
  936. int result;
  937. IPA_MPM_FUNC_ENTRY();
  938. if (mhip_client == IPA_CLIENT_MAX)
  939. return;
  940. if ((mhi_idx < IPA_MPM_MHIP_CH_ID_0) ||
  941. (mhi_idx >= IPA_MPM_MHIP_CH_ID_MAX))
  942. return;
  943. dir = IPA_CLIENT_IS_PROD(mhip_client) ?
  944. DMA_TO_HIPA : DMA_FROM_HIPA;
  945. ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
  946. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
  947. IPA_MPM_ERR("fail to find channel EP.\n");
  948. return;
  949. }
  950. /* For the uplink channels, enable HOLB. */
  951. if (IPA_CLIENT_IS_CONS(mhip_client))
  952. ipa3_disable_data_path(ipa_ep_idx);
  953. /* Release channel */
  954. result = ipa3_stop_gsi_channel(ipa_ep_idx);
  955. if (result) {
  956. IPA_MPM_ERR("Stop channel for MHIP_Client = %d failed\n",
  957. mhip_client);
  958. goto fail_chan;
  959. }
  960. result = ipa3_reset_gsi_channel(ipa_ep_idx);
  961. if (result) {
  962. IPA_MPM_ERR("Reset channel for MHIP_Client = %d failed\n",
  963. mhip_client);
  964. goto fail_chan;
  965. }
  966. result = ipa3_reset_gsi_event_ring(ipa_ep_idx);
  967. if (result) {
  968. IPA_MPM_ERR("Reset ev ring for MHIP_Client = %d failed\n",
  969. mhip_client);
  970. goto fail_chan;
  971. }
  972. result = ipa3_release_gsi_channel(ipa_ep_idx);
  973. if (result) {
  974. IPA_MPM_ERR("Release tr ring for MHIP_Client = %d failed\n",
  975. mhip_client);
  976. if (IPA_CLIENT_IS_PROD(mhip_client))
  977. ipa_mpm_change_gsi_state(mhi_idx,
  978. IPA_MPM_MHIP_CHAN_DL, GSI_ERR);
  979. else
  980. ipa_mpm_change_gsi_state(mhi_idx,
  981. IPA_MPM_MHIP_CHAN_UL, GSI_ERR);
  982. goto fail_chan;
  983. }
  984. if (IPA_CLIENT_IS_PROD(mhip_client))
  985. ipa_mpm_change_gsi_state(mhi_idx,
  986. IPA_MPM_MHIP_CHAN_DL, GSI_INIT);
  987. else
  988. ipa_mpm_change_gsi_state(mhi_idx,
  989. IPA_MPM_MHIP_CHAN_UL, GSI_INIT);
  990. memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
  991. /* Unmap Doorbells */
  992. if (IPA_CLIENT_IS_PROD(mhip_client)) {
  993. ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_PCIE,
  994. ipa_mpm_ctx->md[mhi_idx].dl_cons.db_device_iova);
  995. ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_IPA,
  996. ipa_mpm_ctx->md[mhi_idx].dl_cons.db_host_iova);
  997. ipa_mpm_ctx->md[mhi_idx].dl_cons.db_host_iova = 0;
  998. ipa_mpm_ctx->md[mhi_idx].dl_cons.db_device_iova = 0;
  999. } else {
  1000. ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_PCIE,
  1001. ipa_mpm_ctx->md[mhi_idx].ul_prod.db_device_iova);
  1002. ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_IPA,
  1003. ipa_mpm_ctx->md[mhi_idx].ul_prod.db_host_iova);
  1004. ipa_mpm_ctx->md[mhi_idx].ul_prod.db_host_iova = 0;
  1005. ipa_mpm_ctx->md[mhi_idx].ul_prod.db_device_iova = 0;
  1006. }
  1007. /* deallocate/Unmap transfer ring buffers */
  1008. for (i = 1; i < IPA_MPM_RING_LEN; i++) {
  1009. if (IPA_CLIENT_IS_PROD(mhip_client)) {
  1010. ipa_mpm_smmu_unmap(
  1011. (dma_addr_t)
  1012. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_iova[i],
  1013. TRE_BUFF_SIZE, dir,
  1014. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]);
  1015. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_iova[i]
  1016. = 0;
  1017. kfree(
  1018. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_va[i]);
  1019. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_va[i]
  1020. = NULL;
  1021. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]
  1022. = 0;
  1023. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_iova[i]
  1024. = 0;
  1025. } else {
  1026. ipa_mpm_smmu_unmap(
  1027. (dma_addr_t)
  1028. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_iova[i],
  1029. TRE_BUFF_SIZE, dir,
  1030. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
  1031. );
  1032. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_iova[i]
  1033. = 0;
  1034. kfree(
  1035. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_va[i]);
  1036. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_va[i]
  1037. = NULL;
  1038. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
  1039. = 0;
  1040. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_iova[i]
  1041. = 0;
  1042. }
  1043. }
  1044. /* deallocate/Unmap rings */
  1045. if (IPA_CLIENT_IS_PROD(mhip_client)) {
  1046. ipa_mpm_smmu_unmap(
  1047. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa,
  1048. IPA_MPM_PAGE_SIZE, dir,
  1049. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er);
  1050. ipa_mpm_smmu_unmap(
  1051. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa,
  1052. IPA_MPM_PAGE_SIZE, dir,
  1053. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
  1054. kfree(ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va);
  1055. kfree(ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va);
  1056. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = NULL;
  1057. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = NULL;
  1058. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr = 0;
  1059. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er = 0;
  1060. } else {
  1061. ipa_mpm_smmu_unmap(
  1062. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa,
  1063. IPA_MPM_PAGE_SIZE, dir,
  1064. ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
  1065. ipa_mpm_smmu_unmap(
  1066. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa,
  1067. IPA_MPM_PAGE_SIZE, dir,
  1068. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er);
  1069. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = 0;
  1070. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = 0;
  1071. kfree(ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va);
  1072. kfree(ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va);
  1073. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = NULL;
  1074. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = NULL;
  1075. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er = 0;
  1076. ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr = 0;
  1077. }
  1078. IPA_MPM_FUNC_EXIT();
  1079. return;
  1080. fail_chan:
  1081. ipa_assert();
  1082. }
  1083. /* round addresses for closest page per SMMU requirements */
  1084. static inline void ipa_mpm_smmu_round_to_page(uint64_t iova, uint64_t pa,
  1085. uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p)
  1086. {
  1087. *iova_p = rounddown(iova, PAGE_SIZE);
  1088. *pa_p = rounddown(pa, PAGE_SIZE);
  1089. *size_p = roundup(size + pa - *pa_p, PAGE_SIZE);
  1090. }
  1091. static int __ipa_mpm_configure_mhi_device(struct ipa_mpm_channel *ch,
  1092. int mhi_idx, int dir)
  1093. {
  1094. struct mhi_buf ch_config[2];
  1095. int ret;
  1096. IPA_MPM_FUNC_ENTRY();
  1097. if (ch == NULL) {
  1098. IPA_MPM_ERR("ch config is NULL\n");
  1099. return -EINVAL;
  1100. }
  1101. /* Populate CCA */
  1102. ch_config[0].buf = &ch->chan_props.ch_ctx;
  1103. ch_config[0].len = sizeof(ch->chan_props.ch_ctx);
  1104. ch_config[0].name = "CCA";
  1105. /* populate ECA */
  1106. ch_config[1].buf = &ch->evt_props.ev_ctx;
  1107. ch_config[1].len = sizeof(ch->evt_props.ev_ctx);
  1108. ch_config[1].name = "ECA";
  1109. IPA_MPM_DBG("Configuring MHI PRIME device for mhi_idx %d\n", mhi_idx);
  1110. ret = mhi_device_configure(ipa_mpm_ctx->md[mhi_idx].mhi_dev, dir,
  1111. ch_config, 2);
  1112. if (ret) {
  1113. IPA_MPM_ERR("mhi_device_configure failed\n");
  1114. return -EINVAL;
  1115. }
  1116. IPA_MPM_FUNC_EXIT();
  1117. return 0;
  1118. }
/*
 * Shut down MHIP channel @mhip_idx: stop data paths, tear down the GSI
 * pipes, drop the IPA clock vote if still held, and mark the device as
 * removed. Called on MHI remove / modem shutdown (e.g. SSR).
 */
static void ipa_mpm_mhip_shutdown(int mhip_idx)
{
	enum ipa_client_type ul_prod_chan, dl_cons_chan;

	IPA_MPM_FUNC_ENTRY();
	get_ipa3_client(mhip_idx, &ul_prod_chan, &dl_cons_chan);
	if (mhip_idx != IPA_MPM_MHIP_CH_ID_2) {
		/* For DPL, stop only DL channel */
		ipa_mpm_start_stop_ul_mhip_data_path(mhip_idx, MPM_MHIP_STOP);
		ipa_mpm_clean_mhip_chan(mhip_idx, ul_prod_chan);
	}
	ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);
	/* Drop the IPA clock vote only if we are not already in LPM. */
	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
	if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
		/* while in modem shutdown scenarios such as SSR, no explicit
		 * PCIe vote is needed.
		 */
		ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
	}
	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
	ipa_mpm_ctx->md[mhip_idx].mhi_dev = NULL;
	ipa_mpm_ctx->md[mhip_idx].init_complete = false;
	IPA_MPM_FUNC_EXIT();
}
  1143. /*
  1144. * Turning on/OFF PCIE Clock is done once for all clients.
  1145. * Always vote for Probe_ID 0 as a standard.
  1146. */
  1147. static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
  1148. int probe_id)
  1149. {
  1150. int result = 0;
  1151. if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
  1152. IPA_MPM_ERR("probe_id not found\n");
  1153. return -EINVAL;
  1154. }
  1155. if (vote > CLK_OFF) {
  1156. IPA_MPM_ERR("Invalid vote\n");
  1157. return -EINVAL;
  1158. }
  1159. if (ipa_mpm_ctx->md[probe_id].mhi_dev == NULL) {
  1160. IPA_MPM_ERR("MHI not initialized yet\n");
  1161. return 0;
  1162. }
  1163. IPA_MPM_ERR("PCIe clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
  1164. vote, probe_id,
  1165. atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt));
  1166. if (vote == CLK_ON) {
  1167. result = mhi_device_get_sync(
  1168. ipa_mpm_ctx->md[probe_id].mhi_dev, MHI_VOTE_BUS);
  1169. if (result) {
  1170. IPA_MPM_ERR("mhi_sync_get failed for probe_id %d\n",
  1171. result, probe_id);
  1172. return result;
  1173. }
  1174. IPA_MPM_DBG("probe_id %d PCIE clock now ON\n", probe_id);
  1175. atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
  1176. atomic_inc(&ipa_mpm_ctx->pcie_clk_total_cnt);
  1177. } else {
  1178. if ((atomic_read(
  1179. &ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt)
  1180. == 0)) {
  1181. IPA_MPM_DBG("probe_id %d PCIE clock already devoted\n",
  1182. probe_id);
  1183. WARN_ON(1);
  1184. return 0;
  1185. }
  1186. mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev, MHI_VOTE_BUS);
  1187. IPA_MPM_DBG("probe_id %d PCIE clock off\n", probe_id);
  1188. atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
  1189. atomic_dec(&ipa_mpm_ctx->pcie_clk_total_cnt);
  1190. }
  1191. return result;
  1192. }
  1193. /*
  1194. * Turning on/OFF IPA Clock is done only once- for all clients
  1195. */
  1196. static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
  1197. int probe_id)
  1198. {
  1199. if (vote > CLK_OFF)
  1200. return;
  1201. IPA_MPM_ERR("IPA clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
  1202. vote, probe_id,
  1203. atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt));
  1204. if (vote == CLK_ON) {
  1205. IPA_ACTIVE_CLIENTS_INC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
  1206. IPA_MPM_DBG("IPA clock now ON for probe_id %d\n", probe_id);
  1207. atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
  1208. atomic_inc(&ipa_mpm_ctx->ipa_clk_total_cnt);
  1209. } else {
  1210. if ((atomic_read
  1211. (&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt)
  1212. == 0)) {
  1213. IPA_MPM_DBG("probe_id %d IPA clock count < 0\n",
  1214. probe_id);
  1215. WARN_ON(1);
  1216. return;
  1217. }
  1218. IPA_ACTIVE_CLIENTS_DEC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
  1219. IPA_MPM_DBG("probe_id %d IPA clock off\n", probe_id);
  1220. atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
  1221. atomic_dec(&ipa_mpm_ctx->ipa_clk_total_cnt);
  1222. }
  1223. }
/*
 * Start or stop the GSI channel backing one MHIP direction.
 *
 * @mhip_chan:  IPA_MPM_MHIP_CHAN_UL, _DL or _BOTH
 * @probe_id:   MHIP channel index, < IPA_MPM_MHIP_CH_ID_MAX
 * @start_stop: MPM_MHIP_START or MPM_MHIP_STOP
 *
 * Returns MHIP_STATUS_SUCCESS on a state change, MHIP_STATUS_NO_OP /
 * _EP_NOT_READY when the channel is already in (or not yet eligible
 * for) the requested state, _EP_NOT_FOUND / _FAIL on errors. Fatal GSI
 * errors assert.
 */
static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
	enum ipa_mpm_mhip_chan mhip_chan,
	int probe_id,
	enum ipa_mpm_start_stop_type start_stop)
{
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	bool is_start;
	enum ipa_client_type ul_chan, dl_chan;
	u32 source_pipe_bitmask = 0;
	enum gsi_status gsi_res = GSI_STATUS_SUCCESS;
	int result;

	IPA_MPM_FUNC_ENTRY();
	if (mhip_chan > IPA_MPM_MHIP_CHAN_BOTH) {
		IPA_MPM_ERR("MHI not initialized yet\n");
		return MHIP_STATUS_FAIL;
	}
	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
		IPA_MPM_ERR("MHI not initialized yet\n");
		return MHIP_STATUS_FAIL;
	}
	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_BOTH) {
		/* NOTE(review): the UL mapping is immediately overwritten by
		 * the DL one, so for CHAN_BOTH only the DL endpoint is ever
		 * acted on below — confirm whether BOTH is actually used.
		 */
		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
	}
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
		IPA_MPM_ERR("fail to get EP# for idx %d\n", ipa_ep_idx);
		return MHIP_STATUS_EP_NOT_FOUND;
	}
	ep = &ipa3_ctx->ep[ipa_ep_idx];
	/* The channel must be at least GSI_ALLOCATED before any
	 * start/stop transition.
	 */
	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
		IPA_MPM_DBG("current GSI state = %d, action = %d\n",
			ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state,
			start_stop);
		if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state <
			GSI_ALLOCATED) {
			IPA_MPM_ERR("GSI chan is not allocated yet\n");
			return MHIP_STATUS_EP_NOT_READY;
		}
	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
		IPA_MPM_DBG("current GSI state = %d, action = %d\n",
			ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state,
			start_stop);
		if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state <
			GSI_ALLOCATED) {
			IPA_MPM_ERR("GSI chan is not allocated yet\n");
			return MHIP_STATUS_EP_NOT_READY;
		}
	}
	is_start = (start_stop == MPM_MHIP_START) ? true : false;
	if (is_start) {
		/* Idempotent start: already-started channels are a no-op. */
		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
			if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state ==
				GSI_STARTED) {
				IPA_MPM_ERR("GSI chan is already started\n");
				return MHIP_STATUS_NO_OP;
			}
		}
		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
			if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state ==
				GSI_STARTED) {
				IPA_MPM_ERR("GSI chan is already started\n");
				return MHIP_STATUS_NO_OP;
			}
		}
		/* Start GSI channel */
		gsi_res = ipa3_start_gsi_channel(ipa_ep_idx);
		if (gsi_res != GSI_STATUS_SUCCESS) {
			IPA_MPM_ERR("Error starting channel: err = %d\n",
				gsi_res);
			goto gsi_chan_fail;
		} else {
			ipa_mpm_change_gsi_state(probe_id, mhip_chan,
				GSI_STARTED);
		}
	} else {
		/* Stop path: only a GSI_STARTED channel can be stopped. */
		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
			if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state ==
				GSI_STOPPED) {
				IPA_MPM_ERR("GSI chan is already stopped\n");
				return MHIP_STATUS_NO_OP;
			} else if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state
				!= GSI_STARTED) {
				IPA_MPM_ERR("GSI chan isn't already started\n");
				return MHIP_STATUS_NO_OP;
			}
		}
		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
			if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state ==
				GSI_STOPPED) {
				IPA_MPM_ERR("GSI chan is already stopped\n");
				return MHIP_STATUS_NO_OP;
			} else if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state
				!= GSI_STARTED) {
				IPA_MPM_ERR("GSI chan isn't already started\n");
				return MHIP_STATUS_NO_OP;
			}
		}
		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
			/* NOTE(review): source_pipe_bitmask is computed but
			 * never used after this point — confirm whether a
			 * flow-control call was dropped here.
			 */
			source_pipe_bitmask = 1 <<
				ipa3_get_ep_mapping(ep->client);
			/* First Stop UL GSI channel before unvote PCIe clock */
			result = ipa3_stop_gsi_channel(ipa_ep_idx);
			if (result) {
				IPA_MPM_ERR("UL chan stop failed\n");
				goto gsi_chan_fail;
			} else {
				ipa_mpm_change_gsi_state(probe_id, mhip_chan,
					GSI_STOPPED);
			}
		}
		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
			result = ipa3_stop_gsi_channel(ipa_ep_idx);
			if (result) {
				IPA_MPM_ERR("Fail to stop DL channel\n");
				goto gsi_chan_fail;
			} else {
				ipa_mpm_change_gsi_state(probe_id, mhip_chan,
					GSI_STOPPED);
			}
		}
	}
	IPA_MPM_FUNC_EXIT();
	return MHIP_STATUS_SUCCESS;

gsi_chan_fail:
	ipa3_disable_data_path(ipa_ep_idx);
	ipa_mpm_change_gsi_state(probe_id, mhip_chan, GSI_ERR);
	ipa_assert();
	return MHIP_STATUS_FAIL;
}
  1359. int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
  1360. {
  1361. int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
  1362. int i;
  1363. static enum mhip_status_type status;
  1364. int ret = 0;
  1365. enum ipa_mpm_mhip_client_type mhip_client = IPA_MPM_MHIP_TETH;
  1366. if (!state)
  1367. return -EPERM;
  1368. if (!ipa3_is_mhip_offload_enabled())
  1369. return -EPERM;
  1370. for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
  1371. if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
  1372. probe_id = i;
  1373. break;
  1374. }
  1375. }
  1376. if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
  1377. IPA_MPM_ERR("Unknown probe_id\n");
  1378. return -EPERM;
  1379. }
  1380. IPA_MPM_DBG("WAN backhaul available for probe_id = %d\n", probe_id);
  1381. if (state->up) {
  1382. /* Start UL MHIP channel for offloading tethering connection */
  1383. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
  1384. if (ret) {
  1385. IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n",
  1386. ret);
  1387. return ret;
  1388. }
  1389. status = ipa_mpm_start_stop_mhip_chan(
  1390. IPA_MPM_MHIP_CHAN_UL, probe_id, MPM_MHIP_START);
  1391. switch (status) {
  1392. case MHIP_STATUS_SUCCESS:
  1393. ipa_mpm_ctx->md[probe_id].teth_state =
  1394. IPA_MPM_TETH_CONNECTED;
  1395. ret = ipa_mpm_start_stop_ul_mhip_data_path(
  1396. probe_id, MPM_MHIP_START);
  1397. if (ret) {
  1398. IPA_MPM_ERR("err UL chan start\n");
  1399. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  1400. return ret;
  1401. }
  1402. break;
  1403. case MHIP_STATUS_EP_NOT_READY:
  1404. case MHIP_STATUS_NO_OP:
  1405. IPA_MPM_DBG("UL chan already start, status = %d\n",
  1406. status);
  1407. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  1408. return ret;
  1409. case MHIP_STATUS_FAIL:
  1410. case MHIP_STATUS_BAD_STATE:
  1411. case MHIP_STATUS_EP_NOT_FOUND:
  1412. IPA_MPM_ERR("UL chan start err =%d\n", status);
  1413. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  1414. ipa_assert();
  1415. return -EFAULT;
  1416. default:
  1417. IPA_MPM_ERR("Err not found\n");
  1418. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  1419. ret = -EFAULT;
  1420. break;
  1421. }
  1422. ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
  1423. } else {
  1424. status = ipa_mpm_start_stop_mhip_chan(
  1425. IPA_MPM_MHIP_CHAN_UL, probe_id,
  1426. MPM_MHIP_STOP);
  1427. switch (status) {
  1428. case MHIP_STATUS_SUCCESS:
  1429. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
  1430. ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
  1431. MPM_MHIP_STOP);
  1432. break;
  1433. case MHIP_STATUS_NO_OP:
  1434. case MHIP_STATUS_EP_NOT_READY:
  1435. IPA_MPM_DBG("UL chan already stop, status = %d\n",
  1436. status);
  1437. break;
  1438. case MHIP_STATUS_FAIL:
  1439. case MHIP_STATUS_BAD_STATE:
  1440. case MHIP_STATUS_EP_NOT_FOUND:
  1441. IPA_MPM_ERR("UL chan cant be stopped err =%d\n",
  1442. status);
  1443. ipa_assert();
  1444. return -EFAULT;
  1445. default:
  1446. IPA_MPM_ERR("Err not found\n");
  1447. return -EFAULT;
  1448. }
  1449. /* Stop UL MHIP channel for offloading tethering connection */
  1450. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  1451. if (ret) {
  1452. IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n",
  1453. ret);
  1454. return ret;
  1455. }
  1456. ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
  1457. }
  1458. return ret;
  1459. }
  1460. static void ipa_mpm_change_gsi_state(int probe_id,
  1461. enum ipa_mpm_mhip_chan mhip_chan,
  1462. enum ipa_mpm_gsi_state next_state)
  1463. {
  1464. if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX)
  1465. return;
  1466. if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
  1467. mutex_lock(&ipa_mpm_ctx->md[probe_id].mutex);
  1468. ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state = next_state;
  1469. IPA_MPM_DBG("GSI next_state = %d\n",
  1470. ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state);
  1471. mutex_unlock(&ipa_mpm_ctx->md[probe_id].mutex);
  1472. }
  1473. if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
  1474. mutex_lock(&ipa_mpm_ctx->md[probe_id].mutex);
  1475. ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state = next_state;
  1476. IPA_MPM_DBG("GSI next_state = %d\n",
  1477. ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state);
  1478. mutex_unlock(&ipa_mpm_ctx->md[probe_id].mutex);
  1479. }
  1480. }
  1481. static void ipa_mpm_change_teth_state(int probe_id,
  1482. enum ipa_mpm_teth_state next_state)
  1483. {
  1484. enum ipa_mpm_teth_state curr_state;
  1485. if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
  1486. IPA_MPM_ERR("Unknown probe_id\n");
  1487. return;
  1488. }
  1489. curr_state = ipa_mpm_ctx->md[probe_id].teth_state;
  1490. IPA_MPM_DBG("curr_state = %d, ip_state = %d mhip_s\n",
  1491. curr_state, next_state);
  1492. switch (curr_state) {
  1493. case IPA_MPM_TETH_INIT:
  1494. if (next_state == IPA_MPM_TETH_CONNECTED)
  1495. next_state = IPA_MPM_TETH_INPROGRESS;
  1496. break;
  1497. case IPA_MPM_TETH_INPROGRESS:
  1498. break;
  1499. case IPA_MPM_TETH_CONNECTED:
  1500. break;
  1501. default:
  1502. IPA_MPM_ERR("No change in state\n");
  1503. break;
  1504. }
  1505. ipa_mpm_ctx->md[probe_id].teth_state = next_state;
  1506. IPA_MPM_DBG("next_state = %d\n", next_state);
  1507. }
  1508. static void ipa_mpm_read_channel(enum ipa_client_type chan)
  1509. {
  1510. struct gsi_chan_info chan_info;
  1511. int ipa_ep_idx;
  1512. struct ipa3_ep_context *ep;
  1513. int res;
  1514. ipa_ep_idx = ipa3_get_ep_mapping(chan);
  1515. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
  1516. IPAERR("failed to get idx");
  1517. return;
  1518. }
  1519. ep = &ipa3_ctx->ep[ipa_ep_idx];
  1520. IPA_MPM_ERR("Reading channel for chan %d, ep = %d, gsi_chan_hdl = %d\n",
  1521. chan, ep, ep->gsi_chan_hdl);
  1522. res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
  1523. if (res)
  1524. IPA_MPM_ERR("Reading of channel failed for ep %d\n", ep);
  1525. }
  1526. static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
  1527. enum ipa_mpm_start_stop_type start)
  1528. {
  1529. int ipa_ep_idx;
  1530. int res = 0;
  1531. enum ipa_client_type ul_chan, dl_chan;
  1532. if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
  1533. IPA_MPM_ERR("Unknown probe_id\n");
  1534. return 0;
  1535. }
  1536. get_ipa3_client(probe_id, &ul_chan, &dl_chan);
  1537. IPA_MPM_DBG("Start/Stop Data Path ? = %d\n", start);
  1538. /* MHIP Start Data path:
  1539. * IPA MHIP Producer: remove HOLB
  1540. * IPA MHIP Consumer : no op as there is no delay on these pipes.
  1541. */
  1542. if (start) {
  1543. IPA_MPM_DBG("Enabling data path\n");
  1544. if (ul_chan != IPA_CLIENT_MAX) {
  1545. /* Remove HOLB on the producer pipe */
  1546. IPA_MPM_DBG("Removing HOLB on ep = %s\n",
  1547. __stringify(ul_chan));
  1548. ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
  1549. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
  1550. IPAERR("failed to get idx");
  1551. return ipa_ep_idx;
  1552. }
  1553. res = ipa3_enable_data_path(ipa_ep_idx);
  1554. if (res)
  1555. IPA_MPM_ERR("Enable data path failed res=%d\n",
  1556. res);
  1557. }
  1558. } else {
  1559. IPA_MPM_DBG("Disabling data path\n");
  1560. if (ul_chan != IPA_CLIENT_MAX) {
  1561. /* Set HOLB on the producer pipe */
  1562. ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
  1563. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
  1564. IPAERR("failed to get idx");
  1565. return ipa_ep_idx;
  1566. }
  1567. res = ipa3_disable_data_path(ipa_ep_idx);
  1568. if (res)
  1569. IPA_MPM_ERR("disable data path failed res=%d\n",
  1570. res);
  1571. }
  1572. }
  1573. return res;
  1574. }
/* ipa_mpm_mhi_probe_cb is received for each MHI'/MHI channel
 * Currently we have 4 MHI channels.
 *
 * Per probe this routine: votes PCIe/IPA clocks, fills channel/event
 * contexts for the device side, connects the host GSI pipes, maps the
 * device doorbells through SMMU, hands the contexts to the MHI core,
 * rings the initial transfer/event ring doorbells on both sides, and
 * finally reconciles the UL channel with the current tethering state.
 */
static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
	const struct mhi_device_id *mhi_id)
{
	struct ipa_mpm_channel *ch;
	int ret;
	enum ipa_client_type ul_prod, dl_cons;
	int probe_id;
	struct ipa_req_chan_out_params ul_out_params, dl_out_params;
	void __iomem *db_addr;
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
	u32 wp_addr;
	int pipe_idx;

	IPA_MPM_FUNC_ENTRY();

	if (ipa_mpm_ctx == NULL) {
		IPA_MPM_ERR("ipa_mpm_ctx is NULL not expected, returning..\n");
		return -ENOMEM;
	}

	probe_id = get_idx_from_id(mhi_id);
	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
		IPA_MPM_ERR("chan=%pK is not supported for now\n", mhi_id);
		return -EPERM;
	}

	/* A repeated probe for an initialized slot is a benign no-op */
	if (ipa_mpm_ctx->md[probe_id].init_complete) {
		IPA_MPM_ERR("Probe initialization already done, returning\n");
		return 0;
	}

	IPA_MPM_DBG("Received probe for id=%d\n", probe_id);

	get_ipa3_client(probe_id, &ul_prod, &dl_cons);

	/* Vote for IPA clock for first time in initialization seq.
	 * IPA clock will be devoted when MHI enters LPM
	 * PCIe clock will be voted / devoted with every channel probe
	 * we receive.
	 * ul_prod = Host -> Device
	 * dl_cons = Device -> Host
	 */
	ipa_mpm_ctx->md[probe_id].mhi_dev = mhi_dev;
	ipa_mpm_ctx->mhi_parent_dev =
		ipa_mpm_ctx->md[probe_id].mhi_dev->dev.parent;

	ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
	mutex_lock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
	ipa_mpm_vote_unvote_ipa_clk(CLK_ON, probe_id);
	ipa_mpm_ctx->md[probe_id].in_lpm = false;
	mutex_unlock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
	IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);

	/*
	 * Set up MHI' pipes for Device IPA filling in
	 * Channel Context and Event Context.
	 * These params will be sent to Device side.
	 * UL CHAN = HOST -> Device
	 * DL CHAN = Device -> HOST
	 * per channel a TRE and EV is allocated.
	 * for a UL channel -
	 * IPA HOST PROD TRE -> IPA DEVICE CONS EV
	 * IPA HOST PROD EV ->  IPA DEVICE CONS TRE
	 * for a DL channel -
	 * IPA Device PROD TRE -> IPA HOST CONS EV
	 * IPA Device PROD EV ->  IPA HOST CONS TRE
	 */
	if (ul_prod != IPA_CLIENT_MAX) {
		/* store UL properties */
		ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
		/* Store Channel properties */
		ch->chan_props.id = mhi_dev->ul_chan_id;
		/* each channel doorbell is 8 bytes wide in the CHDB block */
		ch->chan_props.device_db =
			ipa_mpm_ctx->dev_info.chdb_base +
			ch->chan_props.id * 8;
		/* Fill Channel Context to be sent to Device side */
		ch->chan_props.ch_ctx.chtype =
			IPA_MPM_MHI_HOST_UL_CHANNEL;
		ch->chan_props.ch_ctx.erindex =
			mhi_dev->ul_event_id;
		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
			GSI_EVT_RING_RE_SIZE_16B;
		/* Store Event properties */
		ch->evt_props.ev_ctx.update_rp_modc = 0;
		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
		ch->evt_props.ev_ctx.ertype = 1;
		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
			GSI_EVT_RING_RE_SIZE_16B;
		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
		ch->evt_props.device_db =
			ipa_mpm_ctx->dev_info.erdb_base +
			ch->chan_props.ch_ctx.erindex * 8;
	}
	if (dl_cons != IPA_CLIENT_MAX) {
		/* store DL channel properties */
		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
		/* Store Channel properties */
		ch->chan_props.id = mhi_dev->dl_chan_id;
		ch->chan_props.device_db =
			ipa_mpm_ctx->dev_info.chdb_base +
			ch->chan_props.id * 8;
		/* Fill Channel Context to be sent to Dev side */
		ch->chan_props.ch_ctx.chstate = 1;
		ch->chan_props.ch_ctx.chtype =
			IPA_MPM_MHI_HOST_DL_CHANNEL;
		ch->chan_props.ch_ctx.erindex = mhi_dev->dl_event_id;
		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
			GSI_EVT_RING_RE_SIZE_16B;
		/* Store Event properties */
		ch->evt_props.ev_ctx.update_rp_modc = 0;
		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
		ch->evt_props.ev_ctx.ertype = 1;
		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
			GSI_EVT_RING_RE_SIZE_16B;
		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
		ch->evt_props.device_db =
			ipa_mpm_ctx->dev_info.erdb_base +
			ch->chan_props.ch_ctx.erindex * 8;
	}

	/* connect Host GSI pipes with MHI' protocol */
	if (ul_prod != IPA_CLIENT_MAX) {
		ret = ipa_mpm_connect_mhip_gsi_pipe(ul_prod,
			probe_id, &ul_out_params);
		if (ret) {
			IPA_MPM_ERR("failed connecting MPM client %d\n",
				ul_prod);
			goto fail_gsi_setup;
		}
	}
	if (dl_cons != IPA_CLIENT_MAX) {
		ret = ipa_mpm_connect_mhip_gsi_pipe(dl_cons,
			probe_id, &dl_out_params);
		if (ret) {
			IPA_MPM_ERR("connecting MPM client = %d failed\n",
				dl_cons);
			goto fail_gsi_setup;
		}
	}

	/* Map the host-side GSI doorbells into PCIe SMMU so the device
	 * can write event-ring update pointers directly.
	 */
	if (ul_prod != IPA_CLIENT_MAX) {
		ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
		ch->evt_props.ev_ctx.update_rp_addr =
			ipa_mpm_smmu_map_doorbell(
				MHIP_SMMU_DOMAIN_PCIE,
				ul_out_params.db_reg_phs_addr_lsb);
		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
			ipa_assert();
		ipa_mpm_ctx->md[probe_id].ul_prod.db_device_iova =
			ch->evt_props.ev_ctx.update_rp_addr;
		ret = __ipa_mpm_configure_mhi_device(
			ch, probe_id, DMA_TO_HIPA);
		if (ret) {
			IPA_MPM_ERR("configure_mhi_dev fail %d\n",
				ret);
			goto fail_smmu;
		}
	}
	if (dl_cons != IPA_CLIENT_MAX) {
		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
		ch->evt_props.ev_ctx.update_rp_addr =
			ipa_mpm_smmu_map_doorbell(
				MHIP_SMMU_DOMAIN_PCIE,
				dl_out_params.db_reg_phs_addr_lsb);
		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
			ipa_assert();
		ipa_mpm_ctx->md[probe_id].dl_cons.db_device_iova =
			ch->evt_props.ev_ctx.update_rp_addr;
		ret = __ipa_mpm_configure_mhi_device(ch, probe_id,
			DMA_FROM_HIPA);
		if (ret) {
			IPA_MPM_ERR("mpm_config_mhi_dev failed %d\n", ret);
			goto fail_smmu;
		}
	}

	ret = mhi_prepare_for_transfer(ipa_mpm_ctx->md[probe_id].mhi_dev);
	if (ret) {
		IPA_MPM_ERR("mhi_prepare_for_transfer failed %d\n", ret);
		WARN_ON(1);
		/*
		 * WA to handle prepare_for_tx failures.
		 * Though prepare for transfer fails, indicate success
		 * to MHI driver. remove_cb will be called eventually when
		 * Device side comes from where pending cleanup happens.
		 */
		atomic_inc(&ipa_mpm_ctx->probe_cnt);
		ipa_mpm_ctx->md[probe_id].init_complete = true;
		IPA_MPM_FUNC_EXIT();
		return 0;
	}

	/*
	 * Ring initial channel db - Host Side UL and Device side DL channel.
	 * To ring doorbell, write "WP" into doorbell register.
	 * This WP should be set to 1 element less than ring max.
	 */

	/* Ring UL PRODUCER TRANSFER RING (HOST IPA -> DEVICE IPA) Doorbell */
	if (ul_prod != IPA_CLIENT_MAX) {
		IPA_MPM_DBG("Host UL TR PA DB = 0X%0x\n",
			ul_out_params.db_reg_phs_addr_lsb);

		db_addr = ioremap(
			(phys_addr_t)(ul_out_params.db_reg_phs_addr_lsb), 4);

		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);

		IPA_MPM_DBG("Host UL TR DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);

		iounmap(db_addr);
		ipa_mpm_read_channel(ul_prod);
	}

	/* Ring UL PRODUCER EVENT RING (HOST IPA -> DEVICE IPA) Doorbell
	 * Ring the event DB to a value outside the
	 * ring range such that rp and wp never meet.
	 */
	if (ul_prod != IPA_CLIENT_MAX) {
		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
			IPA_MPM_ERR("fail to alloc EP.\n");
			goto fail_start_channel;
		}
		ep = &ipa3_ctx->ep[ipa_ep_idx];

		IPA_MPM_DBG("for ep_idx %d , gsi_evt_ring_hdl = %ld\n",
			ipa_ep_idx, ep->gsi_evt_ring_hdl);
		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
			&evt_ring_db_addr_low, &evt_ring_db_addr_high);

		IPA_MPM_DBG("Host UL ER PA DB = 0X%0x\n",
			evt_ring_db_addr_low);

		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);

		/* WP is placed one element PAST the ring so rp and wp
		 * can never be equal (ring never looks empty/full).
		 */
		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.er_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
		IPA_MPM_DBG("Host UL ER DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);

		iowrite32(wp_addr, db_addr);
		iounmap(db_addr);
	}

	/* Ring DEVICE IPA DL CONSUMER Event Doorbell */
	if (ul_prod != IPA_CLIENT_MAX) {
		db_addr = ioremap((phys_addr_t)
			(ipa_mpm_ctx->md[probe_id].ul_prod.evt_props.device_db),
			4);

		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);
		iounmap(db_addr);
	}

	/* Ring DL PRODUCER (DEVICE IPA -> HOST IPA) Doorbell */
	if (dl_cons != IPA_CLIENT_MAX) {
		db_addr = ioremap((phys_addr_t)
			(ipa_mpm_ctx->md[probe_id].dl_cons.chan_props.device_db),
			4);

		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);

		IPA_MPM_DBG("Device DL TR DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);

		iowrite32(wp_addr, db_addr);
		iounmap(db_addr);
	}

	/*
	 * Ring event ring DB on Device side.
	 * ipa_mpm should ring the event DB to a value outside the
	 * ring range such that rp and wp never meet.
	 */
	if (dl_cons != IPA_CLIENT_MAX) {
		db_addr =
			ioremap(
			(phys_addr_t)
			(ipa_mpm_ctx->md[probe_id].dl_cons.evt_props.device_db),
			4);

		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.er_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);

		IPA_MPM_DBG("Device UL ER DB = 0X%pK,wp_addr = 0X%0x",
			db_addr, wp_addr);

		iounmap(db_addr);
	}

	/* Ring DL EVENT RING CONSUMER (DEVICE IPA CONSUMER) Doorbell */
	if (dl_cons != IPA_CLIENT_MAX) {
		ipa_ep_idx = ipa3_get_ep_mapping(dl_cons);
		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
			IPA_MPM_ERR("fail to alloc EP.\n");
			goto fail_start_channel;
		}
		ep = &ipa3_ctx->ep[ipa_ep_idx];

		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
			&evt_ring_db_addr_low, &evt_ring_db_addr_high);
		IPA_MPM_DBG("Host DL ER PA DB = 0X%0x\n",
			evt_ring_db_addr_low);

		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);

		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);
		IPA_MPM_DBG("Host DL ER DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);
		iounmap(db_addr);
	}

	/* Check if TETH connection is in progress.
	 * If teth isn't started by now, then Stop UL channel.
	 */
	switch (ipa_mpm_ctx->md[probe_id].teth_state) {
	case IPA_MPM_TETH_INIT:
		if (ul_prod != IPA_CLIENT_MAX) {
			/* No teth started yet, disable UL channel */
			ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
				probe_id, MPM_MHIP_STOP);
			/* Disable data path */
			/* NOTE(review): message says "Enable" but this is
			 * the stop/disable path - text looks wrong.
			 */
			if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
				MPM_MHIP_STOP)) {
				IPA_MPM_ERR("MHIP Enable data path failed\n");
				goto fail_start_channel;
			}
		}
		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
		break;
	case IPA_MPM_TETH_INPROGRESS:
	case IPA_MPM_TETH_CONNECTED:
		IPA_MPM_DBG("UL channel is already started, continue\n");
		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);

		/* Enable data path */
		if (ul_prod != IPA_CLIENT_MAX) {
			if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
				MPM_MHIP_START)) {
				IPA_MPM_ERR("MHIP Enable data path failed\n");
				goto fail_start_channel;
			}
		}

		/* Lift the delay for rmnet USB prod pipe */
		if (probe_id == IPA_MPM_MHIP_CH_ID_1) {
			pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
			ipa3_xdci_ep_delay_rm(pipe_idx);
		}
		break;
	default:
		IPA_MPM_DBG("No op for UL channel, in teth state = %d",
			ipa_mpm_ctx->md[probe_id].teth_state);
		break;
	}

	atomic_inc(&ipa_mpm_ctx->probe_cnt);
	ipa_mpm_ctx->md[probe_id].init_complete = true;
	IPA_MPM_FUNC_EXIT();
	return 0;

	/* NOTE(review): if fail_start_channel is reached after
	 * mhi_prepare_for_transfer succeeded, "ret" still holds 0,
	 * so the error path may return success - verify intent.
	 */
fail_gsi_setup:
fail_start_channel:
fail_smmu:
	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
		IPA_MPM_DBG("SMMU failed\n");
	ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
	ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, probe_id);
	ipa_assert();
	return ret;
}
  1918. static void ipa_mpm_init_mhip_channel_info(void)
  1919. {
  1920. /* IPA_MPM_MHIP_CH_ID_0 => MHIP TETH PIPES */
  1921. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ipa_client =
  1922. IPA_CLIENT_MHI_PRIME_TETH_PROD;
  1923. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ep_cfg =
  1924. mhip_dl_teth_ep_cfg;
  1925. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ipa_client =
  1926. IPA_CLIENT_MHI_PRIME_TETH_CONS;
  1927. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ep_cfg =
  1928. mhip_ul_teth_ep_cfg;
  1929. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].mhip_client =
  1930. IPA_MPM_MHIP_TETH;
  1931. /* IPA_MPM_MHIP_CH_ID_1 => MHIP RMNET PIPES */
  1932. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ipa_client =
  1933. IPA_CLIENT_MHI_PRIME_RMNET_PROD;
  1934. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ep_cfg =
  1935. mhip_dl_rmnet_ep_cfg;
  1936. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ipa_client =
  1937. IPA_CLIENT_MHI_PRIME_RMNET_CONS;
  1938. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ep_cfg =
  1939. mhip_ul_rmnet_ep_cfg;
  1940. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].mhip_client =
  1941. IPA_MPM_MHIP_USB_RMNET;
  1942. /* IPA_MPM_MHIP_CH_ID_2 => MHIP ADPL PIPE */
  1943. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ipa_client =
  1944. IPA_CLIENT_MHI_PRIME_DPL_PROD;
  1945. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ep_cfg =
  1946. mhip_dl_dpl_ep_cfg;
  1947. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].ul_prod.ipa_client =
  1948. IPA_CLIENT_MAX;
  1949. ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].mhip_client =
  1950. IPA_MPM_MHIP_USB_DPL;
  1951. }
  1952. static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
  1953. {
  1954. int mhip_idx;
  1955. IPA_MPM_FUNC_ENTRY();
  1956. for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
  1957. if (mhi_dev == ipa_mpm_ctx->md[mhip_idx].mhi_dev)
  1958. break;
  1959. }
  1960. if (mhip_idx >= IPA_MPM_MHIP_CH_ID_MAX) {
  1961. IPA_MPM_DBG("remove_cb for mhip_idx = %d not probed before\n",
  1962. mhip_idx);
  1963. return;
  1964. }
  1965. IPA_MPM_DBG("remove_cb for mhip_idx = %d", mhip_idx);
  1966. ipa_mpm_mhip_shutdown(mhip_idx);
  1967. atomic_dec(&ipa_mpm_ctx->probe_cnt);
  1968. if (atomic_read(&ipa_mpm_ctx->probe_cnt) == 0) {
  1969. /* Last probe done, reset Everything here */
  1970. ipa_mpm_ctx->mhi_parent_dev = NULL;
  1971. ipa_mpm_ctx->carved_smmu_cb.next_addr =
  1972. ipa_mpm_ctx->carved_smmu_cb.va_start;
  1973. atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
  1974. for (mhip_idx = 0;
  1975. mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
  1976. atomic_set(
  1977. &ipa_mpm_ctx->md[mhip_idx].clk_cnt.pcie_clk_cnt,
  1978. 0);
  1979. }
  1980. }
  1981. IPA_MPM_FUNC_EXIT();
  1982. }
  1983. static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
  1984. enum MHI_CB mhi_cb)
  1985. {
  1986. int mhip_idx;
  1987. enum mhip_status_type status;
  1988. IPA_MPM_DBG("%d\n", mhi_cb);
  1989. for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
  1990. if (mhi_dev == ipa_mpm_ctx->md[mhip_idx].mhi_dev)
  1991. break;
  1992. }
  1993. if (mhip_idx >= IPA_MPM_MHIP_CH_ID_MAX) {
  1994. IPA_MPM_DBG("ignoring secondary callbacks\n");
  1995. return;
  1996. }
  1997. mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
  1998. switch (mhi_cb) {
  1999. case MHI_CB_IDLE:
  2000. break;
  2001. case MHI_CB_LPM_ENTER:
  2002. if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
  2003. status = ipa_mpm_start_stop_mhip_chan(
  2004. IPA_MPM_MHIP_CHAN_DL,
  2005. mhip_idx, MPM_MHIP_STOP);
  2006. IPA_MPM_DBG("status = %d\n", status);
  2007. ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
  2008. ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
  2009. } else {
  2010. IPA_MPM_DBG("Already in lpm\n");
  2011. }
  2012. break;
  2013. case MHI_CB_LPM_EXIT:
  2014. if (ipa_mpm_ctx->md[mhip_idx].in_lpm) {
  2015. ipa_mpm_vote_unvote_ipa_clk(CLK_ON, mhip_idx);
  2016. status = ipa_mpm_start_stop_mhip_chan(
  2017. IPA_MPM_MHIP_CHAN_DL,
  2018. mhip_idx, MPM_MHIP_START);
  2019. IPA_MPM_DBG("status = %d\n", status);
  2020. ipa_mpm_ctx->md[mhip_idx].in_lpm = false;
  2021. } else {
  2022. IPA_MPM_DBG("Already out of lpm\n");
  2023. }
  2024. break;
  2025. case MHI_CB_EE_RDDM:
  2026. case MHI_CB_PENDING_DATA:
  2027. case MHI_CB_SYS_ERROR:
  2028. case MHI_CB_FATAL_ERROR:
  2029. case MHI_CB_BW_REQ:
  2030. case MHI_CB_EE_MISSION_MODE:
  2031. IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
  2032. break;
  2033. }
  2034. mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
  2035. }
  2036. static int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
  2037. enum ipa_client_type dst_pipe)
  2038. {
  2039. int result = 0;
  2040. struct ipa_ep_cfg ep_cfg = { { 0 } };
  2041. IPA_MPM_FUNC_ENTRY();
  2042. IPA_MPM_DBG("DMA from %d to %d\n", src_pipe, dst_pipe);
  2043. /* Set USB PROD PIPE DMA to MHIP PROD PIPE */
  2044. ep_cfg.mode.mode = IPA_DMA;
  2045. ep_cfg.mode.dst = dst_pipe;
  2046. ep_cfg.seq.set_dynamic = true;
  2047. result = ipa_cfg_ep(ipa_get_ep_mapping(src_pipe), &ep_cfg);
  2048. IPA_MPM_FUNC_EXIT();
  2049. return result;
  2050. }
  2051. int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
  2052. enum ipa_client_type dst_pipe)
  2053. {
  2054. int result = 0;
  2055. struct ipa_ep_cfg ep_cfg = { { 0 } };
  2056. IPA_MPM_FUNC_ENTRY();
  2057. IPA_MPM_DBG("DMA from %d to %d\n", src_pipe, dst_pipe);
  2058. /* Set USB PROD PIPE DMA to MHIP PROD PIPE */
  2059. ep_cfg.mode.mode = IPA_BASIC;
  2060. ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
  2061. ep_cfg.seq.set_dynamic = true;
  2062. result = ipa_cfg_ep(ipa_get_ep_mapping(src_pipe), &ep_cfg);
  2063. IPA_MPM_FUNC_EXIT();
  2064. return result;
  2065. }
  2066. static void ipa_mpm_mhip_map_prot(enum ipa_usb_teth_prot prot,
  2067. enum ipa_mpm_mhip_client_type *mhip_client)
  2068. {
  2069. switch (prot) {
  2070. case IPA_USB_RNDIS:
  2071. *mhip_client = IPA_MPM_MHIP_TETH;
  2072. break;
  2073. case IPA_USB_RMNET:
  2074. *mhip_client = IPA_MPM_MHIP_USB_RMNET;
  2075. break;
  2076. case IPA_USB_DIAG:
  2077. *mhip_client = IPA_MPM_MHIP_USB_DPL;
  2078. break;
  2079. default:
  2080. *mhip_client = IPA_MPM_MHIP_NONE;
  2081. break;
  2082. }
  2083. IPA_MPM_DBG("Mapped xdci prot %d -> MHIP prot %d\n", prot,
  2084. *mhip_client);
  2085. }
  2086. int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
  2087. {
  2088. int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
  2089. int i;
  2090. enum ipa_mpm_mhip_client_type mhip_client;
  2091. enum mhip_status_type status;
  2092. int ret = 0;
  2093. int pipe_idx;
  2094. if (ipa_mpm_ctx == NULL) {
  2095. IPA_MPM_ERR("MPM not platform probed yet, returning ..\n");
  2096. return 0;
  2097. }
  2098. ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
  2099. for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
  2100. if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
  2101. probe_id = i;
  2102. break;
  2103. }
  2104. }
  2105. if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
  2106. IPA_MPM_ERR("Unknown probe_id\n");
  2107. return 0;
  2108. }
  2109. IPA_MPM_DBG("Connect xdci prot %d -> mhip_client = %d probe_id = %d\n",
  2110. xdci_teth_prot, mhip_client, probe_id);
  2111. ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
  2112. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
  2113. if (ret) {
  2114. IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
  2115. return ret;
  2116. }
  2117. switch (mhip_client) {
  2118. case IPA_MPM_MHIP_USB_RMNET:
  2119. ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
  2120. IPA_CLIENT_MHI_PRIME_RMNET_CONS);
  2121. break;
  2122. case IPA_MPM_MHIP_USB_DPL:
  2123. IPA_MPM_DBG("connecting DPL prot %d\n", mhip_client);
  2124. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
  2125. return 0;
  2126. default:
  2127. IPA_MPM_DBG("mhip_client = %d not processed\n", mhip_client);
  2128. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  2129. if (ret) {
  2130. IPA_MPM_ERR("Error unvoting on PCIe clk, err = %d\n",
  2131. ret);
  2132. return ret;
  2133. }
  2134. return 0;
  2135. }
  2136. if (mhip_client != IPA_MPM_MHIP_USB_DPL)
  2137. /* Start UL MHIP channel for offloading teth connection */
  2138. status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
  2139. probe_id,
  2140. MPM_MHIP_START);
  2141. switch (status) {
  2142. case MHIP_STATUS_SUCCESS:
  2143. case MHIP_STATUS_NO_OP:
  2144. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
  2145. ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
  2146. MPM_MHIP_START);
  2147. pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
  2148. /* Lift the delay for rmnet USB prod pipe */
  2149. ipa3_xdci_ep_delay_rm(pipe_idx);
  2150. if (status == MHIP_STATUS_NO_OP) {
  2151. /* Channels already have been started,
  2152. * we can devote for pcie clocks
  2153. */
  2154. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  2155. }
  2156. break;
  2157. case MHIP_STATUS_EP_NOT_READY:
  2158. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  2159. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INPROGRESS);
  2160. break;
  2161. case MHIP_STATUS_FAIL:
  2162. case MHIP_STATUS_BAD_STATE:
  2163. case MHIP_STATUS_EP_NOT_FOUND:
  2164. IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
  2165. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  2166. ret = -EFAULT;
  2167. break;
  2168. default:
  2169. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  2170. IPA_MPM_ERR("Err not found\n");
  2171. break;
  2172. }
  2173. return ret;
  2174. }
  2175. int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
  2176. {
  2177. int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
  2178. int i;
  2179. enum ipa_mpm_mhip_client_type mhip_client;
  2180. enum mhip_status_type status;
  2181. int ret = 0;
  2182. if (ipa_mpm_ctx == NULL) {
  2183. IPA_MPM_ERR("MPM not platform probed, returning ..\n");
  2184. return 0;
  2185. }
  2186. ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
  2187. for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
  2188. if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
  2189. probe_id = i;
  2190. break;
  2191. }
  2192. }
  2193. if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
  2194. IPA_MPM_ERR("Invalid probe_id\n");
  2195. return 0;
  2196. }
  2197. IPA_MPM_ERR("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
  2198. xdci_teth_prot, mhip_client, probe_id);
  2199. switch (mhip_client) {
  2200. case IPA_MPM_MHIP_USB_RMNET:
  2201. ret = ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
  2202. IPA_CLIENT_MHI_PRIME_RMNET_CONS);
  2203. if (ret) {
  2204. IPA_MPM_ERR("failed to reset dma mode\n");
  2205. return ret;
  2206. }
  2207. break;
  2208. case IPA_MPM_MHIP_TETH:
  2209. IPA_MPM_DBG("Rndis Disconnect, wait for wan_state ioctl\n");
  2210. return 0;
  2211. case IPA_MPM_MHIP_USB_DPL:
  2212. IPA_MPM_DBG("Teth Disconnecting for DPL\n");
  2213. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
  2214. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  2215. if (ret)
  2216. IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n",
  2217. ret);
  2218. ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
  2219. return ret;
  2220. default:
  2221. IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
  2222. return 0;
  2223. }
  2224. status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
  2225. probe_id, MPM_MHIP_STOP);
  2226. switch (status) {
  2227. case MHIP_STATUS_SUCCESS:
  2228. case MHIP_STATUS_NO_OP:
  2229. case MHIP_STATUS_EP_NOT_READY:
  2230. ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
  2231. ipa_mpm_start_stop_ul_mhip_data_path(probe_id, MPM_MHIP_STOP);
  2232. break;
  2233. case MHIP_STATUS_FAIL:
  2234. case MHIP_STATUS_BAD_STATE:
  2235. case MHIP_STATUS_EP_NOT_FOUND:
  2236. IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
  2237. ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  2238. return -EFAULT;
  2239. break;
  2240. default:
  2241. IPA_MPM_ERR("Err not found\n");
  2242. break;
  2243. }
  2244. ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
  2245. if (ret) {
  2246. IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n", ret);
  2247. return ret;
  2248. }
  2249. ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
  2250. return ret;
  2251. }
  2252. static int ipa_mpm_populate_smmu_info(struct platform_device *pdev)
  2253. {
  2254. struct ipa_smmu_in_params smmu_in;
  2255. struct ipa_smmu_out_params smmu_out;
  2256. u32 carved_iova_ap_mapping[2];
  2257. struct ipa_smmu_cb_ctx *cb;
  2258. struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
  2259. int ret = 0;
  2260. if (ipa_mpm_ctx->carved_smmu_cb.valid) {
  2261. IPA_MPM_DBG("SMMU Context allocated, returning ..\n");
  2262. return ret;
  2263. }
  2264. cb = &ipa_mpm_ctx->carved_smmu_cb;
  2265. /* get IPA SMMU enabled status */
  2266. smmu_in.smmu_client = IPA_SMMU_AP_CLIENT;
  2267. if (ipa_get_smmu_params(&smmu_in, &smmu_out))
  2268. ipa_mpm_ctx->dev_info.ipa_smmu_enabled = false;
  2269. else
  2270. ipa_mpm_ctx->dev_info.ipa_smmu_enabled =
  2271. smmu_out.smmu_enable;
  2272. /* get cache_coherent enable or not */
  2273. ipa_mpm_ctx->dev_info.is_cache_coherent = ap_cb->is_cache_coherent;
  2274. if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iova-mapping",
  2275. carved_iova_ap_mapping, 2)) {
  2276. IPA_MPM_ERR("failed to read of_node %s\n",
  2277. "qcom,mpm-iova-mapping");
  2278. return -EINVAL;
  2279. }
  2280. ipa_mpm_ctx->dev_info.pcie_smmu_enabled = true;
  2281. if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled !=
  2282. ipa_mpm_ctx->dev_info.pcie_smmu_enabled) {
  2283. IPA_MPM_DBG("PCIE/IPA SMMU config mismatch\n");
  2284. return -EINVAL;
  2285. }
  2286. cb->va_start = carved_iova_ap_mapping[0];
  2287. cb->va_size = carved_iova_ap_mapping[1];
  2288. cb->va_end = cb->va_start + cb->va_size;
  2289. if (cb->va_end >= ap_cb->va_start) {
  2290. IPA_MPM_ERR("MPM iommu and AP overlap addr 0x%lx\n",
  2291. cb->va_start);
  2292. ipa_assert();
  2293. return -EFAULT;
  2294. }
  2295. cb->dev = ipa_mpm_ctx->dev_info.dev;
  2296. cb->valid = true;
  2297. cb->next_addr = cb->va_start;
  2298. if (dma_set_mask_and_coherent(ipa_mpm_ctx->dev_info.dev,
  2299. DMA_BIT_MASK(64))) {
  2300. IPA_MPM_ERR("setting DMA mask to 64 failed.\n");
  2301. return -EINVAL;
  2302. }
  2303. return ret;
  2304. }
  2305. static int ipa_mpm_probe(struct platform_device *pdev)
  2306. {
  2307. int ret = 0;
  2308. int i = 0;
  2309. int idx = 0;
  2310. IPA_MPM_FUNC_ENTRY();
  2311. if (ipa_mpm_ctx) {
  2312. IPA_MPM_DBG("MPM is already probed, returning\n");
  2313. return 0;
  2314. }
  2315. ret = ipa_register_ipa_ready_cb(ipa_mpm_ipa3_ready_cb, (void *)pdev);
  2316. /*
  2317. * If we received -EEXIST, IPA has initialized. So we need
  2318. * to continue the probing process.
  2319. */
  2320. if (!ret) {
  2321. IPA_MPM_DBG("IPA not ready yet, registering callback\n");
  2322. return ret;
  2323. }
  2324. IPA_MPM_DBG("IPA is ready, continue with probe\n");
  2325. ipa_mpm_ctx = kzalloc(sizeof(*ipa_mpm_ctx), GFP_KERNEL);
  2326. if (!ipa_mpm_ctx)
  2327. return -ENOMEM;
  2328. for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
  2329. mutex_init(&ipa_mpm_ctx->md[i].mutex);
  2330. mutex_init(&ipa_mpm_ctx->md[i].lpm_mutex);
  2331. }
  2332. ipa_mpm_ctx->dev_info.pdev = pdev;
  2333. ipa_mpm_ctx->dev_info.dev = &pdev->dev;
  2334. ipa_mpm_init_mhip_channel_info();
  2335. if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base",
  2336. &ipa_mpm_ctx->dev_info.chdb_base)) {
  2337. IPA_MPM_ERR("failed to read qcom,mhi-chdb-base\n");
  2338. goto fail_probe;
  2339. }
  2340. IPA_MPM_DBG("chdb-base=0x%x\n", ipa_mpm_ctx->dev_info.chdb_base);
  2341. if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base",
  2342. &ipa_mpm_ctx->dev_info.erdb_base)) {
  2343. IPA_MPM_ERR("failed to read qcom,mhi-erdb-base\n");
  2344. goto fail_probe;
  2345. }
  2346. IPA_MPM_DBG("erdb-base=0x%x\n", ipa_mpm_ctx->dev_info.erdb_base);
  2347. ret = ipa_mpm_populate_smmu_info(pdev);
  2348. if (ret) {
  2349. IPA_MPM_DBG("SMMU Config failed\n");
  2350. goto fail_probe;
  2351. }
  2352. atomic_set(&ipa_mpm_ctx->ipa_clk_total_cnt, 0);
  2353. atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
  2354. for (idx = 0; idx < IPA_MPM_MHIP_CH_ID_MAX; idx++) {
  2355. ipa_mpm_ctx->md[idx].ul_prod.gsi_state = GSI_INIT;
  2356. ipa_mpm_ctx->md[idx].dl_cons.gsi_state = GSI_INIT;
  2357. atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.ipa_clk_cnt, 0);
  2358. atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.pcie_clk_cnt, 0);
  2359. }
  2360. ret = mhi_driver_register(&mhi_driver);
  2361. if (ret) {
  2362. IPA_MPM_ERR("mhi_driver_register failed %d\n", ret);
  2363. goto fail_probe;
  2364. }
  2365. IPA_MPM_FUNC_EXIT();
  2366. return 0;
  2367. fail_probe:
  2368. kfree(ipa_mpm_ctx);
  2369. ipa_mpm_ctx = NULL;
  2370. return -EFAULT;
  2371. }
  2372. static int ipa_mpm_remove(struct platform_device *pdev)
  2373. {
  2374. IPA_MPM_FUNC_ENTRY();
  2375. mhi_driver_unregister(&mhi_driver);
  2376. IPA_MPM_FUNC_EXIT();
  2377. return 0;
  2378. }
/* Device-tree match table: binds this driver to "qcom,ipa-mpm" nodes. */
static const struct of_device_id ipa_mpm_dt_match[] = {
	{ .compatible = "qcom,ipa-mpm" },
	{},
};
MODULE_DEVICE_TABLE(of, ipa_mpm_dt_match);
/* Platform driver descriptor; registered from ipa_mpm_init() at late_initcall */
static struct platform_driver ipa_ipa_mpm_driver = {
	.driver = {
		.name = "ipa_mpm",
		.of_match_table = ipa_mpm_dt_match,
	},
	.probe = ipa_mpm_probe,
	.remove = ipa_mpm_remove,
};
/**
 * ipa_mpm_init() - Registers ipa_mpm as a platform device for a APQ
 *
 * This function is called after bootup for APQ device.
 * ipa_mpm will register itself as a platform device, and probe
 * function will get called.
 *
 * Return: 0 on success, negative errno from platform_driver_register()
 * on failure. (Previously documented as "None", but the function
 * returns int as required for an initcall.)
 */
static int __init ipa_mpm_init(void)
{
	IPA_MPM_DBG("register ipa_mpm platform device\n");
	return platform_driver_register(&ipa_ipa_mpm_driver);
}
  2406. /**
  2407. * ipa3_is_mhip_offload_enabled() - check if IPA MPM module was initialized
  2408. * successfully. If it is initialized, MHIP is enabled for teth
  2409. *
  2410. * Return value: 1 for yes; 0 for no
  2411. */
  2412. int ipa3_is_mhip_offload_enabled(void)
  2413. {
  2414. if (ipa_mpm_ctx == NULL)
  2415. return 0;
  2416. else
  2417. return 1;
  2418. }
  2419. int ipa_mpm_panic_handler(char *buf, int size)
  2420. {
  2421. int i;
  2422. int cnt = 0;
  2423. cnt = scnprintf(buf, size,
  2424. "\n---- MHIP Active Clients Table ----\n");
  2425. cnt += scnprintf(buf + cnt, size - cnt,
  2426. "Total PCIe active clients count: %d\n",
  2427. atomic_read(&ipa_mpm_ctx->pcie_clk_total_cnt));
  2428. cnt += scnprintf(buf + cnt, size - cnt,
  2429. "Total IPA active clients count: %d\n",
  2430. atomic_read(&ipa_mpm_ctx->ipa_clk_total_cnt));
  2431. for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
  2432. cnt += scnprintf(buf + cnt, size - cnt,
  2433. "client id: %d ipa vote cnt: %d pcie vote cnt\n", i,
  2434. atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.ipa_clk_cnt),
  2435. atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.pcie_clk_cnt));
  2436. }
  2437. return cnt;
  2438. }
  2439. /**
  2440. * ipa3_get_mhip_gsi_stats() - Query MHIP gsi stats from uc
  2441. * @stats: [inout] stats blob from client populated by driver
  2442. *
  2443. * Returns: 0 on success, negative on failure
  2444. *
  2445. * @note Cannot be called from atomic context
  2446. *
  2447. */
  2448. int ipa3_get_mhip_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
  2449. {
  2450. int i;
  2451. if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
  2452. IPAERR("bad parms NULL mhip_gsi_stats_mmio\n");
  2453. return -EINVAL;
  2454. }
  2455. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  2456. for (i = 0; i < MAX_MHIP_CHANNELS; i++) {
  2457. stats->ring[i].ringFull = ioread32(
  2458. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2459. + i * IPA3_UC_DEBUG_STATS_OFF +
  2460. IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
  2461. stats->ring[i].ringEmpty = ioread32(
  2462. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2463. + i * IPA3_UC_DEBUG_STATS_OFF +
  2464. IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
  2465. stats->ring[i].ringUsageHigh = ioread32(
  2466. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2467. + i * IPA3_UC_DEBUG_STATS_OFF +
  2468. IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
  2469. stats->ring[i].ringUsageLow = ioread32(
  2470. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2471. + i * IPA3_UC_DEBUG_STATS_OFF +
  2472. IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
  2473. stats->ring[i].RingUtilCount = ioread32(
  2474. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
  2475. + i * IPA3_UC_DEBUG_STATS_OFF +
  2476. IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
  2477. }
  2478. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  2479. return 0;
  2480. }
/* Register late so IPA core and dependencies have a chance to init first */
late_initcall(ipa_mpm_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Proxy Manager Driver");