  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include <linux/export.h>
  7. #include <linux/delay.h>
  8. #include <linux/module.h>
  9. #include <linux/mutex.h>
  10. #include <linux/ipa.h>
  11. #include <linux/msm_gsi.h>
  12. #include <linux/ipa_qmi_service_v01.h>
  13. #include <linux/ipa_mhi.h>
  14. #include "../ipa_common_i.h"
  15. #include "../ipa_v3/ipa_pm.h"
#define IPA_MHI_DRV_NAME "ipa_mhi_client"

/*
 * Debug/error logging: each macro prints to dmesg and mirrors the message
 * into the IPA IPC log buffer(s).
 * NOTE(review): the IPC variants reuse the " %s:%d " format but do not pass
 * __func__/__LINE__ explicitly — presumably IPA_IPC_LOGGING supplies them;
 * verify against its definition.
 */
#define IPA_MHI_DBG(fmt, args...) \
	do { \
		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Low-priority debug: dmesg + low-prio IPC log only */
#define IPA_MHI_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Error logging: pr_err + both IPC log buffers */
#define IPA_MHI_ERR(fmt, args...) \
	do { \
		pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Function entry/exit trace helpers */
#define IPA_MHI_FUNC_ENTRY() \
	IPA_MHI_DBG("ENTRY\n")
#define IPA_MHI_FUNC_EXIT() \
	IPA_MHI_DBG("EXIT\n")

/* Timeouts and limits (msec / usec ranges) */
#define IPA_MHI_RM_TIMEOUT_MSEC 10000
#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10
/* usleep_range() bounds used while polling during suspend (usec) */
#define IPA_MHI_SUSPEND_SLEEP_MIN 900
#define IPA_MHI_SUSPEND_SLEEP_MAX 1100
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 2

/* bit #40 in address should be asserted for MHI transfers over pcie */
#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
	((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
/* State of the IPA RM MHI consumer resource (see ipa_mhi_rm_cons_*) */
enum ipa_mhi_rm_state {
	IPA_MHI_RM_STATE_RELEASED,
	IPA_MHI_RM_STATE_REQUESTED,
	IPA_MHI_RM_STATE_GRANTED,
	IPA_MHI_RM_STATE_MAX
};
/*
 * IPA MHI driver state machine states; transitions are validated in
 * ipa_mhi_set_state() under state_lock.
 */
enum ipa_mhi_state {
	IPA_MHI_STATE_INITIALIZED,
	IPA_MHI_STATE_READY,
	IPA_MHI_STATE_STARTED,
	IPA_MHI_STATE_SUSPEND_IN_PROGRESS,
	IPA_MHI_STATE_SUSPENDED,
	IPA_MHI_STATE_RESUME_IN_PROGRESS,
	IPA_MHI_STATE_MAX
};
/* Printable names for enum ipa_mhi_state, indexed by state value */
static char *ipa_mhi_state_str[] = {
	__stringify(IPA_MHI_STATE_INITIALIZED),
	__stringify(IPA_MHI_STATE_READY),
	__stringify(IPA_MHI_STATE_STARTED),
	__stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS),
	__stringify(IPA_MHI_STATE_SUSPENDED),
	__stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS),
};

/* Bounds-checked lookup into ipa_mhi_state_str; "INVALID" out of range */
#define MHI_STATE_STR(state) \
	(((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \
		ipa_mhi_state_str[(state)] : \
		"INVALID")
/* Direction of a host<->device copy in ipa_mhi_read_write_host() */
enum ipa_mhi_dma_dir {
	IPA_MHI_DMA_TO_HOST,
	IPA_MHI_DMA_FROM_HOST,
};
/**
 * struct ipa_mhi_channel_ctx - MHI Channel context
 * @valid: entry is valid
 * @id: MHI channel ID
 * @index: channel index within the UL/DL channel array
 * @client: IPA Client
 * @state: Channel state
 * @stop_in_proc: channel stop is in progress
 * @ch_info: cached GSI channel info
 * @channel_context_addr: address of the channel context in host memory
 * @ch_ctx_host: local copy of the host channel context
 * @event_context_addr: address of the event ring context in host memory
 * @ev_ctx_host: local copy of the host event ring context
 * @brstmode_enabled: burst mode enabled for this channel
 * @ch_scratch: GSI channel scratch registers image
 * @cached_gsi_evt_ring_hdl: cached GSI event ring handle
 *
 * NOTE(review): the original kerneldoc listed "@hdl: channel handle for uC"
 * but no such member exists; documentation updated to match the struct.
 */
struct ipa_mhi_channel_ctx {
	bool valid;
	u8 id;
	u8 index;
	enum ipa_client_type client;
	enum ipa_hw_mhi_channel_states state;
	bool stop_in_proc;
	struct gsi_chan_info ch_info;
	u64 channel_context_addr;
	struct ipa_mhi_ch_ctx ch_ctx_host;
	u64 event_context_addr;
	struct ipa_mhi_ev_ctx ev_ctx_host;
	bool brstmode_enabled;
	union __packed gsi_channel_scratch ch_scratch;
	unsigned long cached_gsi_evt_ring_hdl;
};
/**
 * struct ipa_mhi_client_ctx - IPA MHI client driver context (singleton)
 * @state: current IPA MHI state-machine state, protected by @state_lock
 * @state_lock: protects @state, @rm_cons_state and wakeup bookkeeping
 * @cb_notify: MHI client callback for wakeup/ready events
 * @cb_priv: opaque pointer passed back to @cb_notify
 * @rm_prod_granted_comp: completed when MHI PROD resource is granted
 * @rm_cons_state: state of the MHI CONS RM resource
 * @rm_cons_comp: completed when MHI CONS is released
 * @trigger_wakeup: send wakeup notification once suspend/resume completes
 * @wakeup_notified: wakeup already queued; suppress duplicates until STARTED
 * @wq: workqueue for wakeup/ready notification work items
 * @ul_channels: uplink (device->host) channel contexts
 * @dl_channels: downlink (host->device) channel contexts
 * @total_channels: number of channels registered so far
 * @msi: MSI configuration provided at start
 * @mmio_addr: MHI MMIO base address
 * @first_ch_idx: first hardware channel index
 * @first_er_idx: first event ring index
 * @host_ctrl_addr: host control-region base address
 * @host_data_addr: host data-region base address
 * @channel_context_array_addr: host address of the channel context array
 * @event_context_array_addr: host address of the event context array
 * @qmi_req_id: running QMI request identifier
 * @use_ipadma: use IPA DMA for host reads/writes (debugfs-tunable)
 * @assert_bit40: assert PCIe bit 40 on host addresses
 * @test_mode: test mode — host memory accessed via phys_to_virt()
 * @pm_hdl: IPA PM client handle
 * @modem_pm_hdl: IPA PM handle for the modem resource
 */
struct ipa_mhi_client_ctx {
	enum ipa_mhi_state state;
	spinlock_t state_lock;
	mhi_client_cb cb_notify;
	void *cb_priv;
	struct completion rm_prod_granted_comp;
	enum ipa_mhi_rm_state rm_cons_state;
	struct completion rm_cons_comp;
	bool trigger_wakeup;
	bool wakeup_notified;
	struct workqueue_struct *wq;
	struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS];
	struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS];
	u32 total_channels;
	struct ipa_mhi_msi_info msi;
	u32 mmio_addr;
	u32 first_ch_idx;
	u32 first_er_idx;
	u32 host_ctrl_addr;
	u32 host_data_addr;
	u64 channel_context_array_addr;
	u64 event_context_array_addr;
	u32 qmi_req_id;
	u32 use_ipadma;
	bool assert_bit40;
	bool test_mode;
	u32 pm_hdl;
	u32 modem_pm_hdl;
};
/* Singleton driver context; allocated at init (not visible in this chunk) */
static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx;
/* Serializes "general" client operations via ipa_mhi_set_lock_unlock() */
static DEFINE_MUTEX(mhi_client_general_mutex);

#ifdef CONFIG_DEBUG_FS
#define IPA_MHI_MAX_MSG_LEN 512
/* Shared scratch buffer for debugfs reads (debugfs reads are serialized) */
static char dbg_buff[IPA_MHI_MAX_MSG_LEN];
/* Root of the "ipa_mhi" debugfs directory */
static struct dentry *dent;

/* Printable names for enum ipa_hw_mhi_channel_states, indexed by value */
static char *ipa_mhi_channel_state_str[] = {
	__stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_RUN),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_STOP),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR),
};

/* Bounds-checked lookup into ipa_mhi_channel_state_str */
#define MHI_CH_STATE_STR(state) \
	(((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \
		ipa_mhi_channel_state_str[(state)] : \
		"INVALID")
  157. static int ipa_mhi_set_lock_unlock(bool is_lock)
  158. {
  159. IPA_MHI_DBG("entry\n");
  160. if (is_lock)
  161. mutex_lock(&mhi_client_general_mutex);
  162. else
  163. mutex_unlock(&mhi_client_general_mutex);
  164. IPA_MHI_DBG("exit\n");
  165. return 0;
  166. }
/*
 * ipa_mhi_read_write_host() - copy a region between device memory and host
 * @dir: IPA_MHI_DMA_FROM_HOST to read host memory into @dev_addr,
 *       otherwise write @dev_addr out to host memory
 * @dev_addr: local (device-side) buffer
 * @host_addr: host physical address of the region
 * @size: number of bytes to copy
 *
 * When use_ipadma is set, the copy is bounced through a coherent DMA buffer
 * and performed by the IPA DMA engine; otherwise the host region is mapped
 * (ioremap, or phys_to_virt in test mode) and copied with memcpy.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
	u64 host_addr, int size)
{
	struct ipa_mem_buffer mem;
	int res;
	struct device *pdev;

	IPA_MHI_FUNC_ENTRY();

	if (ipa_mhi_client_ctx->use_ipadma) {
		pdev = ipa_get_dma_dev();
		/* assert PCIe bit 40 if configured */
		host_addr = IPA_MHI_CLIENT_HOST_ADDR_COND(host_addr);
		mem.size = size;
		mem.base = dma_alloc_coherent(pdev, mem.size,
			&mem.phys_base, GFP_KERNEL);
		if (!mem.base) {
			IPA_MHI_ERR(
				"dma_alloc_coherent failed, DMA buff size %d\n"
				, mem.size);
			return -ENOMEM;
		}
		res = ipa_dma_enable();
		if (res) {
			IPA_MHI_ERR("failed to enable IPA DMA rc=%d\n", res);
			goto fail_dma_enable;
		}
		if (dir == IPA_MHI_DMA_FROM_HOST) {
			/* host -> bounce buffer -> caller's buffer */
			res = ipa_dma_sync_memcpy(mem.phys_base, host_addr,
				size);
			if (res) {
				IPA_MHI_ERR(
					"ipa_dma_sync_memcpy from host fail%d\n"
					, res);
				goto fail_memcopy;
			}
			memcpy(dev_addr, mem.base, size);
		} else {
			/* caller's buffer -> bounce buffer -> host */
			memcpy(mem.base, dev_addr, size);
			res = ipa_dma_sync_memcpy(host_addr, mem.phys_base,
				size);
			if (res) {
				IPA_MHI_ERR(
					"ipa_dma_sync_memcpy to host fail %d\n"
					, res);
				goto fail_memcopy;
			}
		}
		goto dma_succeed;
	} else {
		void *host_ptr;

		/*
		 * NOTE(review): unlike the DMA path, host_addr is not run
		 * through IPA_MHI_CLIENT_HOST_ADDR_COND() here — confirm
		 * whether bit-40 assertion is intentionally skipped for
		 * the mapped-access path.
		 */
		if (!ipa_mhi_client_ctx->test_mode)
			host_ptr = ioremap(host_addr, size);
		else
			host_ptr = phys_to_virt(host_addr);
		if (!host_ptr) {
			/* message also covers the phys_to_virt case */
			IPA_MHI_ERR("ioremap failed for 0x%llx\n", host_addr);
			return -EFAULT;
		}
		if (dir == IPA_MHI_DMA_FROM_HOST)
			memcpy(dev_addr, host_ptr, size);
		else
			memcpy(host_ptr, dev_addr, size);
		if (!ipa_mhi_client_ctx->test_mode)
			iounmap(host_ptr);
	}

	IPA_MHI_FUNC_EXIT();
	return 0;

dma_succeed:
	IPA_MHI_FUNC_EXIT();
	res = 0;
	/* deliberate fall-through: success shares the DMA cleanup below */
fail_memcopy:
	if (ipa_dma_disable())
		IPA_MHI_ERR("failed to disable IPA DMA\n");
fail_dma_enable:
	dma_free_coherent(pdev, mem.size, mem.base, mem.phys_base);
	return res;
}
  242. static int ipa_mhi_print_channel_info(struct ipa_mhi_channel_ctx *channel,
  243. char *buff, int len)
  244. {
  245. int nbytes = 0;
  246. if (channel->valid) {
  247. nbytes += scnprintf(&buff[nbytes],
  248. len - nbytes,
  249. "channel idx=%d ch_id=%d client=%d state=%s\n",
  250. channel->index, channel->id, channel->client,
  251. MHI_CH_STATE_STR(channel->state));
  252. nbytes += scnprintf(&buff[nbytes],
  253. len - nbytes,
  254. " ch_ctx=%llx\n",
  255. channel->channel_context_addr);
  256. nbytes += scnprintf(&buff[nbytes],
  257. len - nbytes,
  258. " gsi_evt_ring_hdl=%ld ev_ctx=%llx\n",
  259. channel->cached_gsi_evt_ring_hdl,
  260. channel->event_context_addr);
  261. }
  262. return nbytes;
  263. }
/*
 * ipa_mhi_print_host_channel_ctx_info() - dump a channel's host-side context
 * @channel: channel whose host context should be fetched and printed
 * @buff: destination buffer
 * @len: destination buffer size
 *
 * Reads the channel context structure from host memory and formats its
 * fields; on read failure a single error line is emitted instead.
 *
 * Return: number of bytes written to @buff.
 */
static int ipa_mhi_print_host_channel_ctx_info(
		struct ipa_mhi_channel_ctx *channel, char *buff, int len)
{
	int res, nbytes = 0;
	struct ipa_mhi_ch_ctx ch_ctx_host;

	memset(&ch_ctx_host, 0, sizeof(ch_ctx_host));

	/* reading ch context from host */
	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
		&ch_ctx_host, channel->channel_context_addr,
		sizeof(ch_ctx_host));
	if (res) {
		nbytes += scnprintf(&buff[nbytes], len - nbytes,
			"Failed to read from host %d\n", res);
		return nbytes;
	}

	nbytes += scnprintf(&buff[nbytes], len - nbytes,
		"ch_id: %d\n", channel->id);
	nbytes += scnprintf(&buff[nbytes], len - nbytes,
		"chstate: 0x%x\n", ch_ctx_host.chstate);
	nbytes += scnprintf(&buff[nbytes], len - nbytes,
		"brstmode: 0x%x\n", ch_ctx_host.brstmode);
	nbytes += scnprintf(&buff[nbytes], len - nbytes,
		"chtype: 0x%x\n", ch_ctx_host.chtype);
	nbytes += scnprintf(&buff[nbytes], len - nbytes,
		"erindex: 0x%x\n", ch_ctx_host.erindex);
	nbytes += scnprintf(&buff[nbytes], len - nbytes,
		"rbase: 0x%llx\n", ch_ctx_host.rbase);
	nbytes += scnprintf(&buff[nbytes], len - nbytes,
		"rlen: 0x%llx\n", ch_ctx_host.rlen);
	nbytes += scnprintf(&buff[nbytes], len - nbytes,
		"rp: 0x%llx\n", ch_ctx_host.rp);
	nbytes += scnprintf(&buff[nbytes], len - nbytes,
		"wp: 0x%llx\n", ch_ctx_host.wp);

	return nbytes;
}
  299. static ssize_t ipa_mhi_debugfs_stats(struct file *file,
  300. char __user *ubuf,
  301. size_t count,
  302. loff_t *ppos)
  303. {
  304. int nbytes = 0;
  305. int i;
  306. struct ipa_mhi_channel_ctx *channel;
  307. nbytes += scnprintf(&dbg_buff[nbytes],
  308. IPA_MHI_MAX_MSG_LEN - nbytes,
  309. "IPA MHI state: %s\n",
  310. MHI_STATE_STR(ipa_mhi_client_ctx->state));
  311. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  312. channel = &ipa_mhi_client_ctx->ul_channels[i];
  313. nbytes += ipa_mhi_print_channel_info(channel,
  314. &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
  315. }
  316. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  317. channel = &ipa_mhi_client_ctx->dl_channels[i];
  318. nbytes += ipa_mhi_print_channel_info(channel,
  319. &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
  320. }
  321. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  322. }
  323. static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file,
  324. char __user *ubuf,
  325. size_t count,
  326. loff_t *ppos)
  327. {
  328. int nbytes = 0;
  329. nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN);
  330. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  331. }
/*
 * ipa_mhi_debugfs_dump_host_ch_ctx_arr() - debugfs read handler that dumps
 * the host-side channel context of every valid UL/DL channel.
 *
 * Refuses to touch host memory before the driver is STARTED, or while
 * SUSPENDED (PCIe link may be in D3); in those cases an explanatory
 * message is returned instead.
 */
static ssize_t ipa_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file,
	char __user *ubuf,
	size_t count,
	loff_t *ppos)
{
	int i, nbytes = 0;
	struct ipa_mhi_channel_ctx *channel;

	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_INITIALIZED ||
	    ipa_mhi_client_ctx->state == IPA_MHI_STATE_READY) {
		nbytes += scnprintf(&dbg_buff[nbytes],
		IPA_MHI_MAX_MSG_LEN - nbytes,
			"Cannot dump host channel context ");
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPA_MHI_MAX_MSG_LEN - nbytes,
			"before IPA MHI was STARTED\n");
		return simple_read_from_buffer(ubuf, count, ppos,
			dbg_buff, nbytes);
	}
	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPA_MHI_MAX_MSG_LEN - nbytes,
			"IPA MHI is suspended, cannot dump channel ctx array");
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPA_MHI_MAX_MSG_LEN - nbytes,
			" from host -PCIe can be in D3 state\n");
		return simple_read_from_buffer(ubuf, count, ppos,
			dbg_buff, nbytes);
	}

	/* NOTE(review): "contex" typo is in the emitted string; left as-is */
	nbytes += scnprintf(&dbg_buff[nbytes],
		IPA_MHI_MAX_MSG_LEN - nbytes,
		"channel contex array - dump from host\n");
	nbytes += scnprintf(&dbg_buff[nbytes],
		IPA_MHI_MAX_MSG_LEN - nbytes,
		"***** UL channels *******\n");

	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
		channel = &ipa_mhi_client_ctx->ul_channels[i];
		if (!channel->valid)
			continue;
		nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
			&dbg_buff[nbytes],
			IPA_MHI_MAX_MSG_LEN - nbytes);
	}

	nbytes += scnprintf(&dbg_buff[nbytes],
		IPA_MHI_MAX_MSG_LEN - nbytes,
		"\n***** DL channels *******\n");

	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
		channel = &ipa_mhi_client_ctx->dl_channels[i];
		if (!channel->valid)
			continue;
		nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
			&dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
	}

	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
/*
 * debugfs file operation tables (read-only nodes).
 * NOTE(review): these are only referenced from this file's debugfs setup —
 * they could likely be declared static; confirm no other TU externs them.
 */
const struct file_operations ipa_mhi_stats_ops = {
	.read = ipa_mhi_debugfs_stats,
};

const struct file_operations ipa_mhi_uc_stats_ops = {
	.read = ipa_mhi_debugfs_uc_stats,
};

const struct file_operations ipa_mhi_dump_host_ch_ctx_ops = {
	.read = ipa_mhi_debugfs_dump_host_ch_ctx_arr,
};
  395. static void ipa_mhi_debugfs_init(void)
  396. {
  397. const mode_t read_only_mode = 0444;
  398. const mode_t read_write_mode = 0664;
  399. struct dentry *file;
  400. IPA_MHI_FUNC_ENTRY();
  401. dent = debugfs_create_dir("ipa_mhi", 0);
  402. if (IS_ERR(dent)) {
  403. IPA_MHI_ERR("fail to create folder ipa_mhi\n");
  404. return;
  405. }
  406. file = debugfs_create_file("stats", read_only_mode, dent,
  407. 0, &ipa_mhi_stats_ops);
  408. if (!file || IS_ERR(file)) {
  409. IPA_MHI_ERR("fail to create file stats\n");
  410. goto fail;
  411. }
  412. file = debugfs_create_file("uc_stats", read_only_mode, dent,
  413. 0, &ipa_mhi_uc_stats_ops);
  414. if (!file || IS_ERR(file)) {
  415. IPA_MHI_ERR("fail to create file uc_stats\n");
  416. goto fail;
  417. }
  418. file = debugfs_create_u32("use_ipadma", read_write_mode, dent,
  419. &ipa_mhi_client_ctx->use_ipadma);
  420. if (!file || IS_ERR(file)) {
  421. IPA_MHI_ERR("fail to create file use_ipadma\n");
  422. goto fail;
  423. }
  424. file = debugfs_create_file("dump_host_channel_ctx_array",
  425. read_only_mode, dent, 0, &ipa_mhi_dump_host_ch_ctx_ops);
  426. if (!file || IS_ERR(file)) {
  427. IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n");
  428. goto fail;
  429. }
  430. IPA_MHI_FUNC_EXIT();
  431. return;
  432. fail:
  433. debugfs_remove_recursive(dent);
  434. }
#else
/* debugfs disabled: no-op stubs */
static void ipa_mhi_debugfs_init(void) {}
static void ipa_mhi_debugfs_destroy(void) {}
#endif /* CONFIG_DEBUG_FS */

/* Cached DL/UL sync command data, replayed to uC as needed */
static union IpaHwMhiDlUlSyncCmdData_t ipa_cached_dl_ul_sync_info;

/* Work items that deliver wakeup/ready notifications to the MHI client */
static void ipa_mhi_wq_notify_wakeup(struct work_struct *work);
static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup);
static void ipa_mhi_wq_notify_ready(struct work_struct *work);
static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready);
  444. /**
  445. * ipa_mhi_notify_wakeup() - Schedule work to notify data available
  446. *
  447. * This function will schedule a work to notify data available event.
  448. * In case this function is called more than once, only one notification will
  449. * be sent to MHI client driver. No further notifications will be sent until
  450. * IPA MHI state will become STARTED.
  451. */
  452. static void ipa_mhi_notify_wakeup(void)
  453. {
  454. IPA_MHI_FUNC_ENTRY();
  455. if (ipa_mhi_client_ctx->wakeup_notified) {
  456. IPA_MHI_DBG("wakeup already called\n");
  457. return;
  458. }
  459. queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_wakeup_work);
  460. ipa_mhi_client_ctx->wakeup_notified = true;
  461. IPA_MHI_FUNC_EXIT();
  462. }
  463. /**
  464. * ipa_mhi_rm_cons_request() - callback function for IPA RM request resource
  465. *
  466. * In case IPA MHI is not suspended, MHI CONS will be granted immediately.
  467. * In case IPA MHI is suspended, MHI CONS will be granted after resume.
  468. */
  469. static int ipa_mhi_rm_cons_request(void)
  470. {
  471. unsigned long flags;
  472. int res;
  473. IPA_MHI_FUNC_ENTRY();
  474. IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
  475. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  476. ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED;
  477. if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_STARTED) {
  478. ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
  479. res = 0;
  480. } else if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
  481. ipa_mhi_notify_wakeup();
  482. res = -EINPROGRESS;
  483. } else if (ipa_mhi_client_ctx->state ==
  484. IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
  485. /* wakeup event will be trigger after suspend finishes */
  486. ipa_mhi_client_ctx->trigger_wakeup = true;
  487. res = -EINPROGRESS;
  488. } else {
  489. res = -EINPROGRESS;
  490. }
  491. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  492. IPA_MHI_DBG("EXIT with %d\n", res);
  493. return res;
  494. }
  495. static int ipa_mhi_rm_cons_release(void)
  496. {
  497. unsigned long flags;
  498. IPA_MHI_FUNC_ENTRY();
  499. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  500. ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
  501. complete_all(&ipa_mhi_client_ctx->rm_cons_comp);
  502. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  503. IPA_MHI_FUNC_EXIT();
  504. return 0;
  505. }
  506. static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event,
  507. unsigned long data)
  508. {
  509. IPA_MHI_FUNC_ENTRY();
  510. switch (event) {
  511. case IPA_RM_RESOURCE_GRANTED:
  512. IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n");
  513. complete_all(&ipa_mhi_client_ctx->rm_prod_granted_comp);
  514. break;
  515. case IPA_RM_RESOURCE_RELEASED:
  516. IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n");
  517. break;
  518. default:
  519. IPA_MHI_ERR("unexpected event %d\n", event);
  520. WARN_ON(1);
  521. break;
  522. }
  523. IPA_MHI_FUNC_EXIT();
  524. }
  525. /**
  526. * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
  527. *
  528. * This function is called from IPA MHI workqueue to notify
  529. * MHI client driver on data available event.
  530. */
  531. static void ipa_mhi_wq_notify_wakeup(struct work_struct *work)
  532. {
  533. IPA_MHI_FUNC_ENTRY();
  534. ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
  535. IPA_MHI_EVENT_DATA_AVAILABLE, 0);
  536. IPA_MHI_FUNC_EXIT();
  537. }
  538. /**
  539. * ipa_mhi_wq_notify_ready() - Notify MHI client on ready
  540. *
  541. * This function is called from IPA MHI workqueue to notify
  542. * MHI client driver on ready event when IPA uC is loaded
  543. */
  544. static void ipa_mhi_wq_notify_ready(struct work_struct *work)
  545. {
  546. IPA_MHI_FUNC_ENTRY();
  547. ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
  548. IPA_MHI_EVENT_READY, 0);
  549. IPA_MHI_FUNC_EXIT();
  550. }
  551. /**
  552. * ipa_mhi_notify_ready() - Schedule work to notify ready
  553. *
  554. * This function will schedule a work to notify ready event.
  555. */
  556. static void ipa_mhi_notify_ready(void)
  557. {
  558. IPA_MHI_FUNC_ENTRY();
  559. queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_ready_work);
  560. IPA_MHI_FUNC_EXIT();
  561. }
/**
 * ipa_mhi_set_state() - Set new state to IPA MHI
 * @new_state: state to transition the IPA MHI state machine to
 *
 * Sets a new state to IPA MHI if possible according to IPA MHI state machine.
 * In some state transitions a wakeup request will be triggered.
 *
 * The whole transition is evaluated and applied under state_lock so it is
 * atomic with respect to the uC callbacks and the suspend/resume paths.
 *
 * Returns: 0 on success, -EPERM if the transition is not allowed
 */
static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
{
	unsigned long flags;
	int res = -EPERM;	/* assume illegal transition until matched */

	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
	IPA_MHI_DBG("Current state: %s\n",
		MHI_STATE_STR(ipa_mhi_client_ctx->state));
	switch (ipa_mhi_client_ctx->state) {
	case IPA_MHI_STATE_INITIALIZED:
		/* uC became ready - notify the MHI client asynchronously */
		if (new_state == IPA_MHI_STATE_READY) {
			ipa_mhi_notify_ready();
			res = 0;
		}
		break;
	case IPA_MHI_STATE_READY:
		/* READY -> READY is tolerated (repeated uC ready events) */
		if (new_state == IPA_MHI_STATE_READY)
			res = 0;
		if (new_state == IPA_MHI_STATE_STARTED)
			res = 0;
		break;
	case IPA_MHI_STATE_STARTED:
		/* INITIALIZED: MHI reset; SUSPEND_IN_PROGRESS: suspending */
		if (new_state == IPA_MHI_STATE_INITIALIZED)
			res = 0;
		else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
			res = 0;
		break;
	case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
		if (new_state == IPA_MHI_STATE_SUSPENDED) {
			/* a wakeup arrived mid-suspend; deliver it now */
			if (ipa_mhi_client_ctx->trigger_wakeup) {
				ipa_mhi_client_ctx->trigger_wakeup = false;
				ipa_mhi_notify_wakeup();
			}
			res = 0;
		} else if (new_state == IPA_MHI_STATE_STARTED) {
			/* suspend aborted - back to STARTED */
			ipa_mhi_client_ctx->wakeup_notified = false;
			ipa_mhi_client_ctx->trigger_wakeup = false;
			/* grant a CONS request that arrived while suspending */
			if (ipa_mhi_client_ctx->rm_cons_state ==
				IPA_MHI_RM_STATE_REQUESTED) {
				ipa_rm_notify_completion(
					IPA_RM_RESOURCE_GRANTED,
					IPA_RM_RESOURCE_MHI_CONS);
				ipa_mhi_client_ctx->rm_cons_state =
					IPA_MHI_RM_STATE_GRANTED;
			}
			res = 0;
		}
		break;
	case IPA_MHI_STATE_SUSPENDED:
		if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS)
			res = 0;
		break;
	case IPA_MHI_STATE_RESUME_IN_PROGRESS:
		if (new_state == IPA_MHI_STATE_SUSPENDED) {
			/* resume did not complete; fire any deferred wakeup */
			if (ipa_mhi_client_ctx->trigger_wakeup) {
				ipa_mhi_client_ctx->trigger_wakeup = false;
				ipa_mhi_notify_wakeup();
			}
			res = 0;
		} else if (new_state == IPA_MHI_STATE_STARTED) {
			ipa_mhi_client_ctx->trigger_wakeup = false;
			ipa_mhi_client_ctx->wakeup_notified = false;
			/* grant a CONS request that arrived while resuming */
			if (ipa_mhi_client_ctx->rm_cons_state ==
				IPA_MHI_RM_STATE_REQUESTED) {
				ipa_rm_notify_completion(
					IPA_RM_RESOURCE_GRANTED,
					IPA_RM_RESOURCE_MHI_CONS);
				ipa_mhi_client_ctx->rm_cons_state =
					IPA_MHI_RM_STATE_GRANTED;
			}
			res = 0;
		}
		break;
	default:
		IPA_MHI_ERR("Invalid state %d\n", ipa_mhi_client_ctx->state);
		WARN_ON(1);
	}
	if (res)
		IPA_MHI_ERR("Invalid state change to %s\n",
			MHI_STATE_STR(new_state));
	else {
		IPA_MHI_DBG("New state change to %s\n",
			MHI_STATE_STR(new_state));
		ipa_mhi_client_ctx->state = new_state;
	}
	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
	return res;
}
  658. static void ipa_mhi_uc_ready_cb(void)
  659. {
  660. IPA_MHI_FUNC_ENTRY();
  661. ipa_mhi_set_state(IPA_MHI_STATE_READY);
  662. IPA_MHI_FUNC_EXIT();
  663. }
/*
 * uC callback: the device signalled a wakeup request. If fully suspended,
 * notify the MHI client immediately; if a suspend is still in progress,
 * defer the notification until the suspend completes.
 */
static void ipa_mhi_uc_wakeup_request_cb(void)
{
	unsigned long flags;

	IPA_MHI_FUNC_ENTRY();
	/* NOTE(review): state is read here without state_lock, for logging
	 * only; the authoritative check below is done under the lock.
	 */
	IPA_MHI_DBG("MHI state: %s\n",
		MHI_STATE_STR(ipa_mhi_client_ctx->state));
	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED)
		ipa_mhi_notify_wakeup();
	else if (ipa_mhi_client_ctx->state ==
			IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
		/* wakeup event will be triggered after suspend finishes */
		ipa_mhi_client_ctx->trigger_wakeup = true;
	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
	IPA_MHI_FUNC_EXIT();
}
  680. static int ipa_mhi_request_prod(void)
  681. {
  682. int res;
  683. IPA_MHI_FUNC_ENTRY();
  684. reinit_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
  685. IPA_MHI_DBG("requesting mhi prod\n");
  686. res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
  687. if (res) {
  688. if (res != -EINPROGRESS) {
  689. IPA_MHI_ERR("failed to request mhi prod %d\n", res);
  690. return res;
  691. }
  692. res = wait_for_completion_timeout(
  693. &ipa_mhi_client_ctx->rm_prod_granted_comp,
  694. msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
  695. if (res == 0) {
  696. IPA_MHI_ERR("timeout request mhi prod\n");
  697. return -ETIME;
  698. }
  699. }
  700. IPA_MHI_DBG("mhi prod granted\n");
  701. IPA_MHI_FUNC_EXIT();
  702. return 0;
  703. }
  704. static int ipa_mhi_release_prod(void)
  705. {
  706. int res;
  707. IPA_MHI_FUNC_ENTRY();
  708. res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
  709. IPA_MHI_FUNC_EXIT();
  710. return res;
  711. }
/**
 * ipa_mhi_start() - Start IPA MHI engine
 * @params: pcie addresses for MHI
 *
 * This function is called by MHI client driver on MHI engine start for
 * handling MHI accelerated channels. This function is called after
 * ipa_mhi_init() was called and can be called after MHI reset to restart MHI
 * engine. When this function returns device can move to M0 state.
 *
 * Return codes: 0 : success
 * negative : error
 */
int ipa_mhi_start(struct ipa_mhi_start_params *params)
{
	int res;
	struct ipa_mhi_init_engine init_params;

	IPA_MHI_FUNC_ENTRY();
	if (!params) {
		IPA_MHI_ERR("null args\n");
		return -EINVAL;
	}
	if (!ipa_mhi_client_ctx) {
		IPA_MHI_ERR("not initialized\n");
		return -EPERM;
	}
	/* transition to STARTED; fails if the current state disallows it */
	res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state %d\n", res);
		return res;
	}
	/* cache the host PCIe addresses supplied by the MHI client driver */
	ipa_mhi_client_ctx->host_ctrl_addr = params->host_ctrl_addr;
	ipa_mhi_client_ctx->host_data_addr = params->host_data_addr;
	ipa_mhi_client_ctx->channel_context_array_addr =
		params->channel_context_array_addr;
	ipa_mhi_client_ctx->event_context_array_addr =
		params->event_context_array_addr;
	IPA_MHI_DBG("host_ctrl_addr 0x%x\n",
		ipa_mhi_client_ctx->host_ctrl_addr);
	IPA_MHI_DBG("host_data_addr 0x%x\n",
		ipa_mhi_client_ctx->host_data_addr);
	IPA_MHI_DBG("channel_context_array_addr 0x%llx\n",
		ipa_mhi_client_ctx->channel_context_array_addr);
	IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
		ipa_mhi_client_ctx->event_context_array_addr);
	if (ipa_pm_is_used()) {
		/* power-manager flavor: activate both PM clients */
		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
		if (res) {
			IPA_MHI_ERR("failed activate client %d\n", res);
			goto fail_pm_activate;
		}
		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
		if (res) {
			IPA_MHI_ERR("failed activate modem client %d\n", res);
			goto fail_pm_activate_modem;
		}
	} else {
		/* Add MHI <-> Q6 dependencies to IPA RM */
		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
			IPA_RM_RESOURCE_Q6_CONS);
		if (res && res != -EINPROGRESS) {
			IPA_MHI_ERR("failed to add dependency %d\n", res);
			goto fail_add_mhi_q6_dep;
		}
		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_MHI_CONS);
		if (res && res != -EINPROGRESS) {
			IPA_MHI_ERR("failed to add dependency %d\n", res);
			goto fail_add_q6_mhi_dep;
		}
		res = ipa_mhi_request_prod();
		if (res) {
			IPA_MHI_ERR("failed request prod %d\n", res);
			goto fail_request_prod;
		}
	}
	/* gsi params */
	init_params.gsi.first_ch_idx =
			ipa_mhi_client_ctx->first_ch_idx;
	/* uC params */
	init_params.uC.first_ch_idx =
			ipa_mhi_client_ctx->first_ch_idx;
	init_params.uC.first_er_idx =
			ipa_mhi_client_ctx->first_er_idx;
	init_params.uC.host_ctrl_addr = params->host_ctrl_addr;
	init_params.uC.host_data_addr = params->host_data_addr;
	init_params.uC.mmio_addr = ipa_mhi_client_ctx->mmio_addr;
	init_params.uC.msi = &ipa_mhi_client_ctx->msi;
	init_params.uC.ipa_cached_dl_ul_sync_info =
			&ipa_cached_dl_ul_sync_info;
	res = ipa_mhi_init_engine(&init_params);
	if (res) {
		IPA_MHI_ERR("IPA core failed to start MHI %d\n", res);
		goto fail_init_engine;
	}
	IPA_MHI_FUNC_EXIT();
	return 0;

	/*
	 * Unwind ladder: the RM labels only act when RM is in use and the
	 * PM labels only when the power manager is in use - the
	 * ipa_pm_is_used() guards keep the two paths independent even
	 * though the labels fall through each other.
	 */
fail_init_engine:
	if (!ipa_pm_is_used())
		ipa_mhi_release_prod();
fail_request_prod:
	if (!ipa_pm_is_used())
		ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_MHI_CONS);
fail_add_q6_mhi_dep:
	if (!ipa_pm_is_used())
		ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
			IPA_RM_RESOURCE_Q6_CONS);
fail_add_mhi_q6_dep:
	if (ipa_pm_is_used())
		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_pm_activate_modem:
	if (ipa_pm_is_used())
		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_pm_activate:
	/* roll the state machine back so a later start can retry */
	ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
	return res;
}
/**
 * ipa_mhi_get_channel_context() - Get corresponding channel context
 * @client: IPA client type; its direction (PROD/CONS) selects the UL or
 *          DL channel array
 * @channel_id: MHI channel ID to look up
 *
 * This function will return the corresponding channel context or allocate new
 * one in case channel context for channel does not exist.
 */
static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context(
	enum ipa_client_type client, u8 channel_id)
{
	int ch_idx;
	struct ipa_mhi_channel_ctx *channels;
	int max_channels;

	/* producers (toward IPA) are UL channels, consumers are DL */
	if (IPA_CLIENT_IS_PROD(client)) {
		channels = ipa_mhi_client_ctx->ul_channels;
		max_channels = IPA_MHI_MAX_UL_CHANNELS;
	} else {
		channels = ipa_mhi_client_ctx->dl_channels;
		max_channels = IPA_MHI_MAX_DL_CHANNELS;
	}
	/* find the channel context according to channel id */
	for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
		if (channels[ch_idx].valid &&
		    channels[ch_idx].id == channel_id)
			return &channels[ch_idx];
	}
	/* channel context does not exists, allocate a new one */
	for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
		if (!channels[ch_idx].valid)
			break;
	}
	if (ch_idx == max_channels) {
		IPA_MHI_ERR("no more channels available\n");
		return NULL;
	}
	channels[ch_idx].valid = true;
	channels[ch_idx].id = channel_id;
	/* index is a running counter over all channels ever allocated */
	channels[ch_idx].index = ipa_mhi_client_ctx->total_channels++;
	channels[ch_idx].client = client;
	channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
	return &channels[ch_idx];
}
  872. /**
  873. * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel
  874. * context
  875. * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe()
  876. *
  877. * This function will return the corresponding channel context or NULL in case
  878. * that channel does not exist.
  879. */
  880. static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl(
  881. u32 clnt_hdl)
  882. {
  883. int ch_idx;
  884. for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
  885. if (ipa_mhi_client_ctx->ul_channels[ch_idx].valid &&
  886. ipa_get_ep_mapping(
  887. ipa_mhi_client_ctx->ul_channels[ch_idx].client)
  888. == clnt_hdl)
  889. return &ipa_mhi_client_ctx->ul_channels[ch_idx];
  890. }
  891. for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
  892. if (ipa_mhi_client_ctx->dl_channels[ch_idx].valid &&
  893. ipa_get_ep_mapping(
  894. ipa_mhi_client_ctx->dl_channels[ch_idx].client)
  895. == clnt_hdl)
  896. return &ipa_mhi_client_ctx->dl_channels[ch_idx];
  897. }
  898. return NULL;
  899. }
  900. static void ipa_mhi_dump_ch_ctx(struct ipa_mhi_channel_ctx *channel)
  901. {
  902. IPA_MHI_DBG("ch_id %d\n", channel->id);
  903. IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate);
  904. IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode);
  905. IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg);
  906. IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype);
  907. IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex);
  908. IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase);
  909. IPA_MHI_DBG("rlen 0x%llx\n", channel->ch_ctx_host.rlen);
  910. IPA_MHI_DBG("rp 0x%llx\n", channel->ch_ctx_host.rp);
  911. IPA_MHI_DBG("wp 0x%llx\n", channel->ch_ctx_host.wp);
  912. }
  913. static void ipa_mhi_dump_ev_ctx(struct ipa_mhi_channel_ctx *channel)
  914. {
  915. IPA_MHI_DBG("ch_id %d event id %d\n", channel->id,
  916. channel->ch_ctx_host.erindex);
  917. IPA_MHI_DBG("intmodc 0x%x\n", channel->ev_ctx_host.intmodc);
  918. IPA_MHI_DBG("intmodt 0x%x\n", channel->ev_ctx_host.intmodt);
  919. IPA_MHI_DBG("ertype 0x%x\n", channel->ev_ctx_host.ertype);
  920. IPA_MHI_DBG("msivec 0x%x\n", channel->ev_ctx_host.msivec);
  921. IPA_MHI_DBG("rbase 0x%llx\n", channel->ev_ctx_host.rbase);
  922. IPA_MHI_DBG("rlen 0x%llx\n", channel->ev_ctx_host.rlen);
  923. IPA_MHI_DBG("rp 0x%llx\n", channel->ev_ctx_host.rp);
  924. IPA_MHI_DBG("wp 0x%llx\n", channel->ev_ctx_host.wp);
  925. }
/*
 * ipa_mhi_read_ch_ctx() - Fetch channel and event contexts from host memory.
 *
 * Reads the channel context for @channel from the host channel context
 * array, derives the matching event context address from the channel's
 * erindex, then reads that event context as well. Both are cached in
 * @channel (ch_ctx_host / ev_ctx_host) and dumped to the debug log.
 *
 * Return: 0 on success, negative value on a host read failure.
 */
static int ipa_mhi_read_ch_ctx(struct ipa_mhi_channel_ctx *channel)
{
	int res;

	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
		&channel->ch_ctx_host, channel->channel_context_addr,
		sizeof(channel->ch_ctx_host));
	if (res) {
		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
		return res;
	}
	ipa_mhi_dump_ch_ctx(channel);
	/* erindex read from the host selects the event context entry */
	channel->event_context_addr =
		ipa_mhi_client_ctx->event_context_array_addr +
		channel->ch_ctx_host.erindex * sizeof(struct ipa_mhi_ev_ctx);
	IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id,
		channel->event_context_addr);
	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
		&channel->ev_ctx_host, channel->event_context_addr,
		sizeof(channel->ev_ctx_host));
	if (res) {
		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
		return res;
	}
	ipa_mhi_dump_ev_ctx(channel);
	return 0;
}
  952. static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify)
  953. {
  954. struct ipa_mhi_channel_ctx *channel = notify->user_data;
  955. IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
  956. channel->id, channel->client, channel->state);
  957. switch (notify->evt_id) {
  958. case GSI_EVT_OUT_OF_BUFFERS_ERR:
  959. IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n");
  960. break;
  961. case GSI_EVT_OUT_OF_RESOURCES_ERR:
  962. IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n");
  963. break;
  964. case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
  965. IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
  966. break;
  967. case GSI_EVT_EVT_RING_EMPTY_ERR:
  968. IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n");
  969. break;
  970. default:
  971. IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
  972. }
  973. IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
  974. ipa_assert();
  975. }
  976. static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify)
  977. {
  978. struct ipa_mhi_channel_ctx *channel = notify->chan_user_data;
  979. IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
  980. channel->id, channel->client, channel->state);
  981. switch (notify->evt_id) {
  982. case GSI_CHAN_INVALID_TRE_ERR:
  983. IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
  984. break;
  985. case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
  986. IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
  987. break;
  988. case GSI_CHAN_OUT_OF_BUFFERS_ERR:
  989. IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
  990. break;
  991. case GSI_CHAN_OUT_OF_RESOURCES_ERR:
  992. IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
  993. break;
  994. case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
  995. IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
  996. break;
  997. case GSI_CHAN_HWO_1_ERR:
  998. IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n");
  999. break;
  1000. default:
  1001. IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
  1002. }
  1003. IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
  1004. ipa_assert();
  1005. }
  1006. static bool ipa_mhi_gsi_channel_empty(struct ipa_mhi_channel_ctx *channel)
  1007. {
  1008. IPA_MHI_FUNC_ENTRY();
  1009. if (!channel->stop_in_proc) {
  1010. IPA_MHI_DBG("Channel is not in STOP_IN_PROC\n");
  1011. return true;
  1012. }
  1013. if (ipa_mhi_stop_gsi_channel(channel->client)) {
  1014. channel->stop_in_proc = false;
  1015. return true;
  1016. }
  1017. return false;
  1018. }
/**
 * ipa_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink
 * @msecs: timeout to wait
 *
 * This function will poll until there are no packets pending in uplink
 * channels or timeout occurred.
 *
 * Return code: true - no pending packets in uplink channels
 * false - timeout occurred
 */
static bool ipa_mhi_wait_for_ul_empty_timeout(unsigned int msecs)
{
	unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
	unsigned long jiffies_start = jiffies;
	bool empty = false;
	int i;

	IPA_MHI_FUNC_ENTRY();
	while (!empty) {
		/* start optimistic; any busy channel clears the flag */
		empty = true;
		for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
			if (!ipa_mhi_client_ctx->ul_channels[i].valid)
				continue;
			if (ipa_get_transport_type() ==
				IPA_TRANSPORT_TYPE_GSI)
				empty &= ipa_mhi_gsi_channel_empty(
					&ipa_mhi_client_ctx->ul_channels[i]);
			else
				empty &= ipa_mhi_sps_channel_empty(
				ipa_mhi_client_ctx->ul_channels[i].client);
		}
		/* NOTE(review): on timeout we exit with empty possibly
		 * still false; the "finished waiting" text below is
		 * printed either way.
		 */
		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
			IPA_MHI_DBG("finished waiting for UL empty\n");
			break;
		}
		/* single GSI UL channel: sleep between polls rather than
		 * busy-spinning on the same channel
		 */
		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI &&
			IPA_MHI_MAX_UL_CHANNELS == 1)
			usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
				IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
	}
	IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty");
	IPA_MHI_FUNC_EXIT();
	return empty;
}
  1062. static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source)
  1063. {
  1064. struct ipa_enable_force_clear_datapath_req_msg_v01 req;
  1065. int i;
  1066. int res;
  1067. IPA_MHI_FUNC_ENTRY();
  1068. memset(&req, 0, sizeof(req));
  1069. req.request_id = request_id;
  1070. req.source_pipe_bitmask = 0;
  1071. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  1072. if (!ipa_mhi_client_ctx->ul_channels[i].valid)
  1073. continue;
  1074. req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping(
  1075. ipa_mhi_client_ctx->ul_channels[i].client);
  1076. }
  1077. if (throttle_source) {
  1078. req.throttle_source_valid = 1;
  1079. req.throttle_source = 1;
  1080. }
  1081. IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
  1082. req.request_id, req.source_pipe_bitmask,
  1083. req.throttle_source);
  1084. res = ipa_qmi_enable_force_clear_datapath_send(&req);
  1085. if (res) {
  1086. IPA_MHI_ERR(
  1087. "ipa_qmi_enable_force_clear_datapath_send failed %d\n"
  1088. , res);
  1089. return res;
  1090. }
  1091. IPA_MHI_FUNC_EXIT();
  1092. return 0;
  1093. }
  1094. static int ipa_mhi_disable_force_clear(u32 request_id)
  1095. {
  1096. struct ipa_disable_force_clear_datapath_req_msg_v01 req;
  1097. int res;
  1098. IPA_MHI_FUNC_ENTRY();
  1099. memset(&req, 0, sizeof(req));
  1100. req.request_id = request_id;
  1101. IPA_MHI_DBG("req_id=0x%x\n", req.request_id);
  1102. res = ipa_qmi_disable_force_clear_datapath_send(&req);
  1103. if (res) {
  1104. IPA_MHI_ERR(
  1105. "ipa_qmi_disable_force_clear_datapath_send failed %d\n"
  1106. , res);
  1107. return res;
  1108. }
  1109. IPA_MHI_FUNC_EXIT();
  1110. return 0;
  1111. }
  1112. static void ipa_mhi_set_holb_on_dl_channels(bool enable,
  1113. struct ipa_ep_cfg_holb old_holb[])
  1114. {
  1115. int i;
  1116. struct ipa_ep_cfg_holb ep_holb;
  1117. int ep_idx;
  1118. int res;
  1119. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  1120. if (!ipa_mhi_client_ctx->dl_channels[i].valid)
  1121. continue;
  1122. if (ipa_mhi_client_ctx->dl_channels[i].state ==
  1123. IPA_HW_MHI_CHANNEL_STATE_INVALID)
  1124. continue;
  1125. ep_idx = ipa_get_ep_mapping(
  1126. ipa_mhi_client_ctx->dl_channels[i].client);
  1127. if (-1 == ep_idx) {
  1128. IPA_MHI_ERR("Client %u is not mapped\n",
  1129. ipa_mhi_client_ctx->dl_channels[i].client);
  1130. ipa_assert();
  1131. return;
  1132. }
  1133. memset(&ep_holb, 0, sizeof(ep_holb));
  1134. if (enable) {
  1135. ipa_get_holb(ep_idx, &old_holb[i]);
  1136. ep_holb.en = 1;
  1137. ep_holb.tmr_val = 0;
  1138. } else {
  1139. ep_holb = old_holb[i];
  1140. }
  1141. res = ipa_cfg_ep_holb(ep_idx, &ep_holb);
  1142. if (res) {
  1143. IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res);
  1144. ipa_assert();
  1145. return;
  1146. }
  1147. }
  1148. }
  1149. static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel)
  1150. {
  1151. int clnt_hdl;
  1152. int res;
  1153. IPA_MHI_FUNC_ENTRY();
  1154. clnt_hdl = ipa_get_ep_mapping(channel->client);
  1155. if (clnt_hdl < 0)
  1156. return -EFAULT;
  1157. res = ipa_stop_gsi_channel(clnt_hdl);
  1158. if (res != 0 && res != -GSI_STATUS_AGAIN &&
  1159. res != -GSI_STATUS_TIMED_OUT) {
  1160. IPA_MHI_ERR("GSI stop channel failed %d\n", res);
  1161. return -EFAULT;
  1162. }
  1163. /* check if channel was stopped completely */
  1164. if (res)
  1165. channel->stop_in_proc = true;
  1166. IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ?
  1167. "STOP_IN_PROC" : "STOP");
  1168. IPA_MHI_FUNC_EXIT();
  1169. return 0;
  1170. }
/*
 * ipa_mhi_reset_ul_channel() - Stop and reset an uplink channel.
 *
 * Stops the channel (GSI suspend or uC reset), waits for the uplink
 * path to drain, and if packets are still pending, requests a modem
 * force-clear over QMI to flush them before resetting the channel in
 * the IPA core.
 *
 * Return: 0 on success, negative value otherwise.
 */
static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
{
	int res;
	bool empty;
	struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];

	IPA_MHI_FUNC_ENTRY();
	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		res = ipa_mhi_suspend_gsi_channel(channel);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n",
				res);
			return res;
		}
	} else {
		res = ipa_uc_mhi_reset_channel(channel->index);
		if (res) {
			IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
				res);
			return res;
		}
	}
	empty = ipa_mhi_wait_for_ul_empty_timeout(
			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
	if (!empty) {
		IPA_MHI_DBG("%s not empty\n",
			(ipa_get_transport_type() ==
				IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM");
		/* ask the modem to drop the in-flight UL packets */
		res = ipa_mhi_enable_force_clear(
				ipa_mhi_client_ctx->qmi_req_id, false);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n",
				res);
			ipa_assert();
			return res;
		}
		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
			/* GSI: re-poll after the force clear */
			empty = ipa_mhi_wait_for_ul_empty_timeout(
					IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
			IPA_MHI_DBG("empty=%d\n", empty);
		} else {
			/* enable packet drop on all DL channels */
			ipa_mhi_set_holb_on_dl_channels(true, old_ep_holb);
			ipa_generate_tag_process();
			/* disable packet drop on all DL channels */
			ipa_mhi_set_holb_on_dl_channels(false, old_ep_holb);
			res = ipa_disable_sps_pipe(channel->client);
			if (res) {
				IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
				ipa_assert();
				return res;
			}
		}
		res =
		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n",
				res);
			ipa_assert();
			return res;
		}
		/* each force-clear transaction uses a fresh request id */
		ipa_mhi_client_ctx->qmi_req_id++;
	}
	res = ipa_mhi_reset_channel_internal(channel->client);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_reset_ul_channel_internal failed %d\n"
			, res);
		return res;
	}
	IPA_MHI_FUNC_EXIT();
	return 0;
}
  1242. static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
  1243. {
  1244. int res;
  1245. IPA_MHI_FUNC_ENTRY();
  1246. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1247. res = ipa_mhi_suspend_gsi_channel(channel);
  1248. if (res) {
  1249. IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n"
  1250. , res);
  1251. return res;
  1252. }
  1253. res = ipa_mhi_reset_channel_internal(channel->client);
  1254. if (res) {
  1255. IPA_MHI_ERR(
  1256. "ipa_mhi_reset_ul_channel_internal failed %d\n"
  1257. , res);
  1258. return res;
  1259. }
  1260. } else {
  1261. res = ipa_mhi_reset_channel_internal(channel->client);
  1262. if (res) {
  1263. IPA_MHI_ERR(
  1264. "ipa_mhi_reset_ul_channel_internal failed %d\n"
  1265. , res);
  1266. return res;
  1267. }
  1268. res = ipa_uc_mhi_reset_channel(channel->index);
  1269. if (res) {
  1270. IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
  1271. res);
  1272. ipa_mhi_start_channel_internal(channel->client);
  1273. return res;
  1274. }
  1275. }
  1276. IPA_MHI_FUNC_EXIT();
  1277. return 0;
  1278. }
  1279. static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel)
  1280. {
  1281. int res;
  1282. IPA_MHI_FUNC_ENTRY();
  1283. if (IPA_CLIENT_IS_PROD(channel->client))
  1284. res = ipa_mhi_reset_ul_channel(channel);
  1285. else
  1286. res = ipa_mhi_reset_dl_channel(channel);
  1287. if (res) {
  1288. IPA_MHI_ERR("failed to reset channel error %d\n", res);
  1289. return res;
  1290. }
  1291. channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
  1292. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1293. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1294. &channel->state, channel->channel_context_addr +
  1295. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1296. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1297. if (res) {
  1298. IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
  1299. return res;
  1300. }
  1301. }
  1302. IPA_MHI_FUNC_EXIT();
  1303. return 0;
  1304. }
  1305. /**
  1306. * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
  1307. * MHI channel
  1308. * @in: connect parameters
  1309. * @clnt_hdl: [out] client handle for this pipe
  1310. *
  1311. * This function is called by MHI client driver on MHI channel start.
  1312. * This function is called after MHI engine was started.
  1313. *
  1314. * Return codes: 0 : success
  1315. * negative : error
  1316. */
  1317. int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
  1318. {
  1319. int res;
  1320. unsigned long flags;
  1321. struct ipa_mhi_channel_ctx *channel = NULL;
  1322. IPA_MHI_FUNC_ENTRY();
  1323. if (!in || !clnt_hdl) {
  1324. IPA_MHI_ERR("NULL args\n");
  1325. return -EINVAL;
  1326. }
  1327. if (in->sys.client >= IPA_CLIENT_MAX) {
  1328. IPA_MHI_ERR("bad param client:%d\n", in->sys.client);
  1329. return -EINVAL;
  1330. }
  1331. if (!IPA_CLIENT_IS_MHI(in->sys.client)) {
  1332. IPA_MHI_ERR(
  1333. "Invalid MHI client, client: %d\n", in->sys.client);
  1334. return -EINVAL;
  1335. }
  1336. IPA_MHI_DBG("channel=%d\n", in->channel_id);
  1337. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  1338. if (!ipa_mhi_client_ctx ||
  1339. ipa_mhi_client_ctx->state != IPA_MHI_STATE_STARTED) {
  1340. IPA_MHI_ERR("IPA MHI was not started\n");
  1341. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1342. return -EINVAL;
  1343. }
  1344. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1345. channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id);
  1346. if (!channel) {
  1347. IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
  1348. return -EINVAL;
  1349. }
  1350. if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID &&
  1351. channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
  1352. IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
  1353. return -EFAULT;
  1354. }
  1355. channel->channel_context_addr =
  1356. ipa_mhi_client_ctx->channel_context_array_addr +
  1357. channel->id * sizeof(struct ipa_mhi_ch_ctx);
  1358. /* for event context address index needs to read from host */
  1359. IPA_MHI_DBG("client %d channelIndex %d channelID %d, state %d\n",
  1360. channel->client, channel->index, channel->id, channel->state);
  1361. IPA_MHI_DBG("channel_context_addr 0x%llx cached_gsi_evt_ring_hdl %lu\n",
  1362. channel->channel_context_addr,
  1363. channel->cached_gsi_evt_ring_hdl);
  1364. IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
  1365. mutex_lock(&mhi_client_general_mutex);
  1366. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1367. struct ipa_mhi_connect_params_internal internal;
  1368. IPA_MHI_DBG("reading ch/ev context from host\n");
  1369. res = ipa_mhi_read_ch_ctx(channel);
  1370. if (res) {
  1371. IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
  1372. goto fail_start_channel;
  1373. }
  1374. internal.channel_id = in->channel_id;
  1375. internal.sys = &in->sys;
  1376. internal.start.gsi.state = channel->state;
  1377. internal.start.gsi.msi = &ipa_mhi_client_ctx->msi;
  1378. internal.start.gsi.ev_ctx_host = &channel->ev_ctx_host;
  1379. internal.start.gsi.event_context_addr =
  1380. channel->event_context_addr;
  1381. internal.start.gsi.ch_ctx_host = &channel->ch_ctx_host;
  1382. internal.start.gsi.channel_context_addr =
  1383. channel->channel_context_addr;
  1384. internal.start.gsi.ch_err_cb = ipa_mhi_gsi_ch_err_cb;
  1385. internal.start.gsi.channel = (void *)channel;
  1386. internal.start.gsi.ev_err_cb = ipa_mhi_gsi_ev_err_cb;
  1387. internal.start.gsi.assert_bit40 =
  1388. ipa_mhi_client_ctx->assert_bit40;
  1389. internal.start.gsi.mhi = &channel->ch_scratch.mhi;
  1390. internal.start.gsi.cached_gsi_evt_ring_hdl =
  1391. &channel->cached_gsi_evt_ring_hdl;
  1392. internal.start.gsi.evchid = channel->index;
  1393. res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
  1394. if (res) {
  1395. IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
  1396. goto fail_connect_pipe;
  1397. }
  1398. channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
  1399. channel->brstmode_enabled =
  1400. channel->ch_scratch.mhi.burst_mode_enabled;
  1401. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1402. &channel->state, channel->channel_context_addr +
  1403. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1404. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1405. if (res) {
  1406. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1407. mutex_unlock(&mhi_client_general_mutex);
  1408. IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
  1409. return res;
  1410. }
  1411. } else {
  1412. struct ipa_mhi_connect_params_internal internal;
  1413. internal.channel_id = in->channel_id;
  1414. internal.sys = &in->sys;
  1415. internal.start.uC.index = channel->index;
  1416. internal.start.uC.id = channel->id;
  1417. internal.start.uC.state = channel->state;
  1418. res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
  1419. if (res) {
  1420. IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
  1421. goto fail_connect_pipe;
  1422. }
  1423. channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
  1424. }
  1425. mutex_unlock(&mhi_client_general_mutex);
  1426. if (!in->sys.keep_ipa_awake)
  1427. IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
  1428. IPA_MHI_FUNC_EXIT();
  1429. return 0;
  1430. fail_connect_pipe:
  1431. mutex_unlock(&mhi_client_general_mutex);
  1432. ipa_mhi_reset_channel(channel);
  1433. fail_start_channel:
  1434. IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
  1435. return -EPERM;
  1436. }
  1437. /**
  1438. * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
  1439. * MHI channel
  1440. * @clnt_hdl: client handle for this pipe
  1441. *
  1442. * This function is called by MHI client driver on MHI channel reset.
  1443. * This function is called after MHI channel was started.
  1444. * This function is doing the following:
  1445. * - Send command to uC/GSI to reset corresponding MHI channel
  1446. * - Configure IPA EP control
  1447. *
  1448. * Return codes: 0 : success
  1449. * negative : error
  1450. */
  1451. int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
  1452. {
  1453. int res;
  1454. enum ipa_client_type client;
  1455. static struct ipa_mhi_channel_ctx *channel;
  1456. IPA_MHI_FUNC_ENTRY();
  1457. if (!ipa_mhi_client_ctx) {
  1458. IPA_MHI_ERR("IPA MHI was not initialized\n");
  1459. return -EINVAL;
  1460. }
  1461. client = ipa_get_client_mapping(clnt_hdl);
  1462. if (!IPA_CLIENT_IS_MHI(client)) {
  1463. IPA_MHI_ERR("invalid IPA MHI client, client: %d\n", client);
  1464. return -EINVAL;
  1465. }
  1466. channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
  1467. if (!channel) {
  1468. IPA_MHI_ERR("invalid clnt index\n");
  1469. return -EINVAL;
  1470. }
  1471. IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl));
  1472. res = ipa_mhi_reset_channel(channel);
  1473. if (res) {
  1474. IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
  1475. goto fail_reset_channel;
  1476. }
  1477. mutex_lock(&mhi_client_general_mutex);
  1478. res = ipa_disconnect_mhi_pipe(clnt_hdl);
  1479. if (res) {
  1480. IPA_MHI_ERR(
  1481. "IPA core driver failed to disconnect the pipe hdl %d, res %d"
  1482. , clnt_hdl, res);
  1483. goto fail_disconnect_pipe;
  1484. }
  1485. mutex_unlock(&mhi_client_general_mutex);
  1486. IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
  1487. IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
  1488. IPA_MHI_FUNC_EXIT();
  1489. return 0;
  1490. fail_disconnect_pipe:
  1491. mutex_unlock(&mhi_client_general_mutex);
  1492. fail_reset_channel:
  1493. IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
  1494. return res;
  1495. }
  1496. static int ipa_mhi_wait_for_cons_release(void)
  1497. {
  1498. unsigned long flags;
  1499. int res;
  1500. IPA_MHI_FUNC_ENTRY();
  1501. reinit_completion(&ipa_mhi_client_ctx->rm_cons_comp);
  1502. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  1503. if (ipa_mhi_client_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) {
  1504. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1505. return 0;
  1506. }
  1507. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1508. res = wait_for_completion_timeout(
  1509. &ipa_mhi_client_ctx->rm_cons_comp,
  1510. msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
  1511. if (res == 0) {
  1512. IPA_MHI_ERR("timeout release mhi cons\n");
  1513. return -ETIME;
  1514. }
  1515. IPA_MHI_FUNC_EXIT();
  1516. return 0;
  1517. }
  1518. static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels,
  1519. int max_channels)
  1520. {
  1521. int i;
  1522. int res;
  1523. IPA_MHI_FUNC_ENTRY();
  1524. for (i = 0; i < max_channels; i++) {
  1525. if (!channels[i].valid)
  1526. continue;
  1527. if (channels[i].state !=
  1528. IPA_HW_MHI_CHANNEL_STATE_RUN)
  1529. continue;
  1530. IPA_MHI_DBG("suspending channel %d\n",
  1531. channels[i].id);
  1532. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  1533. res = ipa_mhi_suspend_gsi_channel(
  1534. &channels[i]);
  1535. else
  1536. res = ipa_uc_mhi_suspend_channel(
  1537. channels[i].index);
  1538. if (res) {
  1539. IPA_MHI_ERR("failed to suspend channel %d error %d\n",
  1540. i, res);
  1541. return res;
  1542. }
  1543. channels[i].state =
  1544. IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
  1545. }
  1546. IPA_MHI_FUNC_EXIT();
  1547. return 0;
  1548. }
  1549. static int ipa_mhi_stop_event_update_channels(
  1550. struct ipa_mhi_channel_ctx *channels, int max_channels)
  1551. {
  1552. int i;
  1553. int res;
  1554. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  1555. return 0;
  1556. IPA_MHI_FUNC_ENTRY();
  1557. for (i = 0; i < max_channels; i++) {
  1558. if (!channels[i].valid)
  1559. continue;
  1560. if (channels[i].state !=
  1561. IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
  1562. continue;
  1563. IPA_MHI_DBG("stop update event channel %d\n",
  1564. channels[i].id);
  1565. res = ipa_uc_mhi_stop_event_update_channel(
  1566. channels[i].index);
  1567. if (res) {
  1568. IPA_MHI_ERR("failed stop event channel %d error %d\n",
  1569. i, res);
  1570. return res;
  1571. }
  1572. }
  1573. IPA_MHI_FUNC_EXIT();
  1574. return 0;
  1575. }
  1576. static bool ipa_mhi_check_pending_packets_from_host(void)
  1577. {
  1578. int i;
  1579. int res;
  1580. struct ipa_mhi_channel_ctx *channel;
  1581. IPA_MHI_FUNC_ENTRY();
  1582. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  1583. channel = &ipa_mhi_client_ctx->ul_channels[i];
  1584. if (!channel->valid)
  1585. continue;
  1586. res = ipa_mhi_query_ch_info(channel->client,
  1587. &channel->ch_info);
  1588. if (res) {
  1589. IPA_MHI_ERR("gsi_query_channel_info failed\n");
  1590. return true;
  1591. }
  1592. res = ipa_mhi_read_ch_ctx(channel);
  1593. if (res) {
  1594. IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
  1595. return true;
  1596. }
  1597. if (channel->ch_info.rp != channel->ch_ctx_host.wp) {
  1598. IPA_MHI_DBG("There are pending packets from host\n");
  1599. IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n",
  1600. channel->ch_info.rp, channel->ch_ctx_host.wp);
  1601. return true;
  1602. }
  1603. }
  1604. IPA_MHI_FUNC_EXIT();
  1605. return false;
  1606. }
  1607. static int ipa_mhi_resume_channels(bool LPTransitionRejected,
  1608. struct ipa_mhi_channel_ctx *channels, int max_channels)
  1609. {
  1610. int i;
  1611. int res;
  1612. struct ipa_mhi_channel_ctx *channel;
  1613. IPA_MHI_FUNC_ENTRY();
  1614. for (i = 0; i < max_channels; i++) {
  1615. if (!channels[i].valid)
  1616. continue;
  1617. if (channels[i].state !=
  1618. IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
  1619. continue;
  1620. channel = &channels[i];
  1621. IPA_MHI_DBG("resuming channel %d\n", channel->id);
  1622. res = ipa_mhi_resume_channels_internal(channel->client,
  1623. LPTransitionRejected, channel->brstmode_enabled,
  1624. channel->ch_scratch, channel->index);
  1625. if (res) {
  1626. IPA_MHI_ERR("failed to resume channel %d error %d\n",
  1627. i, res);
  1628. return res;
  1629. }
  1630. channel->stop_in_proc = false;
  1631. channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
  1632. }
  1633. IPA_MHI_FUNC_EXIT();
  1634. return 0;
  1635. }
  1636. /**
  1637. * ipa_mhi_suspend_ul() - Suspend MHI accelerated up link channels
  1638. * @force:
  1639. * false: in case of data pending in IPA, MHI channels will not be
  1640. * suspended and function will fail.
  1641. * true: in case of data pending in IPA, make sure no further access from
  1642. * IPA to PCIe is possible. In this case suspend cannot fail.
  1643. *
  1644. *
  1645. * This function is called by MHI client driver on MHI suspend.
  1646. * This function is called after MHI channel was started.
  1647. * When this function returns device can move to M1/M2/M3/D3cold state.
  1648. *
  1649. * Return codes: 0 : success
  1650. * negative : error
  1651. */
static int ipa_mhi_suspend_ul(bool force, bool *empty, bool *force_clear)
{
	int res;

	/* out-params: did the UL path drain, and was force-clear used */
	*force_clear = false;

	res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
		IPA_MHI_MAX_UL_CHANNELS);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
		goto fail_suspend_ul_channel;
	}

	/* wait for in-flight UL data to drain out of IPA */
	*empty = ipa_mhi_wait_for_ul_empty_timeout(
			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);

	if (!*empty) {
		if (force) {
			/*
			 * Forced suspend: force-clear the datapath via QMI so
			 * no further IPA->PCIe access is possible, then wait
			 * again for the pipes to drain.
			 */
			res = ipa_mhi_enable_force_clear(
				ipa_mhi_client_ctx->qmi_req_id, false);
			if (res) {
				IPA_MHI_ERR("failed to enable force clear\n");
				ipa_assert();
				return res;
			}
			*force_clear = true;
			IPA_MHI_DBG("force clear datapath enabled\n");

			*empty = ipa_mhi_wait_for_ul_empty_timeout(
				IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
			IPA_MHI_DBG("empty=%d\n", *empty);
			if (!*empty && ipa_get_transport_type()
				== IPA_TRANSPORT_TYPE_GSI) {
				IPA_MHI_ERR("Failed to suspend UL channels\n");
				/* in test mode fail softly instead of assert */
				if (ipa_mhi_client_ctx->test_mode) {
					res = -EAGAIN;
					goto fail_suspend_ul_channel;
				}
				ipa_assert();
			}
		} else {
			/* non-forced suspend cannot proceed with data pending */
			IPA_MHI_DBG("IPA not empty\n");
			res = -EAGAIN;
			goto fail_suspend_ul_channel;
		}
	}

	if (*force_clear) {
		/* undo the force-clear; bump the QMI request id for next use */
		res =
		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
		if (res) {
			IPA_MHI_ERR("failed to disable force clear\n");
			ipa_assert();
			return res;
		}
		IPA_MHI_DBG("force clear datapath disabled\n");
		ipa_mhi_client_ctx->qmi_req_id++;
	}

	/* on GSI, refuse non-forced suspend if host still has queued packets */
	if (!force && ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		if (ipa_mhi_check_pending_packets_from_host()) {
			res = -EAGAIN;
			goto fail_suspend_ul_channel;
		}
	}

	res = ipa_mhi_stop_event_update_channels(
		ipa_mhi_client_ctx->ul_channels, IPA_MHI_MAX_UL_CHANNELS);
	if (res) {
		IPA_MHI_ERR(
			"ipa_mhi_stop_event_update_ul_channels failed %d\n",
			res);
		goto fail_suspend_ul_channel;
	}

	return 0;

fail_suspend_ul_channel:
	/* caller (ipa_mhi_suspend) performs channel resume / cleanup */
	return res;
}
  1722. static bool ipa_mhi_has_open_aggr_frame(void)
  1723. {
  1724. struct ipa_mhi_channel_ctx *channel;
  1725. int i;
  1726. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  1727. channel = &ipa_mhi_client_ctx->dl_channels[i];
  1728. if (!channel->valid)
  1729. continue;
  1730. if (ipa_has_open_aggr_frame(channel->client))
  1731. return true;
  1732. }
  1733. return false;
  1734. }
  1735. static void ipa_mhi_update_host_ch_state(bool update_rp)
  1736. {
  1737. int i;
  1738. int res;
  1739. struct ipa_mhi_channel_ctx *channel;
  1740. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  1741. channel = &ipa_mhi_client_ctx->ul_channels[i];
  1742. if (!channel->valid)
  1743. continue;
  1744. if (update_rp) {
  1745. res = ipa_mhi_query_ch_info(channel->client,
  1746. &channel->ch_info);
  1747. if (res) {
  1748. IPA_MHI_ERR("gsi_query_channel_info failed\n");
  1749. ipa_assert();
  1750. return;
  1751. }
  1752. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1753. &channel->ch_info.rp,
  1754. channel->channel_context_addr +
  1755. offsetof(struct ipa_mhi_ch_ctx, rp),
  1756. sizeof(channel->ch_info.rp));
  1757. if (res) {
  1758. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1759. ipa_assert();
  1760. return;
  1761. }
  1762. }
  1763. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1764. &channel->state, channel->channel_context_addr +
  1765. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1766. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1767. if (res) {
  1768. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1769. ipa_assert();
  1770. return;
  1771. }
  1772. IPA_MHI_DBG("Updated UL CH=%d state to %s on host\n",
  1773. i, MHI_CH_STATE_STR(channel->state));
  1774. }
  1775. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  1776. channel = &ipa_mhi_client_ctx->dl_channels[i];
  1777. if (!channel->valid)
  1778. continue;
  1779. if (update_rp) {
  1780. res = ipa_mhi_query_ch_info(channel->client,
  1781. &channel->ch_info);
  1782. if (res) {
  1783. IPA_MHI_ERR("gsi_query_channel_info failed\n");
  1784. ipa_assert();
  1785. return;
  1786. }
  1787. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1788. &channel->ch_info.rp,
  1789. channel->channel_context_addr +
  1790. offsetof(struct ipa_mhi_ch_ctx, rp),
  1791. sizeof(channel->ch_info.rp));
  1792. if (res) {
  1793. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1794. ipa_assert();
  1795. return;
  1796. }
  1797. }
  1798. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1799. &channel->state, channel->channel_context_addr +
  1800. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1801. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1802. if (res) {
  1803. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1804. ipa_assert();
  1805. return;
  1806. }
  1807. IPA_MHI_DBG("Updated DL CH=%d state to %s on host\n",
  1808. i, MHI_CH_STATE_STR(channel->state));
  1809. }
  1810. }
/*
 * Suspend all accelerated DL channels and stop their event updates.
 * On GSI, an open aggregation frame blocks a non-forced suspend
 * (-EAGAIN); a forced suspend instead arms trigger_wakeup so the host
 * is woken after suspend completes. Any failure resumes the DL
 * channels before returning.
 */
static int ipa_mhi_suspend_dl(bool force)
{
	int res;

	res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
		IPA_MHI_MAX_DL_CHANNELS);
	if (res) {
		IPA_MHI_ERR(
			"ipa_mhi_suspend_channels for dl failed %d\n", res);
		goto fail_suspend_dl_channel;
	}

	res = ipa_mhi_stop_event_update_channels
			(ipa_mhi_client_ctx->dl_channels,
			IPA_MHI_MAX_DL_CHANNELS);
	if (res) {
		IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
		goto fail_stop_event_update_dl_channel;
	}

	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		if (ipa_mhi_has_open_aggr_frame()) {
			IPA_MHI_DBG("There is an open aggr frame\n");
			if (force) {
				/* wakeup will fire once suspend completes */
				ipa_mhi_client_ctx->trigger_wakeup = true;
			} else {
				res = -EAGAIN;
				goto fail_stop_event_update_dl_channel;
			}
		}
	}

	return 0;

fail_stop_event_update_dl_channel:
	/* roll back: bring DL channels back to RUN (LP transition rejected) */
	ipa_mhi_resume_channels(true,
		ipa_mhi_client_ctx->dl_channels,
		IPA_MHI_MAX_DL_CHANNELS);
fail_suspend_dl_channel:
	return res;
}
  1847. /**
  1848. * ipa_mhi_suspend() - Suspend MHI accelerated channels
  1849. * @force:
  1850. * false: in case of data pending in IPA, MHI channels will not be
  1851. * suspended and function will fail.
  1852. * true: in case of data pending in IPA, make sure no further access from
  1853. * IPA to PCIe is possible. In this case suspend cannot fail.
  1854. *
  1855. * This function is called by MHI client driver on MHI suspend.
  1856. * This function is called after MHI channel was started.
  1857. * When this function returns device can move to M1/M2/M3/D3cold state.
  1858. *
  1859. * Return codes: 0 : success
  1860. * negative : error
  1861. */
int ipa_mhi_suspend(bool force)
{
	int res;
	bool empty;		/* set by ipa_mhi_suspend_ul: UL path drained */
	bool force_clear;	/* set by ipa_mhi_suspend_ul: force-clear used */

	IPA_MHI_FUNC_ENTRY();

	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
		return res;
	}

	/* DL first, then UL, with a settle delay between them */
	res = ipa_mhi_suspend_dl(force);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_dl failed %d\n", res);
		goto fail_suspend_dl_channel;
	}

	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);

	res = ipa_mhi_suspend_ul(force, &empty, &force_clear);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_ul failed %d\n", res);
		goto fail_suspend_ul_channel;
	}

	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
		ipa_mhi_update_host_ch_state(true);

	/*
	 * hold IPA clocks and release them after all
	 * IPA RM resource are released to make sure tag process will not start
	 */
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (ipa_pm_is_used()) {
		/* PM path: drop the MHI and modem PM votes */
		res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
		if (res) {
			IPA_MHI_ERR("fail to deactivate client %d\n", res);
			goto fail_deactivate_pm;
		}
		res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
		if (res) {
			IPA_MHI_ERR("fail to deactivate client %d\n", res);
			goto fail_deactivate_modem_pm;
		}
	} else {
		/* legacy RM path: release PROD and wait for CONS release */
		IPA_MHI_DBG("release prod\n");
		res = ipa_mhi_release_prod();
		if (res) {
			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
			goto fail_release_prod;
		}

		IPA_MHI_DBG("wait for cons release\n");
		res = ipa_mhi_wait_for_cons_release();
		if (res) {
			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed\n");
			goto fail_release_cons;
		}
	}
	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);

	/* skip the tag process if the pipes did not drain */
	if (!empty)
		ipa_set_tag_process_before_gating(false);

	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
		goto fail_release_cons;
	}

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPA_MHI_FUNC_EXIT();
	return 0;

	/*
	 * Error unwinding intentionally falls through label-to-label,
	 * undoing each completed step in reverse order.
	 */
fail_release_cons:
	if (!ipa_pm_is_used())
		ipa_mhi_request_prod();
fail_release_prod:
	if (ipa_pm_is_used())
		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_deactivate_modem_pm:
	if (ipa_pm_is_used())
		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_deactivate_pm:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail_suspend_ul_channel:
	ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels,
		IPA_MHI_MAX_UL_CHANNELS);
	/* force_clear was set by ipa_mhi_suspend_ul before any jump here */
	if (force_clear) {
		if (
		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id)) {
			IPA_MHI_ERR("failed to disable force clear\n");
			ipa_assert();
		}
		IPA_MHI_DBG("force clear datapath disabled\n");
		ipa_mhi_client_ctx->qmi_req_id++;
	}
fail_suspend_dl_channel:
	ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->dl_channels,
		IPA_MHI_MAX_DL_CHANNELS);
	ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
	return res;
}
  1956. /**
  1957. * ipa_mhi_resume() - Resume MHI accelerated channels
  1958. *
  1959. * This function is called by MHI client driver on MHI resume.
  1960. * This function is called after MHI channel was suspended.
  1961. * When this function returns device can move to M0 state.
  1962. * This function is doing the following:
  1963. * - Send command to uC/GSI to resume corresponding MHI channel
  1964. * - Request MHI_PROD in IPA RM
  1965. * - Resume data to IPA
  1966. *
  1967. * Return codes: 0 : success
  1968. * negative : error
  1969. */
  1970. int ipa_mhi_resume(void)
  1971. {
  1972. int res;
  1973. bool dl_channel_resumed = false;
  1974. IPA_MHI_FUNC_ENTRY();
  1975. res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
  1976. if (res) {
  1977. IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
  1978. return res;
  1979. }
  1980. if (ipa_mhi_client_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
  1981. /* resume all DL channels */
  1982. res = ipa_mhi_resume_channels(false,
  1983. ipa_mhi_client_ctx->dl_channels,
  1984. IPA_MHI_MAX_DL_CHANNELS);
  1985. if (res) {
  1986. IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
  1987. res);
  1988. goto fail_resume_dl_channels;
  1989. }
  1990. dl_channel_resumed = true;
  1991. ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
  1992. IPA_RM_RESOURCE_MHI_CONS);
  1993. ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
  1994. }
  1995. if (ipa_pm_is_used()) {
  1996. res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
  1997. if (res) {
  1998. IPA_MHI_ERR("fail to activate client %d\n", res);
  1999. goto fail_pm_activate;
  2000. }
  2001. ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  2002. if (res) {
  2003. IPA_MHI_ERR("fail to activate client %d\n", res);
  2004. goto fail_pm_activate_modem;
  2005. }
  2006. } else {
  2007. res = ipa_mhi_request_prod();
  2008. if (res) {
  2009. IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
  2010. goto fail_request_prod;
  2011. }
  2012. }
  2013. /* resume all UL channels */
  2014. res = ipa_mhi_resume_channels(false,
  2015. ipa_mhi_client_ctx->ul_channels,
  2016. IPA_MHI_MAX_UL_CHANNELS);
  2017. if (res) {
  2018. IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
  2019. goto fail_resume_ul_channels;
  2020. }
  2021. if (!dl_channel_resumed) {
  2022. res = ipa_mhi_resume_channels(false,
  2023. ipa_mhi_client_ctx->dl_channels,
  2024. IPA_MHI_MAX_DL_CHANNELS);
  2025. if (res) {
  2026. IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
  2027. res);
  2028. goto fail_resume_dl_channels2;
  2029. }
  2030. }
  2031. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  2032. ipa_mhi_update_host_ch_state(false);
  2033. res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
  2034. if (res) {
  2035. IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
  2036. goto fail_set_state;
  2037. }
  2038. IPA_MHI_FUNC_EXIT();
  2039. return 0;
  2040. fail_set_state:
  2041. ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
  2042. IPA_MHI_MAX_DL_CHANNELS);
  2043. fail_resume_dl_channels2:
  2044. ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
  2045. IPA_MHI_MAX_UL_CHANNELS);
  2046. fail_resume_ul_channels:
  2047. if (!ipa_pm_is_used())
  2048. ipa_mhi_release_prod();
  2049. fail_request_prod:
  2050. if (ipa_pm_is_used())
  2051. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  2052. fail_pm_activate_modem:
  2053. if (ipa_pm_is_used())
  2054. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
  2055. fail_pm_activate:
  2056. ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
  2057. IPA_MHI_MAX_DL_CHANNELS);
  2058. fail_resume_dl_channels:
  2059. ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
  2060. return res;
  2061. }
  2062. static int ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
  2063. int num_of_channels)
  2064. {
  2065. struct ipa_mhi_channel_ctx *channel;
  2066. int i, res;
  2067. u32 clnt_hdl;
  2068. for (i = 0; i < num_of_channels; i++) {
  2069. channel = &channels[i];
  2070. if (!channel->valid)
  2071. continue;
  2072. if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID)
  2073. continue;
  2074. if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
  2075. clnt_hdl = ipa_get_ep_mapping(channel->client);
  2076. IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
  2077. res = ipa_mhi_disconnect_pipe(clnt_hdl);
  2078. if (res) {
  2079. IPA_MHI_ERR(
  2080. "failed to disconnect pipe %d, err %d\n"
  2081. , clnt_hdl, res);
  2082. goto fail;
  2083. }
  2084. }
  2085. res = ipa_mhi_destroy_channel(channel->client);
  2086. if (res) {
  2087. IPA_MHI_ERR(
  2088. "ipa_mhi_destroy_channel failed %d"
  2089. , res);
  2090. goto fail;
  2091. }
  2092. }
  2093. return 0;
  2094. fail:
  2095. return res;
  2096. }
  2097. /**
  2098. * ipa_mhi_destroy_all_channels() - Destroy MHI IPA channels
  2099. *
  2100. * This function is called by IPA MHI client driver on MHI reset to destroy all
  2101. * IPA MHI channels.
  2102. */
  2103. int ipa_mhi_destroy_all_channels(void)
  2104. {
  2105. int res;
  2106. IPA_MHI_FUNC_ENTRY();
  2107. /* reset all UL and DL acc channels and its accociated event rings */
  2108. res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->ul_channels,
  2109. IPA_MHI_MAX_UL_CHANNELS);
  2110. if (res) {
  2111. IPA_MHI_ERR("ipa_mhi_destroy_channels(ul_channels) failed %d\n",
  2112. res);
  2113. return -EPERM;
  2114. }
  2115. IPA_MHI_DBG("All UL channels are disconnected\n");
  2116. res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->dl_channels,
  2117. IPA_MHI_MAX_DL_CHANNELS);
  2118. if (res) {
  2119. IPA_MHI_ERR("ipa_mhi_destroy_channels(dl_channels) failed %d\n",
  2120. res);
  2121. return -EPERM;
  2122. }
  2123. IPA_MHI_DBG("All DL channels are disconnected\n");
  2124. IPA_MHI_FUNC_EXIT();
  2125. return 0;
  2126. }
/* Remove the IPA MHI debugfs directory and everything beneath it */
static void ipa_mhi_debugfs_destroy(void)
{
	debugfs_remove_recursive(dent);
}
/*
 * Tear down the legacy IPA RM resources (MHI PROD/CONS) and their
 * dependencies on the Q6 resources. If the driver has gone past the
 * INITIALIZED/READY states, PROD must first be released and CONS
 * confirmed released before the dependencies can be removed.
 * Any failure asserts - this runs during destroy with no recovery path.
 */
static void ipa_mhi_delete_rm_resources(void)
{
	int res;

	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED  &&
		ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
		IPA_MHI_DBG("release prod\n");
		res = ipa_mhi_release_prod();
		if (res) {
			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n",
				res);
			goto fail;
		}
		IPA_MHI_DBG("wait for cons release\n");
		res = ipa_mhi_wait_for_cons_release();
		if (res) {
			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release%d\n",
				res);
			goto fail;
		}
		/* settle delay before tearing down dependencies */
		usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
			IPA_MHI_SUSPEND_SLEEP_MAX);

		IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n");
		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_MHI_CONS);
		if (res) {
			IPA_MHI_ERR(
				"Error deleting dependency %d->%d, res=%d\n",
				IPA_RM_RESOURCE_Q6_PROD,
				IPA_RM_RESOURCE_MHI_CONS,
				res);
			goto fail;
		}
		IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n");
		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
			IPA_RM_RESOURCE_Q6_CONS);
		if (res) {
			IPA_MHI_ERR(
				"Error deleting dependency %d->%d, res=%d\n",
				IPA_RM_RESOURCE_MHI_PROD,
				IPA_RM_RESOURCE_Q6_CONS,
				res);
			goto fail;
		}
	}

	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
	if (res) {
		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
			IPA_RM_RESOURCE_MHI_PROD, res);
		goto fail;
	}

	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
	if (res) {
		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
			IPA_RM_RESOURCE_MHI_CONS, res);
		goto fail;
	}

	return;
fail:
	ipa_assert();
}
  2191. static void ipa_mhi_deregister_pm(void)
  2192. {
  2193. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
  2194. ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
  2195. ipa_mhi_client_ctx->pm_hdl = ~0;
  2196. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  2197. ipa_pm_deregister(ipa_mhi_client_ctx->modem_pm_hdl);
  2198. ipa_mhi_client_ctx->modem_pm_hdl = ~0;
  2199. }
  2200. /**
  2201. * ipa_mhi_destroy() - Destroy MHI IPA
  2202. *
  2203. * This function is called by MHI client driver on MHI reset to destroy all IPA
  2204. * MHI resources.
  2205. * When this function returns ipa_mhi can re-initialize.
  2206. */
void ipa_mhi_destroy(void)
{
	int res;

	IPA_MHI_FUNC_ENTRY();
	/* idempotent: safe to call when already destroyed */
	if (!ipa_mhi_client_ctx) {
		IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n");
		return;
	}

	ipa_deregister_client_callback(IPA_CLIENT_MHI_PROD);

	/* reset all UL and DL acc channels and their associated event rings */
	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		res = ipa_mhi_destroy_all_channels();
		if (res) {
			IPA_MHI_ERR("ipa_mhi_destroy_all_channels failed %d\n",
				res);
			goto fail;
		}
	}
	IPA_MHI_DBG("All channels are disconnected\n");

	/* uC MHI state only exists on the SPS transport */
	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS) {
		IPA_MHI_DBG("cleanup uC MHI\n");
		ipa_uc_mhi_cleanup();
	}

	/* release power votes via PM or legacy RM, whichever is in use */
	if (ipa_pm_is_used())
		ipa_mhi_deregister_pm();
	else
		ipa_mhi_delete_rm_resources();

	ipa_dma_destroy();
	ipa_mhi_debugfs_destroy();
	destroy_workqueue(ipa_mhi_client_ctx->wq);
	kfree(ipa_mhi_client_ctx);
	ipa_mhi_client_ctx = NULL;
	IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n");

	IPA_MHI_FUNC_EXIT();
	return;
fail:
	ipa_assert();
}
  2245. static void ipa_mhi_pm_cb(void *p, enum ipa_pm_cb_event event)
  2246. {
  2247. unsigned long flags;
  2248. IPA_MHI_FUNC_ENTRY();
  2249. if (event != IPA_PM_REQUEST_WAKEUP) {
  2250. IPA_MHI_ERR("Unexpected event %d\n", event);
  2251. WARN_ON(1);
  2252. return;
  2253. }
  2254. IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
  2255. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  2256. if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
  2257. ipa_mhi_notify_wakeup();
  2258. } else if (ipa_mhi_client_ctx->state ==
  2259. IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
  2260. /* wakeup event will be trigger after suspend finishes */
  2261. ipa_mhi_client_ctx->trigger_wakeup = true;
  2262. }
  2263. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  2264. IPA_MHI_DBG("EXIT");
  2265. }
  2266. static int ipa_mhi_register_pm(void)
  2267. {
  2268. int res;
  2269. struct ipa_pm_register_params params;
  2270. memset(&params, 0, sizeof(params));
  2271. params.name = "MHI";
  2272. params.callback = ipa_mhi_pm_cb;
  2273. params.group = IPA_PM_GROUP_DEFAULT;
  2274. res = ipa_pm_register(&params, &ipa_mhi_client_ctx->pm_hdl);
  2275. if (res) {
  2276. IPA_MHI_ERR("fail to register with PM %d\n", res);
  2277. return res;
  2278. }
  2279. res = ipa_pm_associate_ipa_cons_to_client(ipa_mhi_client_ctx->pm_hdl,
  2280. IPA_CLIENT_MHI_CONS);
  2281. if (res) {
  2282. IPA_MHI_ERR("fail to associate cons with PM %d\n", res);
  2283. goto fail_pm_cons;
  2284. }
  2285. res = ipa_pm_set_throughput(ipa_mhi_client_ctx->pm_hdl, 1000);
  2286. if (res) {
  2287. IPA_MHI_ERR("fail to set perf profile to PM %d\n", res);
  2288. goto fail_pm_cons;
  2289. }
  2290. /* create a modem client for clock scaling */
  2291. memset(&params, 0, sizeof(params));
  2292. params.name = "MODEM (MHI)";
  2293. params.group = IPA_PM_GROUP_MODEM;
  2294. params.skip_clk_vote = true;
  2295. res = ipa_pm_register(&params, &ipa_mhi_client_ctx->modem_pm_hdl);
  2296. if (res) {
  2297. IPA_MHI_ERR("fail to register with PM %d\n", res);
  2298. goto fail_pm_cons;
  2299. }
  2300. return 0;
  2301. fail_pm_cons:
  2302. ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
  2303. ipa_mhi_client_ctx->pm_hdl = ~0;
  2304. return res;
  2305. }
  2306. static int ipa_mhi_create_rm_resources(void)
  2307. {
  2308. int res;
  2309. struct ipa_rm_create_params mhi_prod_params;
  2310. struct ipa_rm_create_params mhi_cons_params;
  2311. struct ipa_rm_perf_profile profile;
  2312. /* Create PROD in IPA RM */
  2313. memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
  2314. mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
  2315. mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
  2316. mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
  2317. res = ipa_rm_create_resource(&mhi_prod_params);
  2318. if (res) {
  2319. IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
  2320. goto fail_create_rm_prod;
  2321. }
  2322. memset(&profile, 0, sizeof(profile));
  2323. profile.max_supported_bandwidth_mbps = 1000;
  2324. res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile);
  2325. if (res) {
  2326. IPA_MHI_ERR("fail to set profile to MHI_PROD\n");
  2327. goto fail_perf_rm_prod;
  2328. }
  2329. /* Create CONS in IPA RM */
  2330. memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
  2331. mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
  2332. mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
  2333. mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
  2334. mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
  2335. res = ipa_rm_create_resource(&mhi_cons_params);
  2336. if (res) {
  2337. IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
  2338. goto fail_create_rm_cons;
  2339. }
  2340. memset(&profile, 0, sizeof(profile));
  2341. profile.max_supported_bandwidth_mbps = 1000;
  2342. res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
  2343. if (res) {
  2344. IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
  2345. goto fail_perf_rm_cons;
  2346. }
  2347. fail_perf_rm_cons:
  2348. ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
  2349. fail_create_rm_cons:
  2350. fail_perf_rm_prod:
  2351. ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
  2352. fail_create_rm_prod:
  2353. return res;
  2354. }
  2355. /**
  2356. * ipa_mhi_init() - Initialize IPA MHI driver
  2357. * @params: initialization params
  2358. *
  2359. * This function is called by MHI client driver on boot to initialize IPA MHI
  2360. * Driver. When this function returns device can move to READY state.
  2361. * This function is doing the following:
  2362. * - Initialize MHI IPA internal data structures
  2363. * - Create IPA RM resources
  2364. * - Initialize debugfs
  2365. *
  2366. * Return codes: 0 : success
  2367. * negative : error
  2368. */
  2369. int ipa_mhi_init(struct ipa_mhi_init_params *params)
  2370. {
  2371. int res;
  2372. IPA_MHI_FUNC_ENTRY();
  2373. if (!params) {
  2374. IPA_MHI_ERR("null args\n");
  2375. return -EINVAL;
  2376. }
  2377. if (!params->notify) {
  2378. IPA_MHI_ERR("null notify function\n");
  2379. return -EINVAL;
  2380. }
  2381. if (ipa_mhi_client_ctx) {
  2382. IPA_MHI_ERR("already initialized\n");
  2383. return -EPERM;
  2384. }
  2385. IPA_MHI_DBG("notify = %pS priv = %pK\n", params->notify, params->priv);
  2386. IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
  2387. params->msi.addr_low, params->msi.addr_hi);
  2388. IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
  2389. params->msi.data, params->msi.mask);
  2390. IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
  2391. IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
  2392. IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
  2393. IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40);
  2394. IPA_MHI_DBG("test_mode=%d\n", params->test_mode);
  2395. /* Initialize context */
  2396. ipa_mhi_client_ctx = kzalloc(sizeof(*ipa_mhi_client_ctx), GFP_KERNEL);
  2397. if (!ipa_mhi_client_ctx) {
  2398. res = -EFAULT;
  2399. goto fail_alloc_ctx;
  2400. }
  2401. ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
  2402. ipa_mhi_client_ctx->cb_notify = params->notify;
  2403. ipa_mhi_client_ctx->cb_priv = params->priv;
  2404. ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
  2405. init_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
  2406. spin_lock_init(&ipa_mhi_client_ctx->state_lock);
  2407. init_completion(&ipa_mhi_client_ctx->rm_cons_comp);
  2408. ipa_mhi_client_ctx->msi = params->msi;
  2409. ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
  2410. ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
  2411. ipa_mhi_client_ctx->first_er_idx = params->first_er_idx;
  2412. ipa_mhi_client_ctx->qmi_req_id = 0;
  2413. ipa_mhi_client_ctx->use_ipadma = true;
  2414. ipa_mhi_client_ctx->assert_bit40 = !!params->assert_bit40;
  2415. ipa_mhi_client_ctx->test_mode = params->test_mode;
  2416. ipa_mhi_client_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
  2417. if (!ipa_mhi_client_ctx->wq) {
  2418. IPA_MHI_ERR("failed to create workqueue\n");
  2419. res = -EFAULT;
  2420. goto fail_create_wq;
  2421. }
  2422. res = ipa_dma_init();
  2423. if (res) {
  2424. IPA_MHI_ERR("failed to init ipa dma %d\n", res);
  2425. goto fail_dma_init;
  2426. }
  2427. if (ipa_pm_is_used())
  2428. res = ipa_mhi_register_pm();
  2429. else
  2430. res = ipa_mhi_create_rm_resources();
  2431. if (res) {
  2432. IPA_MHI_ERR("failed to create RM resources\n");
  2433. res = -EFAULT;
  2434. goto fail_rm;
  2435. }
  2436. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  2437. ipa_mhi_set_state(IPA_MHI_STATE_READY);
  2438. } else {
  2439. /* Initialize uC interface */
  2440. ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
  2441. ipa_mhi_uc_wakeup_request_cb);
  2442. if (ipa_uc_state_check() == 0)
  2443. ipa_mhi_set_state(IPA_MHI_STATE_READY);
  2444. }
  2445. ipa_register_client_callback(&ipa_mhi_set_lock_unlock, NULL,
  2446. IPA_CLIENT_MHI_PROD);
  2447. /* Initialize debugfs */
  2448. ipa_mhi_debugfs_init();
  2449. IPA_MHI_FUNC_EXIT();
  2450. return 0;
  2451. fail_rm:
  2452. ipa_dma_destroy();
  2453. fail_dma_init:
  2454. destroy_workqueue(ipa_mhi_client_ctx->wq);
  2455. fail_create_wq:
  2456. kfree(ipa_mhi_client_ctx);
  2457. ipa_mhi_client_ctx = NULL;
  2458. fail_alloc_ctx:
  2459. return res;
  2460. }
  2461. static void ipa_mhi_cache_dl_ul_sync_info(
  2462. struct ipa_config_req_msg_v01 *config_req)
  2463. {
  2464. ipa_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
  2465. ipa_cached_dl_ul_sync_info.params.UlAccmVal =
  2466. (config_req->ul_accumulation_time_limit_valid) ?
  2467. config_req->ul_accumulation_time_limit : 0;
  2468. ipa_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
  2469. (config_req->ul_msi_event_threshold_valid) ?
  2470. config_req->ul_msi_event_threshold : 0;
  2471. ipa_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
  2472. (config_req->dl_msi_event_threshold_valid) ?
  2473. config_req->dl_msi_event_threshold : 0;
  2474. }
/**
 * ipa_mhi_handle_ipa_config_req() - handle IPA CONFIG QMI message
 *
 * This function is called by the IPA QMI service to indicate that an IPA
 * CONFIG message was sent from the modem. IPA MHI will forward this
 * information to the IPA uC, or cache it until IPA MHI is initialized.
 *
 * Return codes: 0 : success
 * negative : error
 */
  2485. int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
  2486. {
  2487. IPA_MHI_FUNC_ENTRY();
  2488. if (ipa_get_transport_type() != IPA_TRANSPORT_TYPE_GSI) {
  2489. ipa_mhi_cache_dl_ul_sync_info(config_req);
  2490. if (ipa_mhi_client_ctx &&
  2491. ipa_mhi_client_ctx->state !=
  2492. IPA_MHI_STATE_INITIALIZED)
  2493. ipa_uc_mhi_send_dl_ul_sync_info(
  2494. &ipa_cached_dl_ul_sync_info);
  2495. }
  2496. IPA_MHI_FUNC_EXIT();
  2497. return 0;
  2498. }
  2499. int ipa_mhi_is_using_dma(bool *flag)
  2500. {
  2501. IPA_MHI_FUNC_ENTRY();
  2502. if (!ipa_mhi_client_ctx) {
  2503. IPA_MHI_ERR("not initialized\n");
  2504. return -EPERM;
  2505. }
  2506. *flag = ipa_mhi_client_ctx->use_ipadma ? true : false;
  2507. IPA_MHI_FUNC_EXIT();
  2508. return 0;
  2509. }
  2510. EXPORT_SYMBOL(ipa_mhi_is_using_dma);
/**
 * ipa_mhi_get_state_str() - get a human-readable name for an IPA MHI state
 * @state: state value to translate
 *
 * Return: string produced by the MHI_STATE_STR() macro for @state.
 * NOTE(review): MHI_STATE_STR is defined elsewhere in this file; presumably
 * it returns a static string and handles out-of-range values — confirm there.
 */
const char *ipa_mhi_get_state_str(int state)
{
	return MHI_STATE_STR(state);
}
EXPORT_SYMBOL(ipa_mhi_get_state_str);
  2516. MODULE_LICENSE("GPL v2");
  2517. MODULE_DESCRIPTION("IPA MHI client driver");