ipa_mhi_client.c 65 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include <linux/export.h>
  7. #include <linux/delay.h>
  8. #include <linux/module.h>
  9. #include <linux/mutex.h>
  10. #include <linux/ipa.h>
  11. #include <linux/msm_gsi.h>
  12. #include <linux/ipa_qmi_service_v01.h>
  13. #include <linux/ipa_mhi.h>
  14. #include "../ipa_common_i.h"
  15. #include "../ipa_v3/ipa_pm.h"
#define IPA_MHI_DRV_NAME "ipa_mhi_client"

/*
 * Debug log: goes to pr_debug plus both the regular and low-priority
 * IPC log buffers.
 */
#define IPA_MHI_DBG(fmt, args...) \
	do { \
		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Low-priority debug log: pr_debug plus the low IPC log buffer only. */
#define IPA_MHI_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Error log: pr_err plus both IPC log buffers. */
#define IPA_MHI_ERR(fmt, args...) \
	do { \
		pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Function entry/exit tracing helpers. */
#define IPA_MHI_FUNC_ENTRY() \
	IPA_MHI_DBG("ENTRY\n")
#define IPA_MHI_FUNC_EXIT() \
	IPA_MHI_DBG("EXIT\n")

/* Timeout waiting for a channel to drain, in milliseconds. */
#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10

/* usleep_range() bounds used while polling during suspend (microseconds). */
#define IPA_MHI_SUSPEND_SLEEP_MIN 900
#define IPA_MHI_SUSPEND_SLEEP_MAX 1100

/* Number of accelerated UL (device->host) and DL (host->device) channels. */
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 2

/* bit #40 in address should be asserted for MHI transfers over pcie */
#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
	((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
/* IPA MHI driver state machine; transitions are policed in
 * ipa_mhi_set_state().
 */
enum ipa_mhi_state {
	IPA_MHI_STATE_INITIALIZED,
	IPA_MHI_STATE_READY,
	IPA_MHI_STATE_STARTED,
	IPA_MHI_STATE_SUSPEND_IN_PROGRESS,
	IPA_MHI_STATE_SUSPENDED,
	IPA_MHI_STATE_RESUME_IN_PROGRESS,
	IPA_MHI_STATE_MAX
};

/* Printable names for enum ipa_mhi_state, indexed by the enum value. */
static char *ipa_mhi_state_str[] = {
	__stringify(IPA_MHI_STATE_INITIALIZED),
	__stringify(IPA_MHI_STATE_READY),
	__stringify(IPA_MHI_STATE_STARTED),
	__stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS),
	__stringify(IPA_MHI_STATE_SUSPENDED),
	__stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS),
};

/* Map a state value to its name, guarding against out-of-range input. */
#define MHI_STATE_STR(state) \
	(((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \
		ipa_mhi_state_str[(state)] : \
		"INVALID")

/* Direction of a device<->host copy in ipa_mhi_read_write_host(). */
enum ipa_mhi_dma_dir {
	IPA_MHI_DMA_TO_HOST,
	IPA_MHI_DMA_FROM_HOST,
};
/**
 * struct ipa_mhi_channel_ctx - MHI Channel context
 * @valid: entry is valid
 * @id: MHI channel ID
 * @index: channel index (presumably into the hardware channel array —
 *         TODO confirm against the uC/GSI setup code)
 * @client: IPA Client
 * @state: Channel state
 * @stop_in_proc: a channel stop was requested and is still in progress
 * @ch_info: GSI channel information
 * @channel_context_addr: host address of this channel's context entry
 * @ch_ctx_host: cached copy of the channel context read from the host
 * @event_context_addr: host address of the associated event ring context
 * @ev_ctx_host: cached copy of the event ring context read from the host
 * @brstmode_enabled: burst mode is enabled for this channel
 * @ch_scratch: GSI channel scratch area
 * @cached_gsi_evt_ring_hdl: GSI event ring handle retained for reuse
 */
struct ipa_mhi_channel_ctx {
	bool valid;
	u8 id;
	u8 index;
	enum ipa_client_type client;
	enum ipa_hw_mhi_channel_states state;
	bool stop_in_proc;
	struct gsi_chan_info ch_info;
	u64 channel_context_addr;
	struct ipa_mhi_ch_ctx ch_ctx_host;
	u64 event_context_addr;
	struct ipa_mhi_ev_ctx ev_ctx_host;
	bool brstmode_enabled;
	union __packed gsi_channel_scratch ch_scratch;
	unsigned long cached_gsi_evt_ring_hdl;
};

/*
 * struct ipa_mhi_client_ctx - global state of the IPA MHI client.
 * The driver keeps a single instance (ipa_mhi_client_ctx below).
 */
struct ipa_mhi_client_ctx {
	enum ipa_mhi_state state;	/* protected by state_lock */
	spinlock_t state_lock;
	mhi_client_cb cb_notify;	/* MHI client driver event callback */
	void *cb_priv;			/* opaque cookie passed to cb_notify */
	bool trigger_wakeup;		/* wakeup arrived during suspend */
	bool wakeup_notified;		/* DATA_AVAILABLE already queued */
	struct workqueue_struct *wq;	/* for client notification work */
	struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS];
	struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS];
	u32 total_channels;
	struct ipa_mhi_msi_info msi;
	u32 mmio_addr;
	u32 first_ch_idx;
	u32 first_er_idx;
	u32 host_ctrl_addr;		/* set in ipa_mhi_start() */
	u32 host_data_addr;		/* set in ipa_mhi_start() */
	u64 channel_context_array_addr;
	u64 event_context_array_addr;
	u32 qmi_req_id;
	u32 use_ipadma;		/* debugfs-writable: copy via IPA DMA engine */
	bool assert_bit40;	/* assert bit 40 on host addresses for PCIe */
	bool test_mode;		/* host memory is local RAM (phys_to_virt) */
	u32 pm_hdl;		/* IPA PM client handle */
	u32 modem_pm_hdl;	/* IPA PM handle for the modem client */
};
/* Singleton client context for this driver. */
static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx;

/* Serializes general MHI client operations (see ipa_mhi_set_lock_unlock). */
static DEFINE_MUTEX(mhi_client_general_mutex);

#ifdef CONFIG_DEBUG_FS
#define IPA_MHI_MAX_MSG_LEN 512
/* Scratch buffer shared by all debugfs read handlers. */
static char dbg_buff[IPA_MHI_MAX_MSG_LEN];
/* debugfs "ipa_mhi" directory dentry. */
static struct dentry *dent;

/* Printable names for enum ipa_hw_mhi_channel_states. */
static char *ipa_mhi_channel_state_str[] = {
	__stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_RUN),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_STOP),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR),
};

/* Map a channel state to its name, guarding against out-of-range values. */
#define MHI_CH_STATE_STR(state) \
	(((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \
		ipa_mhi_channel_state_str[(state)] : \
		"INVALID")
  147. static int ipa_mhi_set_lock_unlock(bool is_lock)
  148. {
  149. IPA_MHI_DBG("entry\n");
  150. if (is_lock)
  151. mutex_lock(&mhi_client_general_mutex);
  152. else
  153. mutex_unlock(&mhi_client_general_mutex);
  154. IPA_MHI_DBG("exit\n");
  155. return 0;
  156. }
/*
 * ipa_mhi_read_write_host() - copy a memory region between the device and
 * the MHI host.
 * @dir: IPA_MHI_DMA_FROM_HOST reads host memory into @dev_addr;
 *       otherwise @dev_addr is written out to host memory
 * @dev_addr: local (kernel virtual) buffer
 * @host_addr: host-side address of the region
 * @size: number of bytes to copy
 *
 * Two transports:
 * - use_ipadma set: bounce through a DMA-coherent buffer and move the data
 *   with the IPA DMA engine (ipa_dma_sync_memcpy).
 * - otherwise: map the host address with ioremap() (or phys_to_virt() in
 *   test mode, where "host" memory is local RAM) and memcpy directly.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
	u64 host_addr, int size)
{
	struct ipa_mem_buffer mem;
	int res;
	struct device *pdev;

	IPA_MHI_FUNC_ENTRY();

	if (ipa_mhi_client_ctx->use_ipadma) {
		pdev = ipa_get_dma_dev();
		/* assert bit 40 of the address when required for PCIe */
		host_addr = IPA_MHI_CLIENT_HOST_ADDR_COND(host_addr);

		mem.size = size;
		mem.base = dma_alloc_coherent(pdev, mem.size,
			&mem.phys_base, GFP_KERNEL);
		if (!mem.base) {
			IPA_MHI_ERR(
				"dma_alloc_coherent failed, DMA buff size %d\n"
				, mem.size);
			return -ENOMEM;
		}

		res = ipa_dma_enable();
		if (res) {
			IPA_MHI_ERR("failed to enable IPA DMA rc=%d\n", res);
			goto fail_dma_enable;
		}

		if (dir == IPA_MHI_DMA_FROM_HOST) {
			/* host -> bounce buffer -> caller's buffer */
			res = ipa_dma_sync_memcpy(mem.phys_base, host_addr,
				size);
			if (res) {
				IPA_MHI_ERR(
					"ipa_dma_sync_memcpy from host fail%d\n"
					, res);
				goto fail_memcopy;
			}
			memcpy(dev_addr, mem.base, size);
		} else {
			/* caller's buffer -> bounce buffer -> host */
			memcpy(mem.base, dev_addr, size);
			res = ipa_dma_sync_memcpy(host_addr, mem.phys_base,
				size);
			if (res) {
				IPA_MHI_ERR(
					"ipa_dma_sync_memcpy to host fail %d\n"
					, res);
				goto fail_memcopy;
			}
		}
		goto dma_succeed;
	} else {
		void *host_ptr;

		if (!ipa_mhi_client_ctx->test_mode)
			host_ptr = ioremap(host_addr, size);
		else
			host_ptr = phys_to_virt(host_addr);
		if (!host_ptr) {
			IPA_MHI_ERR("ioremap failed for 0x%llx\n", host_addr);
			return -EFAULT;
		}
		if (dir == IPA_MHI_DMA_FROM_HOST)
			memcpy(dev_addr, host_ptr, size);
		else
			memcpy(host_ptr, dev_addr, size);
		if (!ipa_mhi_client_ctx->test_mode)
			iounmap(host_ptr);
	}

	IPA_MHI_FUNC_EXIT();
	return 0;

	/*
	 * DMA path success deliberately falls through the failure labels so
	 * the DMA engine is disabled and the bounce buffer freed on every
	 * exit; res is 0 on the success path.
	 */
dma_succeed:
	IPA_MHI_FUNC_EXIT();
	res = 0;
fail_memcopy:
	if (ipa_dma_disable())
		IPA_MHI_ERR("failed to disable IPA DMA\n");
fail_dma_enable:
	dma_free_coherent(pdev, mem.size, mem.base, mem.phys_base);
	return res;
}
  232. static int ipa_mhi_print_channel_info(struct ipa_mhi_channel_ctx *channel,
  233. char *buff, int len)
  234. {
  235. int nbytes = 0;
  236. if (channel->valid) {
  237. nbytes += scnprintf(&buff[nbytes],
  238. len - nbytes,
  239. "channel idx=%d ch_id=%d client=%d state=%s\n",
  240. channel->index, channel->id, channel->client,
  241. MHI_CH_STATE_STR(channel->state));
  242. nbytes += scnprintf(&buff[nbytes],
  243. len - nbytes,
  244. " ch_ctx=%llx\n",
  245. channel->channel_context_addr);
  246. nbytes += scnprintf(&buff[nbytes],
  247. len - nbytes,
  248. " gsi_evt_ring_hdl=%ld ev_ctx=%llx\n",
  249. channel->cached_gsi_evt_ring_hdl,
  250. channel->event_context_addr);
  251. }
  252. return nbytes;
  253. }
  254. static int ipa_mhi_print_host_channel_ctx_info(
  255. struct ipa_mhi_channel_ctx *channel, char *buff, int len)
  256. {
  257. int res, nbytes = 0;
  258. struct ipa_mhi_ch_ctx ch_ctx_host;
  259. memset(&ch_ctx_host, 0, sizeof(ch_ctx_host));
  260. /* reading ch context from host */
  261. res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
  262. &ch_ctx_host, channel->channel_context_addr,
  263. sizeof(ch_ctx_host));
  264. if (res) {
  265. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  266. "Failed to read from host %d\n", res);
  267. return nbytes;
  268. }
  269. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  270. "ch_id: %d\n", channel->id);
  271. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  272. "chstate: 0x%x\n", ch_ctx_host.chstate);
  273. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  274. "brstmode: 0x%x\n", ch_ctx_host.brstmode);
  275. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  276. "chtype: 0x%x\n", ch_ctx_host.chtype);
  277. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  278. "erindex: 0x%x\n", ch_ctx_host.erindex);
  279. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  280. "rbase: 0x%llx\n", ch_ctx_host.rbase);
  281. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  282. "rlen: 0x%llx\n", ch_ctx_host.rlen);
  283. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  284. "rp: 0x%llx\n", ch_ctx_host.rp);
  285. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  286. "wp: 0x%llx\n", ch_ctx_host.wp);
  287. return nbytes;
  288. }
  289. static ssize_t ipa_mhi_debugfs_stats(struct file *file,
  290. char __user *ubuf,
  291. size_t count,
  292. loff_t *ppos)
  293. {
  294. int nbytes = 0;
  295. int i;
  296. struct ipa_mhi_channel_ctx *channel;
  297. nbytes += scnprintf(&dbg_buff[nbytes],
  298. IPA_MHI_MAX_MSG_LEN - nbytes,
  299. "IPA MHI state: %s\n",
  300. MHI_STATE_STR(ipa_mhi_client_ctx->state));
  301. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  302. channel = &ipa_mhi_client_ctx->ul_channels[i];
  303. nbytes += ipa_mhi_print_channel_info(channel,
  304. &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
  305. }
  306. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  307. channel = &ipa_mhi_client_ctx->dl_channels[i];
  308. nbytes += ipa_mhi_print_channel_info(channel,
  309. &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
  310. }
  311. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  312. }
  313. static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file,
  314. char __user *ubuf,
  315. size_t count,
  316. loff_t *ppos)
  317. {
  318. int nbytes = 0;
  319. nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN);
  320. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  321. }
/*
 * debugfs "dump_host_channel_ctx_array" read handler: reads every valid
 * channel's context entry from host memory and dumps it. Refuses to run
 * before the engine is STARTED or while SUSPENDED (the PCIe link may be
 * in D3, so host memory cannot be accessed safely).
 */
static ssize_t ipa_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file,
	char __user *ubuf,
	size_t count,
	loff_t *ppos)
{
	int i, nbytes = 0;
	struct ipa_mhi_channel_ctx *channel;

	/* host context addresses are only known once MHI was started */
	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_INITIALIZED ||
	    ipa_mhi_client_ctx->state == IPA_MHI_STATE_READY) {
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPA_MHI_MAX_MSG_LEN - nbytes,
			"Cannot dump host channel context ");
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPA_MHI_MAX_MSG_LEN - nbytes,
			"before IPA MHI was STARTED\n");
		return simple_read_from_buffer(ubuf, count, ppos,
			dbg_buff, nbytes);
	}
	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPA_MHI_MAX_MSG_LEN - nbytes,
			"IPA MHI is suspended, cannot dump channel ctx array");
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPA_MHI_MAX_MSG_LEN - nbytes,
			" from host -PCIe can be in D3 state\n");
		return simple_read_from_buffer(ubuf, count, ppos,
			dbg_buff, nbytes);
	}

	nbytes += scnprintf(&dbg_buff[nbytes],
		IPA_MHI_MAX_MSG_LEN - nbytes,
		"channel contex array - dump from host\n");
	nbytes += scnprintf(&dbg_buff[nbytes],
		IPA_MHI_MAX_MSG_LEN - nbytes,
		"***** UL channels *******\n");
	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
		channel = &ipa_mhi_client_ctx->ul_channels[i];
		if (!channel->valid)
			continue;
		nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
			&dbg_buff[nbytes],
			IPA_MHI_MAX_MSG_LEN - nbytes);
	}

	nbytes += scnprintf(&dbg_buff[nbytes],
		IPA_MHI_MAX_MSG_LEN - nbytes,
		"\n***** DL channels *******\n");
	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
		channel = &ipa_mhi_client_ctx->dl_channels[i];
		if (!channel->valid)
			continue;
		nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
			&dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
	}
	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
/* debugfs file operations: all entries are read-only dumps. */
const struct file_operations ipa_mhi_stats_ops = {
	.read = ipa_mhi_debugfs_stats,
};

const struct file_operations ipa_mhi_uc_stats_ops = {
	.read = ipa_mhi_debugfs_uc_stats,
};

const struct file_operations ipa_mhi_dump_host_ch_ctx_ops = {
	.read = ipa_mhi_debugfs_dump_host_ch_ctx_arr,
};
  385. static void ipa_mhi_debugfs_init(void)
  386. {
  387. const mode_t read_only_mode = 0444;
  388. const mode_t read_write_mode = 0664;
  389. struct dentry *file;
  390. IPA_MHI_FUNC_ENTRY();
  391. dent = debugfs_create_dir("ipa_mhi", 0);
  392. if (IS_ERR(dent)) {
  393. IPA_MHI_ERR("fail to create folder ipa_mhi\n");
  394. return;
  395. }
  396. file = debugfs_create_file("stats", read_only_mode, dent,
  397. 0, &ipa_mhi_stats_ops);
  398. if (!file || IS_ERR(file)) {
  399. IPA_MHI_ERR("fail to create file stats\n");
  400. goto fail;
  401. }
  402. file = debugfs_create_file("uc_stats", read_only_mode, dent,
  403. 0, &ipa_mhi_uc_stats_ops);
  404. if (!file || IS_ERR(file)) {
  405. IPA_MHI_ERR("fail to create file uc_stats\n");
  406. goto fail;
  407. }
  408. file = debugfs_create_u32("use_ipadma", read_write_mode, dent,
  409. &ipa_mhi_client_ctx->use_ipadma);
  410. if (!file || IS_ERR(file)) {
  411. IPA_MHI_ERR("fail to create file use_ipadma\n");
  412. goto fail;
  413. }
  414. file = debugfs_create_file("dump_host_channel_ctx_array",
  415. read_only_mode, dent, 0, &ipa_mhi_dump_host_ch_ctx_ops);
  416. if (!file || IS_ERR(file)) {
  417. IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n");
  418. goto fail;
  419. }
  420. IPA_MHI_FUNC_EXIT();
  421. return;
  422. fail:
  423. debugfs_remove_recursive(dent);
  424. }
#else
/* Stubs when debugfs support is compiled out. */
static void ipa_mhi_debugfs_init(void) {}
static void ipa_mhi_debugfs_destroy(void) {}
#endif /* CONFIG_DEBUG_FS */
/* DL/UL sync info handed to the uC at MHI engine start (see
 * ipa_mhi_start(), init_params.uC.ipa_cached_dl_ul_sync_info).
 */
static union IpaHwMhiDlUlSyncCmdData_t ipa_cached_dl_ul_sync_info;

/* Work items delivering client notifications from process context. */
static void ipa_mhi_wq_notify_wakeup(struct work_struct *work);
static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup);
static void ipa_mhi_wq_notify_ready(struct work_struct *work);
static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready);
  434. /**
  435. * ipa_mhi_notify_wakeup() - Schedule work to notify data available
  436. *
  437. * This function will schedule a work to notify data available event.
  438. * In case this function is called more than once, only one notification will
  439. * be sent to MHI client driver. No further notifications will be sent until
  440. * IPA MHI state will become STARTED.
  441. */
  442. static void ipa_mhi_notify_wakeup(void)
  443. {
  444. IPA_MHI_FUNC_ENTRY();
  445. if (ipa_mhi_client_ctx->wakeup_notified) {
  446. IPA_MHI_DBG("wakeup already called\n");
  447. return;
  448. }
  449. queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_wakeup_work);
  450. ipa_mhi_client_ctx->wakeup_notified = true;
  451. IPA_MHI_FUNC_EXIT();
  452. }
  453. /**
  454. * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
  455. *
  456. * This function is called from IPA MHI workqueue to notify
  457. * MHI client driver on data available event.
  458. */
  459. static void ipa_mhi_wq_notify_wakeup(struct work_struct *work)
  460. {
  461. IPA_MHI_FUNC_ENTRY();
  462. ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
  463. IPA_MHI_EVENT_DATA_AVAILABLE, 0);
  464. IPA_MHI_FUNC_EXIT();
  465. }
  466. /**
  467. * ipa_mhi_wq_notify_ready() - Notify MHI client on ready
  468. *
  469. * This function is called from IPA MHI workqueue to notify
  470. * MHI client driver on ready event when IPA uC is loaded
  471. */
  472. static void ipa_mhi_wq_notify_ready(struct work_struct *work)
  473. {
  474. IPA_MHI_FUNC_ENTRY();
  475. ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
  476. IPA_MHI_EVENT_READY, 0);
  477. IPA_MHI_FUNC_EXIT();
  478. }
  479. /**
  480. * ipa_mhi_notify_ready() - Schedule work to notify ready
  481. *
  482. * This function will schedule a work to notify ready event.
  483. */
  484. static void ipa_mhi_notify_ready(void)
  485. {
  486. IPA_MHI_FUNC_ENTRY();
  487. queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_ready_work);
  488. IPA_MHI_FUNC_EXIT();
  489. }
/**
 * ipa_mhi_set_state() - Set new state to IPA MHI
 * @new_state: new state
 *
 * Sets a new state to IPA MHI if possible according to IPA MHI state machine.
 * In some state transitions a wakeup request will be triggered.
 *
 * Returns: 0 on success, -EPERM on an illegal transition
 */
static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
{
	unsigned long flags;
	int res = -EPERM;

	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
	IPA_MHI_DBG("Current state: %s\n",
		MHI_STATE_STR(ipa_mhi_client_ctx->state));

	switch (ipa_mhi_client_ctx->state) {
	case IPA_MHI_STATE_INITIALIZED:
		/* uC became ready: move to READY and tell the client */
		if (new_state == IPA_MHI_STATE_READY) {
			ipa_mhi_notify_ready();
			res = 0;
		}
		break;
	case IPA_MHI_STATE_READY:
		/* READY->READY is a harmless repeat; READY->STARTED is
		 * the normal engine start.
		 */
		if (new_state == IPA_MHI_STATE_READY)
			res = 0;
		if (new_state == IPA_MHI_STATE_STARTED)
			res = 0;
		break;
	case IPA_MHI_STATE_STARTED:
		/* back to INITIALIZED (MHI reset) or into suspend */
		if (new_state == IPA_MHI_STATE_INITIALIZED)
			res = 0;
		else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
			res = 0;
		break;
	case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
		if (new_state == IPA_MHI_STATE_SUSPENDED) {
			/* deliver a wakeup that arrived mid-suspend */
			if (ipa_mhi_client_ctx->trigger_wakeup) {
				ipa_mhi_client_ctx->trigger_wakeup = false;
				ipa_mhi_notify_wakeup();
			}
			res = 0;
		} else if (new_state == IPA_MHI_STATE_STARTED) {
			/* suspend aborted: re-arm wakeup notification */
			ipa_mhi_client_ctx->wakeup_notified = false;
			ipa_mhi_client_ctx->trigger_wakeup = false;
			res = 0;
		}
		break;
	case IPA_MHI_STATE_SUSPENDED:
		if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS)
			res = 0;
		break;
	case IPA_MHI_STATE_RESUME_IN_PROGRESS:
		if (new_state == IPA_MHI_STATE_SUSPENDED) {
			/* resume failed: fire any pending wakeup */
			if (ipa_mhi_client_ctx->trigger_wakeup) {
				ipa_mhi_client_ctx->trigger_wakeup = false;
				ipa_mhi_notify_wakeup();
			}
			res = 0;
		} else if (new_state == IPA_MHI_STATE_STARTED) {
			/* resume completed: re-arm wakeup notification */
			ipa_mhi_client_ctx->trigger_wakeup = false;
			ipa_mhi_client_ctx->wakeup_notified = false;
			res = 0;
		}
		break;
	default:
		IPA_MHI_ERR("Invalid state %d\n", ipa_mhi_client_ctx->state);
		WARN_ON(1);
	}

	if (res)
		IPA_MHI_ERR("Invalid state change to %s\n",
			MHI_STATE_STR(new_state));
	else {
		IPA_MHI_DBG("New state change to %s\n",
			MHI_STATE_STR(new_state));
		ipa_mhi_client_ctx->state = new_state;
	}
	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
	return res;
}
  570. static void ipa_mhi_uc_ready_cb(void)
  571. {
  572. IPA_MHI_FUNC_ENTRY();
  573. ipa_mhi_set_state(IPA_MHI_STATE_READY);
  574. IPA_MHI_FUNC_EXIT();
  575. }
/*
 * uC wakeup-request callback: data arrived while the engine was (being)
 * suspended. When fully SUSPENDED, notify the client immediately; while a
 * suspend is still in progress, just flag trigger_wakeup so the state
 * machine fires the notification once the suspend completes.
 */
static void ipa_mhi_uc_wakeup_request_cb(void)
{
	unsigned long flags;

	IPA_MHI_FUNC_ENTRY();
	/* NOTE(review): state is read here without state_lock; the value is
	 * only used for this debug print, the decision below is made under
	 * the lock.
	 */
	IPA_MHI_DBG("MHI state: %s\n",
		MHI_STATE_STR(ipa_mhi_client_ctx->state));
	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED)
		ipa_mhi_notify_wakeup();
	else if (ipa_mhi_client_ctx->state ==
		IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
		/* wakeup event will be triggered after suspend finishes */
		ipa_mhi_client_ctx->trigger_wakeup = true;
	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
	IPA_MHI_FUNC_EXIT();
}
/**
 * ipa_mhi_start() - Start IPA MHI engine
 * @params: pcie addresses for MHI
 *
 * This function is called by MHI client driver on MHI engine start for
 * handling MHI accelerated channels. This function is called after
 * ipa_mhi_init() was called and can be called after MHI reset to restart MHI
 * engine. When this function returns device can move to M0 state.
 *
 * Return codes: 0 : success
 * negative : error
 */
int ipa_mhi_start(struct ipa_mhi_start_params *params)
{
	int res;
	struct ipa_mhi_init_engine init_params;

	IPA_MHI_FUNC_ENTRY();

	if (!params) {
		IPA_MHI_ERR("null args\n");
		return -EINVAL;
	}
	if (!ipa_mhi_client_ctx) {
		IPA_MHI_ERR("not initialized\n");
		return -EPERM;
	}

	/* legal only from READY (or as a repeat) per the state machine */
	res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state %d\n", res);
		return res;
	}

	/* cache the host-side addresses supplied by the MHI client */
	ipa_mhi_client_ctx->host_ctrl_addr = params->host_ctrl_addr;
	ipa_mhi_client_ctx->host_data_addr = params->host_data_addr;
	ipa_mhi_client_ctx->channel_context_array_addr =
		params->channel_context_array_addr;
	ipa_mhi_client_ctx->event_context_array_addr =
		params->event_context_array_addr;
	IPA_MHI_DBG("host_ctrl_addr 0x%x\n",
		ipa_mhi_client_ctx->host_ctrl_addr);
	IPA_MHI_DBG("host_data_addr 0x%x\n",
		ipa_mhi_client_ctx->host_data_addr);
	IPA_MHI_DBG("channel_context_array_addr 0x%llx\n",
		ipa_mhi_client_ctx->channel_context_array_addr);
	IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
		ipa_mhi_client_ctx->event_context_array_addr);

	/* vote both PM clients active before touching the hardware */
	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
	if (res) {
		IPA_MHI_ERR("failed activate client %d\n", res);
		goto fail_pm_activate;
	}
	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
	if (res) {
		IPA_MHI_ERR("failed activate modem client %d\n", res);
		goto fail_pm_activate_modem;
	}

	/* gsi params */
	init_params.gsi.first_ch_idx =
		ipa_mhi_client_ctx->first_ch_idx;
	/* uC params */
	init_params.uC.first_ch_idx =
		ipa_mhi_client_ctx->first_ch_idx;
	init_params.uC.first_er_idx =
		ipa_mhi_client_ctx->first_er_idx;
	init_params.uC.host_ctrl_addr = params->host_ctrl_addr;
	init_params.uC.host_data_addr = params->host_data_addr;
	init_params.uC.mmio_addr = ipa_mhi_client_ctx->mmio_addr;
	init_params.uC.msi = &ipa_mhi_client_ctx->msi;
	init_params.uC.ipa_cached_dl_ul_sync_info =
		&ipa_cached_dl_ul_sync_info;

	res = ipa_mhi_init_engine(&init_params);
	if (res) {
		IPA_MHI_ERR("IPA core failed to start MHI %d\n", res);
		goto fail_init_engine;
	}

	IPA_MHI_FUNC_EXIT();
	return 0;

	/* unwind in reverse order of acquisition */
fail_init_engine:
	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_pm_activate_modem:
	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_pm_activate:
	ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
	return res;
}
  675. /**
  676. * ipa_mhi_get_channel_context() - Get corresponding channel context
  677. * @ep: IPA ep
  678. * @channel_id: Channel ID
  679. *
  680. * This function will return the corresponding channel context or allocate new
  681. * one in case channel context for channel does not exist.
  682. */
  683. static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context(
  684. enum ipa_client_type client, u8 channel_id)
  685. {
  686. int ch_idx;
  687. struct ipa_mhi_channel_ctx *channels;
  688. int max_channels;
  689. if (IPA_CLIENT_IS_PROD(client)) {
  690. channels = ipa_mhi_client_ctx->ul_channels;
  691. max_channels = IPA_MHI_MAX_UL_CHANNELS;
  692. } else {
  693. channels = ipa_mhi_client_ctx->dl_channels;
  694. max_channels = IPA_MHI_MAX_DL_CHANNELS;
  695. }
  696. /* find the channel context according to channel id */
  697. for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
  698. if (channels[ch_idx].valid &&
  699. channels[ch_idx].id == channel_id)
  700. return &channels[ch_idx];
  701. }
  702. /* channel context does not exists, allocate a new one */
  703. for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
  704. if (!channels[ch_idx].valid)
  705. break;
  706. }
  707. if (ch_idx == max_channels) {
  708. IPA_MHI_ERR("no more channels available\n");
  709. return NULL;
  710. }
  711. channels[ch_idx].valid = true;
  712. channels[ch_idx].id = channel_id;
  713. channels[ch_idx].index = ipa_mhi_client_ctx->total_channels++;
  714. channels[ch_idx].client = client;
  715. channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
  716. return &channels[ch_idx];
  717. }
  718. /**
  719. * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel
  720. * context
  721. * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe()
  722. *
  723. * This function will return the corresponding channel context or NULL in case
  724. * that channel does not exist.
  725. */
  726. static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl(
  727. u32 clnt_hdl)
  728. {
  729. int ch_idx;
  730. for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
  731. if (ipa_mhi_client_ctx->ul_channels[ch_idx].valid &&
  732. ipa_get_ep_mapping(
  733. ipa_mhi_client_ctx->ul_channels[ch_idx].client)
  734. == clnt_hdl)
  735. return &ipa_mhi_client_ctx->ul_channels[ch_idx];
  736. }
  737. for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
  738. if (ipa_mhi_client_ctx->dl_channels[ch_idx].valid &&
  739. ipa_get_ep_mapping(
  740. ipa_mhi_client_ctx->dl_channels[ch_idx].client)
  741. == clnt_hdl)
  742. return &ipa_mhi_client_ctx->dl_channels[ch_idx];
  743. }
  744. return NULL;
  745. }
  746. static void ipa_mhi_dump_ch_ctx(struct ipa_mhi_channel_ctx *channel)
  747. {
  748. IPA_MHI_DBG("ch_id %d\n", channel->id);
  749. IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate);
  750. IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode);
  751. IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg);
  752. IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype);
  753. IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex);
  754. IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase);
  755. IPA_MHI_DBG("rlen 0x%llx\n", channel->ch_ctx_host.rlen);
  756. IPA_MHI_DBG("rp 0x%llx\n", channel->ch_ctx_host.rp);
  757. IPA_MHI_DBG("wp 0x%llx\n", channel->ch_ctx_host.wp);
  758. }
  759. static void ipa_mhi_dump_ev_ctx(struct ipa_mhi_channel_ctx *channel)
  760. {
  761. IPA_MHI_DBG("ch_id %d event id %d\n", channel->id,
  762. channel->ch_ctx_host.erindex);
  763. IPA_MHI_DBG("intmodc 0x%x\n", channel->ev_ctx_host.intmodc);
  764. IPA_MHI_DBG("intmodt 0x%x\n", channel->ev_ctx_host.intmodt);
  765. IPA_MHI_DBG("ertype 0x%x\n", channel->ev_ctx_host.ertype);
  766. IPA_MHI_DBG("msivec 0x%x\n", channel->ev_ctx_host.msivec);
  767. IPA_MHI_DBG("rbase 0x%llx\n", channel->ev_ctx_host.rbase);
  768. IPA_MHI_DBG("rlen 0x%llx\n", channel->ev_ctx_host.rlen);
  769. IPA_MHI_DBG("rp 0x%llx\n", channel->ev_ctx_host.rp);
  770. IPA_MHI_DBG("wp 0x%llx\n", channel->ev_ctx_host.wp);
  771. }
  772. static int ipa_mhi_read_ch_ctx(struct ipa_mhi_channel_ctx *channel)
  773. {
  774. int res;
  775. res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
  776. &channel->ch_ctx_host, channel->channel_context_addr,
  777. sizeof(channel->ch_ctx_host));
  778. if (res) {
  779. IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
  780. return res;
  781. }
  782. ipa_mhi_dump_ch_ctx(channel);
  783. channel->event_context_addr =
  784. ipa_mhi_client_ctx->event_context_array_addr +
  785. channel->ch_ctx_host.erindex * sizeof(struct ipa_mhi_ev_ctx);
  786. IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id,
  787. channel->event_context_addr);
  788. res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
  789. &channel->ev_ctx_host, channel->event_context_addr,
  790. sizeof(channel->ev_ctx_host));
  791. if (res) {
  792. IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
  793. return res;
  794. }
  795. ipa_mhi_dump_ev_ctx(channel);
  796. return 0;
  797. }
  798. static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify)
  799. {
  800. struct ipa_mhi_channel_ctx *channel = notify->user_data;
  801. IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
  802. channel->id, channel->client, channel->state);
  803. switch (notify->evt_id) {
  804. case GSI_EVT_OUT_OF_BUFFERS_ERR:
  805. IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n");
  806. break;
  807. case GSI_EVT_OUT_OF_RESOURCES_ERR:
  808. IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n");
  809. break;
  810. case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
  811. IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
  812. break;
  813. case GSI_EVT_EVT_RING_EMPTY_ERR:
  814. IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n");
  815. break;
  816. default:
  817. IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
  818. }
  819. IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
  820. ipa_assert();
  821. }
  822. static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify)
  823. {
  824. struct ipa_mhi_channel_ctx *channel = notify->chan_user_data;
  825. IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
  826. channel->id, channel->client, channel->state);
  827. switch (notify->evt_id) {
  828. case GSI_CHAN_INVALID_TRE_ERR:
  829. IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
  830. break;
  831. case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
  832. IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
  833. break;
  834. case GSI_CHAN_OUT_OF_BUFFERS_ERR:
  835. IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
  836. break;
  837. case GSI_CHAN_OUT_OF_RESOURCES_ERR:
  838. IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
  839. break;
  840. case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
  841. IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
  842. break;
  843. case GSI_CHAN_HWO_1_ERR:
  844. IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n");
  845. break;
  846. default:
  847. IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
  848. }
  849. IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
  850. ipa_assert();
  851. }
  852. static bool ipa_mhi_gsi_channel_empty(struct ipa_mhi_channel_ctx *channel)
  853. {
  854. IPA_MHI_FUNC_ENTRY();
  855. if (!channel->stop_in_proc) {
  856. IPA_MHI_DBG("Channel is not in STOP_IN_PROC\n");
  857. return true;
  858. }
  859. if (ipa_mhi_stop_gsi_channel(channel->client)) {
  860. channel->stop_in_proc = false;
  861. return true;
  862. }
  863. return false;
  864. }
/**
 * ipa_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink
 * @msecs: timeout to wait
 *
 * This function will poll until there are no packets pending in uplink channels
 * or timeout occurred.
 *
 * Return code: true - no pending packets in uplink channels
 * false - timeout occurred
 */
static bool ipa_mhi_wait_for_ul_empty_timeout(unsigned int msecs)
{
	unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
	unsigned long jiffies_start = jiffies;
	bool empty = false;
	int i;

	IPA_MHI_FUNC_ENTRY();
	while (!empty) {
		/* assume empty; any non-drained channel clears the flag */
		empty = true;
		for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
			if (!ipa_mhi_client_ctx->ul_channels[i].valid)
				continue;
			if (ipa_get_transport_type() ==
				IPA_TRANSPORT_TYPE_GSI)
				empty &= ipa_mhi_gsi_channel_empty(
					&ipa_mhi_client_ctx->ul_channels[i]);
			else
				empty &= ipa_mhi_sps_channel_empty(
				ipa_mhi_client_ctx->ul_channels[i].client);
		}
		/*
		 * NOTE(review): on timeout this breaks with empty possibly
		 * still false; the DBG text below is printed in both the
		 * drained and timed-out cases.
		 */
		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
			IPA_MHI_DBG("finished waiting for UL empty\n");
			break;
		}
		/* back off between polls on GSI with a single UL channel */
		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI &&
		    IPA_MHI_MAX_UL_CHANNELS == 1)
			usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
				IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
	}

	IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty");

	IPA_MHI_FUNC_EXIT();
	return empty;
}
  908. static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source)
  909. {
  910. struct ipa_enable_force_clear_datapath_req_msg_v01 req;
  911. int i;
  912. int res;
  913. IPA_MHI_FUNC_ENTRY();
  914. memset(&req, 0, sizeof(req));
  915. req.request_id = request_id;
  916. req.source_pipe_bitmask = 0;
  917. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  918. if (!ipa_mhi_client_ctx->ul_channels[i].valid)
  919. continue;
  920. req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping(
  921. ipa_mhi_client_ctx->ul_channels[i].client);
  922. }
  923. if (throttle_source) {
  924. req.throttle_source_valid = 1;
  925. req.throttle_source = 1;
  926. }
  927. IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
  928. req.request_id, req.source_pipe_bitmask,
  929. req.throttle_source);
  930. res = ipa_qmi_enable_force_clear_datapath_send(&req);
  931. if (res) {
  932. IPA_MHI_ERR(
  933. "ipa_qmi_enable_force_clear_datapath_send failed %d\n"
  934. , res);
  935. return res;
  936. }
  937. IPA_MHI_FUNC_EXIT();
  938. return 0;
  939. }
  940. static int ipa_mhi_disable_force_clear(u32 request_id)
  941. {
  942. struct ipa_disable_force_clear_datapath_req_msg_v01 req;
  943. int res;
  944. IPA_MHI_FUNC_ENTRY();
  945. memset(&req, 0, sizeof(req));
  946. req.request_id = request_id;
  947. IPA_MHI_DBG("req_id=0x%x\n", req.request_id);
  948. res = ipa_qmi_disable_force_clear_datapath_send(&req);
  949. if (res) {
  950. IPA_MHI_ERR(
  951. "ipa_qmi_disable_force_clear_datapath_send failed %d\n"
  952. , res);
  953. return res;
  954. }
  955. IPA_MHI_FUNC_EXIT();
  956. return 0;
  957. }
/*
 * Enable or disable head-of-line-blocking (HOLB) packet drop on all valid,
 * non-INVALID-state DL channels.
 *
 * @enable: true  - save each channel's current HOLB config into @old_holb
 *                  and configure immediate drop (en=1, tmr_val=0);
 *          false - restore the configs previously saved in @old_holb.
 * @old_holb: caller-provided array of at least IPA_MHI_MAX_DL_CHANNELS
 *            entries, indexed by DL channel slot.
 *
 * An unmapped client or a failed HOLB configuration is fatal: the function
 * asserts and returns without touching the remaining channels.
 */
static void ipa_mhi_set_holb_on_dl_channels(bool enable,
	struct ipa_ep_cfg_holb old_holb[])
{
	int i;
	struct ipa_ep_cfg_holb ep_holb;
	int ep_idx;
	int res;

	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
		if (!ipa_mhi_client_ctx->dl_channels[i].valid)
			continue;
		/* skip channels that were never connected */
		if (ipa_mhi_client_ctx->dl_channels[i].state ==
			IPA_HW_MHI_CHANNEL_STATE_INVALID)
			continue;
		ep_idx = ipa_get_ep_mapping(
			ipa_mhi_client_ctx->dl_channels[i].client);
		if (-1 == ep_idx) {
			IPA_MHI_ERR("Client %u is not mapped\n",
				ipa_mhi_client_ctx->dl_channels[i].client);
			ipa_assert();
			return;
		}
		memset(&ep_holb, 0, sizeof(ep_holb));
		if (enable) {
			/* save current config, then drop with zero timer */
			ipa_get_holb(ep_idx, &old_holb[i]);
			ep_holb.en = 1;
			ep_holb.tmr_val = 0;
		} else {
			/* restore the config saved on the enable pass */
			ep_holb = old_holb[i];
		}
		res = ipa_cfg_ep_holb(ep_idx, &ep_holb);
		if (res) {
			IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res);
			ipa_assert();
			return;
		}
	}
}
  995. static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel)
  996. {
  997. int clnt_hdl;
  998. int res;
  999. IPA_MHI_FUNC_ENTRY();
  1000. clnt_hdl = ipa_get_ep_mapping(channel->client);
  1001. if (clnt_hdl < 0)
  1002. return -EFAULT;
  1003. res = ipa_stop_gsi_channel(clnt_hdl);
  1004. if (res != 0 && res != -GSI_STATUS_AGAIN &&
  1005. res != -GSI_STATUS_TIMED_OUT) {
  1006. IPA_MHI_ERR("GSI stop channel failed %d\n", res);
  1007. return -EFAULT;
  1008. }
  1009. /* check if channel was stopped completely */
  1010. if (res)
  1011. channel->stop_in_proc = true;
  1012. IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ?
  1013. "STOP_IN_PROC" : "STOP");
  1014. IPA_MHI_FUNC_EXIT();
  1015. return 0;
  1016. }
/*
 * Reset a single UL (host->device) channel.
 *
 * Sequence: stop the channel (GSI suspend or uC reset), wait for it to
 * drain, and if it does not drain in time escalate to a QMI force-clear
 * of the datapath before the final internal reset. The force-clear path
 * differs per transport: GSI just re-polls for empty; SPS/BAM temporarily
 * enables HOLB drop on all DL channels, runs a tag process, and disables
 * the SPS pipe. Failures inside the force-clear window assert.
 */
static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
{
	int res;
	bool empty;
	struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];

	IPA_MHI_FUNC_ENTRY();
	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		res = ipa_mhi_suspend_gsi_channel(channel);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n",
				 res);
			return res;
		}
	} else {
		res = ipa_uc_mhi_reset_channel(channel->index);
		if (res) {
			IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
				res);
			return res;
		}
	}

	/* give the channel a chance to drain on its own first */
	empty = ipa_mhi_wait_for_ul_empty_timeout(
			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
	if (!empty) {
		IPA_MHI_DBG("%s not empty\n",
			(ipa_get_transport_type() ==
				IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM");
		res = ipa_mhi_enable_force_clear(
				ipa_mhi_client_ctx->qmi_req_id, false);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n",
				res);
			ipa_assert();
			return res;
		}

		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
			/* force-clear active; poll for empty once more */
			empty = ipa_mhi_wait_for_ul_empty_timeout(
				IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
			IPA_MHI_DBG("empty=%d\n", empty);
		} else {
			/* enable packet drop on all DL channels */
			ipa_mhi_set_holb_on_dl_channels(true, old_ep_holb);
			ipa_generate_tag_process();
			/* disable packet drop on all DL channels */
			ipa_mhi_set_holb_on_dl_channels(false, old_ep_holb);

			res = ipa_disable_sps_pipe(channel->client);
			if (res) {
				IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
				ipa_assert();
				return res;
			}
		}

		res =
		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n",
				res);
			ipa_assert();
			return res;
		}
		/* each enable/disable pair consumes one QMI request id */
		ipa_mhi_client_ctx->qmi_req_id++;
	}

	res = ipa_mhi_reset_channel_internal(channel->client);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_reset_ul_channel_internal failed %d\n"
			, res);
		return res;
	}

	IPA_MHI_FUNC_EXIT();

	return 0;
}
  1088. static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
  1089. {
  1090. int res;
  1091. IPA_MHI_FUNC_ENTRY();
  1092. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1093. res = ipa_mhi_suspend_gsi_channel(channel);
  1094. if (res) {
  1095. IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n"
  1096. , res);
  1097. return res;
  1098. }
  1099. res = ipa_mhi_reset_channel_internal(channel->client);
  1100. if (res) {
  1101. IPA_MHI_ERR(
  1102. "ipa_mhi_reset_ul_channel_internal failed %d\n"
  1103. , res);
  1104. return res;
  1105. }
  1106. } else {
  1107. res = ipa_mhi_reset_channel_internal(channel->client);
  1108. if (res) {
  1109. IPA_MHI_ERR(
  1110. "ipa_mhi_reset_ul_channel_internal failed %d\n"
  1111. , res);
  1112. return res;
  1113. }
  1114. res = ipa_uc_mhi_reset_channel(channel->index);
  1115. if (res) {
  1116. IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
  1117. res);
  1118. ipa_mhi_start_channel_internal(channel->client);
  1119. return res;
  1120. }
  1121. }
  1122. IPA_MHI_FUNC_EXIT();
  1123. return 0;
  1124. }
/*
 * Reset @channel in the appropriate direction (UL for producers, DL for
 * consumers), mark it DISABLE, and - on GSI with @update_state set - write
 * the new chstate back into the host's channel context via DMA.
 *
 * Returns 0 on success, negative on reset or host-write failure.
 */
static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel,
	bool update_state)
{
	int res;

	IPA_MHI_FUNC_ENTRY();
	if (IPA_CLIENT_IS_PROD(channel->client))
		res = ipa_mhi_reset_ul_channel(channel);
	else
		res = ipa_mhi_reset_dl_channel(channel);
	if (res) {
		IPA_MHI_ERR("failed to reset channel error %d\n", res);
		return res;
	}

	channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;

	/* mirror the new state into the host channel context (GSI only) */
	if ((ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) &&
	    update_state) {
		res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
			&channel->state, channel->channel_context_addr +
				offsetof(struct ipa_mhi_ch_ctx, chstate),
			sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
		if (res) {
			IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
			return res;
		}
	}

	IPA_MHI_FUNC_EXIT();
	return 0;
}
  1153. /**
  1154. * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
  1155. * MHI channel
  1156. * @in: connect parameters
  1157. * @clnt_hdl: [out] client handle for this pipe
  1158. *
  1159. * This function is called by MHI client driver on MHI channel start.
  1160. * This function is called after MHI engine was started.
  1161. *
  1162. * Return codes: 0 : success
  1163. * negative : error
  1164. */
  1165. int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
  1166. {
  1167. int res;
  1168. unsigned long flags;
  1169. struct ipa_mhi_channel_ctx *channel = NULL;
  1170. IPA_MHI_FUNC_ENTRY();
  1171. if (!in || !clnt_hdl) {
  1172. IPA_MHI_ERR("NULL args\n");
  1173. return -EINVAL;
  1174. }
  1175. if (in->sys.client >= IPA_CLIENT_MAX) {
  1176. IPA_MHI_ERR("bad param client:%d\n", in->sys.client);
  1177. return -EINVAL;
  1178. }
  1179. if (!IPA_CLIENT_IS_MHI(in->sys.client)) {
  1180. IPA_MHI_ERR(
  1181. "Invalid MHI client, client: %d\n", in->sys.client);
  1182. return -EINVAL;
  1183. }
  1184. IPA_MHI_DBG("channel=%d\n", in->channel_id);
  1185. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  1186. if (!ipa_mhi_client_ctx ||
  1187. ipa_mhi_client_ctx->state != IPA_MHI_STATE_STARTED) {
  1188. IPA_MHI_ERR("IPA MHI was not started\n");
  1189. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1190. return -EINVAL;
  1191. }
  1192. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1193. channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id);
  1194. if (!channel) {
  1195. IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
  1196. return -EINVAL;
  1197. }
  1198. if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID &&
  1199. channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
  1200. IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
  1201. return -EFAULT;
  1202. }
  1203. channel->channel_context_addr =
  1204. ipa_mhi_client_ctx->channel_context_array_addr +
  1205. channel->id * sizeof(struct ipa_mhi_ch_ctx);
  1206. /* for event context address index needs to read from host */
  1207. IPA_MHI_DBG("client %d channelIndex %d channelID %d, state %d\n",
  1208. channel->client, channel->index, channel->id, channel->state);
  1209. IPA_MHI_DBG("channel_context_addr 0x%llx cached_gsi_evt_ring_hdl %lu\n",
  1210. channel->channel_context_addr,
  1211. channel->cached_gsi_evt_ring_hdl);
  1212. IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
  1213. mutex_lock(&mhi_client_general_mutex);
  1214. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1215. struct ipa_mhi_connect_params_internal internal;
  1216. IPA_MHI_DBG("reading ch/ev context from host\n");
  1217. res = ipa_mhi_read_ch_ctx(channel);
  1218. if (res) {
  1219. IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
  1220. goto fail_start_channel;
  1221. }
  1222. internal.channel_id = in->channel_id;
  1223. internal.sys = &in->sys;
  1224. internal.start.gsi.state = channel->state;
  1225. internal.start.gsi.msi = &ipa_mhi_client_ctx->msi;
  1226. internal.start.gsi.ev_ctx_host = &channel->ev_ctx_host;
  1227. internal.start.gsi.event_context_addr =
  1228. channel->event_context_addr;
  1229. internal.start.gsi.ch_ctx_host = &channel->ch_ctx_host;
  1230. internal.start.gsi.channel_context_addr =
  1231. channel->channel_context_addr;
  1232. internal.start.gsi.ch_err_cb = ipa_mhi_gsi_ch_err_cb;
  1233. internal.start.gsi.channel = (void *)channel;
  1234. internal.start.gsi.ev_err_cb = ipa_mhi_gsi_ev_err_cb;
  1235. internal.start.gsi.assert_bit40 =
  1236. ipa_mhi_client_ctx->assert_bit40;
  1237. internal.start.gsi.mhi = &channel->ch_scratch.mhi;
  1238. internal.start.gsi.cached_gsi_evt_ring_hdl =
  1239. &channel->cached_gsi_evt_ring_hdl;
  1240. internal.start.gsi.evchid = channel->index;
  1241. res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
  1242. if (res) {
  1243. IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
  1244. goto fail_connect_pipe;
  1245. }
  1246. channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
  1247. channel->brstmode_enabled =
  1248. channel->ch_scratch.mhi.burst_mode_enabled;
  1249. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1250. &channel->state, channel->channel_context_addr +
  1251. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1252. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1253. if (res) {
  1254. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1255. mutex_unlock(&mhi_client_general_mutex);
  1256. IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
  1257. return res;
  1258. }
  1259. } else {
  1260. struct ipa_mhi_connect_params_internal internal;
  1261. internal.channel_id = in->channel_id;
  1262. internal.sys = &in->sys;
  1263. internal.start.uC.index = channel->index;
  1264. internal.start.uC.id = channel->id;
  1265. internal.start.uC.state = channel->state;
  1266. res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
  1267. if (res) {
  1268. IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
  1269. goto fail_connect_pipe;
  1270. }
  1271. channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
  1272. }
  1273. mutex_unlock(&mhi_client_general_mutex);
  1274. if (!in->sys.keep_ipa_awake)
  1275. IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
  1276. IPA_MHI_FUNC_EXIT();
  1277. return 0;
  1278. fail_connect_pipe:
  1279. mutex_unlock(&mhi_client_general_mutex);
  1280. ipa_mhi_reset_channel(channel, true);
  1281. fail_start_channel:
  1282. IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
  1283. return -EPERM;
  1284. }
  1285. /**
  1286. * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
  1287. * MHI channel
  1288. * @clnt_hdl: client handle for this pipe
  1289. *
  1290. * This function is called by MHI client driver on MHI channel reset.
  1291. * This function is called after MHI channel was started.
  1292. * This function is doing the following:
  1293. * - Send command to uC/GSI to reset corresponding MHI channel
  1294. * - Configure IPA EP control
  1295. *
  1296. * Return codes: 0 : success
  1297. * negative : error
  1298. */
  1299. int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
  1300. {
  1301. int res;
  1302. enum ipa_client_type client;
  1303. static struct ipa_mhi_channel_ctx *channel;
  1304. IPA_MHI_FUNC_ENTRY();
  1305. if (!ipa_mhi_client_ctx) {
  1306. IPA_MHI_ERR("IPA MHI was not initialized\n");
  1307. return -EINVAL;
  1308. }
  1309. client = ipa_get_client_mapping(clnt_hdl);
  1310. if (!IPA_CLIENT_IS_MHI(client)) {
  1311. IPA_MHI_ERR("invalid IPA MHI client, client: %d\n", client);
  1312. return -EINVAL;
  1313. }
  1314. channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
  1315. if (!channel) {
  1316. IPA_MHI_ERR("invalid clnt index\n");
  1317. return -EINVAL;
  1318. }
  1319. IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl));
  1320. res = ipa_mhi_reset_channel(channel, false);
  1321. if (res) {
  1322. IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
  1323. goto fail_reset_channel;
  1324. }
  1325. mutex_lock(&mhi_client_general_mutex);
  1326. res = ipa_disconnect_mhi_pipe(clnt_hdl);
  1327. if (res) {
  1328. IPA_MHI_ERR(
  1329. "IPA core driver failed to disconnect the pipe hdl %d, res %d"
  1330. , clnt_hdl, res);
  1331. goto fail_disconnect_pipe;
  1332. }
  1333. mutex_unlock(&mhi_client_general_mutex);
  1334. IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
  1335. IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
  1336. IPA_MHI_FUNC_EXIT();
  1337. return 0;
  1338. fail_disconnect_pipe:
  1339. mutex_unlock(&mhi_client_general_mutex);
  1340. fail_reset_channel:
  1341. IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
  1342. return res;
  1343. }
  1344. static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels,
  1345. int max_channels)
  1346. {
  1347. int i;
  1348. int res;
  1349. IPA_MHI_FUNC_ENTRY();
  1350. for (i = 0; i < max_channels; i++) {
  1351. if (!channels[i].valid)
  1352. continue;
  1353. if (channels[i].state !=
  1354. IPA_HW_MHI_CHANNEL_STATE_RUN)
  1355. continue;
  1356. IPA_MHI_DBG("suspending channel %d\n",
  1357. channels[i].id);
  1358. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  1359. res = ipa_mhi_suspend_gsi_channel(
  1360. &channels[i]);
  1361. else
  1362. res = ipa_uc_mhi_suspend_channel(
  1363. channels[i].index);
  1364. if (res) {
  1365. IPA_MHI_ERR("failed to suspend channel %d error %d\n",
  1366. i, res);
  1367. return res;
  1368. }
  1369. channels[i].state =
  1370. IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
  1371. }
  1372. IPA_MHI_FUNC_EXIT();
  1373. return 0;
  1374. }
  1375. static int ipa_mhi_stop_event_update_channels(
  1376. struct ipa_mhi_channel_ctx *channels, int max_channels)
  1377. {
  1378. int i;
  1379. int res;
  1380. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  1381. return 0;
  1382. IPA_MHI_FUNC_ENTRY();
  1383. for (i = 0; i < max_channels; i++) {
  1384. if (!channels[i].valid)
  1385. continue;
  1386. if (channels[i].state !=
  1387. IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
  1388. continue;
  1389. IPA_MHI_DBG("stop update event channel %d\n",
  1390. channels[i].id);
  1391. res = ipa_uc_mhi_stop_event_update_channel(
  1392. channels[i].index);
  1393. if (res) {
  1394. IPA_MHI_ERR("failed stop event channel %d error %d\n",
  1395. i, res);
  1396. return res;
  1397. }
  1398. }
  1399. IPA_MHI_FUNC_EXIT();
  1400. return 0;
  1401. }
/*
 * Check whether the host has queued packets that the device has not yet
 * consumed on any valid UL channel, by comparing the device read pointer
 * with the host write pointer.
 *
 * Returns true when pending packets exist - and also, conservatively,
 * when querying channel info or reading the host context fails.
 */
static bool ipa_mhi_check_pending_packets_from_host(void)
{
	int i;
	int res;
	struct ipa_mhi_channel_ctx *channel;

	IPA_MHI_FUNC_ENTRY();
	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
		channel = &ipa_mhi_client_ctx->ul_channels[i];
		if (!channel->valid)
			continue;

		res = ipa_mhi_query_ch_info(channel->client,
				&channel->ch_info);
		if (res) {
			IPA_MHI_ERR("gsi_query_channel_info failed\n");
			/* treat query failure as "pending" to be safe */
			return true;
		}
		res = ipa_mhi_read_ch_ctx(channel);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
			return true;
		}

		/* device rp lagging host wp means unconsumed packets */
		if (channel->ch_info.rp != channel->ch_ctx_host.wp) {
			IPA_MHI_DBG("There are pending packets from host\n");
			IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n",
				channel->ch_info.rp, channel->ch_ctx_host.wp);

			return true;
		}
	}

	IPA_MHI_FUNC_EXIT();
	return false;
}
  1433. static int ipa_mhi_resume_channels(bool LPTransitionRejected,
  1434. struct ipa_mhi_channel_ctx *channels, int max_channels)
  1435. {
  1436. int i;
  1437. int res;
  1438. struct ipa_mhi_channel_ctx *channel;
  1439. IPA_MHI_FUNC_ENTRY();
  1440. for (i = 0; i < max_channels; i++) {
  1441. if (!channels[i].valid)
  1442. continue;
  1443. if (channels[i].state !=
  1444. IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
  1445. continue;
  1446. channel = &channels[i];
  1447. IPA_MHI_DBG("resuming channel %d\n", channel->id);
  1448. res = ipa_mhi_resume_channels_internal(channel->client,
  1449. LPTransitionRejected, channel->brstmode_enabled,
  1450. channel->ch_scratch, channel->index);
  1451. if (res) {
  1452. IPA_MHI_ERR("failed to resume channel %d error %d\n",
  1453. i, res);
  1454. return res;
  1455. }
  1456. channel->stop_in_proc = false;
  1457. channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
  1458. }
  1459. IPA_MHI_FUNC_EXIT();
  1460. return 0;
  1461. }
/**
 * ipa_mhi_suspend_ul() - Suspend MHI accelerated up link channels
 * @force:
 *	false: in case of data pending in IPA, MHI channels will not be
 *		suspended and function will fail.
 *	true: in case of data pending in IPA, make sure no further access from
 *		IPA to PCIe is possible. In this case suspend cannot fail.
 * @empty: [out] set to whether the UL channels drained.
 * @force_clear: [out] set to whether a QMI force-clear was left enabled
 *	at any point (it is disabled again before returning 0).
 *
 * This function is called by MHI client driver on MHI suspend.
 * This function is called after MHI channel was started.
 * When this function returns device can move to M1/M2/M3/D3cold state.
 *
 * Return codes: 0 : success
 * negative : error
 */
static int ipa_mhi_suspend_ul(bool force, bool *empty, bool *force_clear)
{
	int res;

	*force_clear = false;

	res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
		IPA_MHI_MAX_UL_CHANNELS);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
		goto fail_suspend_ul_channel;
	}

	/* wait for in-flight UL traffic to drain */
	*empty = ipa_mhi_wait_for_ul_empty_timeout(
			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);

	if (!*empty) {
		if (force) {
			/* escalate: force-clear the datapath via QMI */
			res = ipa_mhi_enable_force_clear(
				ipa_mhi_client_ctx->qmi_req_id, false);
			if (res) {
				IPA_MHI_ERR("failed to enable force clear\n");
				ipa_assert();
				return res;
			}
			*force_clear = true;
			IPA_MHI_DBG("force clear datapath enabled\n");

			*empty = ipa_mhi_wait_for_ul_empty_timeout(
				IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
			IPA_MHI_DBG("empty=%d\n", *empty);
			if (!*empty && ipa_get_transport_type()
				== IPA_TRANSPORT_TYPE_GSI) {
				IPA_MHI_ERR("Failed to suspend UL channels\n");
				/* in test mode fail softly with -EAGAIN */
				if (ipa_mhi_client_ctx->test_mode) {
					res = -EAGAIN;
					goto fail_suspend_ul_channel;
				}

				ipa_assert();
			}
		} else {
			/* non-forced suspend must not proceed with data */
			IPA_MHI_DBG("IPA not empty\n");
			res = -EAGAIN;
			goto fail_suspend_ul_channel;
		}
	}

	if (*force_clear) {
		res =
		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
		if (res) {
			IPA_MHI_ERR("failed to disable force clear\n");
			ipa_assert();
			return res;
		}
		IPA_MHI_DBG("force clear datapath disabled\n");
		/* each enable/disable pair consumes one QMI request id */
		ipa_mhi_client_ctx->qmi_req_id++;
	}

	/* non-forced GSI suspend also requires no host-side backlog */
	if (!force && ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		if (ipa_mhi_check_pending_packets_from_host()) {
			res = -EAGAIN;
			goto fail_suspend_ul_channel;
		}
	}

	res = ipa_mhi_stop_event_update_channels(
		ipa_mhi_client_ctx->ul_channels, IPA_MHI_MAX_UL_CHANNELS);
	if (res) {
		IPA_MHI_ERR(
			"ipa_mhi_stop_event_update_ul_channels failed %d\n",
			res);
		goto fail_suspend_ul_channel;
	}

	return 0;

fail_suspend_ul_channel:
	return res;
}
  1548. static bool ipa_mhi_has_open_aggr_frame(void)
  1549. {
  1550. struct ipa_mhi_channel_ctx *channel;
  1551. int i;
  1552. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  1553. channel = &ipa_mhi_client_ctx->dl_channels[i];
  1554. if (!channel->valid)
  1555. continue;
  1556. if (ipa_has_open_aggr_frame(channel->client))
  1557. return true;
  1558. }
  1559. return false;
  1560. }
  1561. static void ipa_mhi_update_host_ch_state(bool update_rp)
  1562. {
  1563. int i;
  1564. int res;
  1565. struct ipa_mhi_channel_ctx *channel;
  1566. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  1567. channel = &ipa_mhi_client_ctx->ul_channels[i];
  1568. if (!channel->valid)
  1569. continue;
  1570. if (update_rp) {
  1571. res = ipa_mhi_query_ch_info(channel->client,
  1572. &channel->ch_info);
  1573. if (res) {
  1574. IPA_MHI_ERR("gsi_query_channel_info failed\n");
  1575. ipa_assert();
  1576. return;
  1577. }
  1578. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1579. &channel->ch_info.rp,
  1580. channel->channel_context_addr +
  1581. offsetof(struct ipa_mhi_ch_ctx, rp),
  1582. sizeof(channel->ch_info.rp));
  1583. if (res) {
  1584. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1585. ipa_assert();
  1586. return;
  1587. }
  1588. }
  1589. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1590. &channel->state, channel->channel_context_addr +
  1591. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1592. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1593. if (res) {
  1594. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1595. ipa_assert();
  1596. return;
  1597. }
  1598. IPA_MHI_DBG("Updated UL CH=%d state to %s on host\n",
  1599. i, MHI_CH_STATE_STR(channel->state));
  1600. }
  1601. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  1602. channel = &ipa_mhi_client_ctx->dl_channels[i];
  1603. if (!channel->valid)
  1604. continue;
  1605. if (update_rp) {
  1606. res = ipa_mhi_query_ch_info(channel->client,
  1607. &channel->ch_info);
  1608. if (res) {
  1609. IPA_MHI_ERR("gsi_query_channel_info failed\n");
  1610. ipa_assert();
  1611. return;
  1612. }
  1613. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1614. &channel->ch_info.rp,
  1615. channel->channel_context_addr +
  1616. offsetof(struct ipa_mhi_ch_ctx, rp),
  1617. sizeof(channel->ch_info.rp));
  1618. if (res) {
  1619. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1620. ipa_assert();
  1621. return;
  1622. }
  1623. }
  1624. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1625. &channel->state, channel->channel_context_addr +
  1626. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1627. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1628. if (res) {
  1629. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1630. ipa_assert();
  1631. return;
  1632. }
  1633. IPA_MHI_DBG("Updated DL CH=%d state to %s on host\n",
  1634. i, MHI_CH_STATE_STR(channel->state));
  1635. }
  1636. }
  1637. static int ipa_mhi_suspend_dl(bool force)
  1638. {
  1639. int res;
  1640. res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
  1641. IPA_MHI_MAX_DL_CHANNELS);
  1642. if (res) {
  1643. IPA_MHI_ERR(
  1644. "ipa_mhi_suspend_channels for dl failed %d\n", res);
  1645. goto fail_suspend_dl_channel;
  1646. }
  1647. res = ipa_mhi_stop_event_update_channels
  1648. (ipa_mhi_client_ctx->dl_channels,
  1649. IPA_MHI_MAX_DL_CHANNELS);
  1650. if (res) {
  1651. IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
  1652. goto fail_stop_event_update_dl_channel;
  1653. }
  1654. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1655. if (ipa_mhi_has_open_aggr_frame()) {
  1656. IPA_MHI_DBG("There is an open aggr frame\n");
  1657. if (force) {
  1658. ipa_mhi_client_ctx->trigger_wakeup = true;
  1659. } else {
  1660. res = -EAGAIN;
  1661. goto fail_stop_event_update_dl_channel;
  1662. }
  1663. }
  1664. }
  1665. return 0;
  1666. fail_stop_event_update_dl_channel:
  1667. ipa_mhi_resume_channels(true,
  1668. ipa_mhi_client_ctx->dl_channels,
  1669. IPA_MHI_MAX_DL_CHANNELS);
  1670. fail_suspend_dl_channel:
  1671. return res;
  1672. }
  1673. /**
  1674. * ipa_mhi_suspend() - Suspend MHI accelerated channels
  1675. * @force:
  1676. * false: in case of data pending in IPA, MHI channels will not be
  1677. * suspended and function will fail.
  1678. * true: in case of data pending in IPA, make sure no further access from
  1679. * IPA to PCIe is possible. In this case suspend cannot fail.
  1680. *
  1681. * This function is called by MHI client driver on MHI suspend.
  1682. * This function is called after MHI channel was started.
  1683. * When this function returns device can move to M1/M2/M3/D3cold state.
  1684. *
  1685. * Return codes: 0 : success
  1686. * negative : error
  1687. */
  1688. int ipa_mhi_suspend(bool force)
  1689. {
  1690. int res;
  1691. bool empty;
  1692. bool force_clear;
  1693. IPA_MHI_FUNC_ENTRY();
  1694. res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
  1695. if (res) {
  1696. IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
  1697. return res;
  1698. }
  1699. res = ipa_mhi_suspend_dl(force);
  1700. if (res) {
  1701. IPA_MHI_ERR("ipa_mhi_suspend_dl failed %d\n", res);
  1702. goto fail_suspend_dl_channel;
  1703. }
  1704. usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
  1705. res = ipa_mhi_suspend_ul(force, &empty, &force_clear);
  1706. if (res) {
  1707. IPA_MHI_ERR("ipa_mhi_suspend_ul failed %d\n", res);
  1708. goto fail_suspend_ul_channel;
  1709. }
  1710. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  1711. ipa_mhi_update_host_ch_state(true);
  1712. /*
  1713. * hold IPA clocks and release them after all
  1714. * IPA PM clients are deactivated to make sure tag process
  1715. * will not start
  1716. */
  1717. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  1718. res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
  1719. if (res) {
  1720. IPA_MHI_ERR("fail to deactivate client %d\n", res);
  1721. goto fail_deactivate_pm;
  1722. }
  1723. res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  1724. if (res) {
  1725. IPA_MHI_ERR("fail to deactivate client %d\n", res);
  1726. goto fail_deactivate_modem_pm;
  1727. }
  1728. usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
  1729. if (!empty)
  1730. ipa_set_tag_process_before_gating(false);
  1731. res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
  1732. if (res) {
  1733. IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
  1734. goto fail_release_cons;
  1735. }
  1736. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1737. IPA_MHI_FUNC_EXIT();
  1738. return 0;
  1739. fail_release_cons:
  1740. ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  1741. fail_deactivate_modem_pm:
  1742. ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
  1743. fail_deactivate_pm:
  1744. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1745. fail_suspend_ul_channel:
  1746. ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels,
  1747. IPA_MHI_MAX_UL_CHANNELS);
  1748. if (force_clear) {
  1749. if (
  1750. ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id)) {
  1751. IPA_MHI_ERR("failed to disable force clear\n");
  1752. ipa_assert();
  1753. }
  1754. IPA_MHI_DBG("force clear datapath disabled\n");
  1755. ipa_mhi_client_ctx->qmi_req_id++;
  1756. }
  1757. fail_suspend_dl_channel:
  1758. ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->dl_channels,
  1759. IPA_MHI_MAX_DL_CHANNELS);
  1760. ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
  1761. return res;
  1762. }
  1763. /**
  1764. * ipa_mhi_resume() - Resume MHI accelerated channels
  1765. *
  1766. * This function is called by MHI client driver on MHI resume.
  1767. * This function is called after MHI channel was suspended.
  1768. * When this function returns device can move to M0 state.
  1769. * This function is doing the following:
  1770. * - Send command to uC/GSI to resume corresponding MHI channel
  1771. * - Activate PM clients
  1772. * - Resume data to IPA
  1773. *
  1774. * Return codes: 0 : success
  1775. * negative : error
  1776. */
  1777. int ipa_mhi_resume(void)
  1778. {
  1779. int res;
  1780. IPA_MHI_FUNC_ENTRY();
  1781. res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
  1782. if (res) {
  1783. IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
  1784. return res;
  1785. }
  1786. res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
  1787. if (res) {
  1788. IPA_MHI_ERR("fail to activate client %d\n", res);
  1789. goto fail_pm_activate;
  1790. }
  1791. res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  1792. if (res) {
  1793. IPA_MHI_ERR("fail to activate client %d\n", res);
  1794. goto fail_pm_activate_modem;
  1795. }
  1796. /* resume all UL channels */
  1797. res = ipa_mhi_resume_channels(false,
  1798. ipa_mhi_client_ctx->ul_channels,
  1799. IPA_MHI_MAX_UL_CHANNELS);
  1800. if (res) {
  1801. IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
  1802. goto fail_resume_ul_channels;
  1803. }
  1804. res = ipa_mhi_resume_channels(false,
  1805. ipa_mhi_client_ctx->dl_channels,
  1806. IPA_MHI_MAX_DL_CHANNELS);
  1807. if (res) {
  1808. IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
  1809. res);
  1810. goto fail_resume_dl_channels;
  1811. }
  1812. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  1813. ipa_mhi_update_host_ch_state(false);
  1814. res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
  1815. if (res) {
  1816. IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
  1817. goto fail_set_state;
  1818. }
  1819. IPA_MHI_FUNC_EXIT();
  1820. return 0;
  1821. fail_set_state:
  1822. ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
  1823. IPA_MHI_MAX_DL_CHANNELS);
  1824. fail_resume_dl_channels:
  1825. ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
  1826. IPA_MHI_MAX_UL_CHANNELS);
  1827. fail_resume_ul_channels:
  1828. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  1829. fail_pm_activate_modem:
  1830. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
  1831. fail_pm_activate:
  1832. ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
  1833. return res;
  1834. }
  1835. static int ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
  1836. int num_of_channels)
  1837. {
  1838. struct ipa_mhi_channel_ctx *channel;
  1839. int i, res;
  1840. u32 clnt_hdl;
  1841. for (i = 0; i < num_of_channels; i++) {
  1842. channel = &channels[i];
  1843. if (!channel->valid)
  1844. continue;
  1845. if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID)
  1846. continue;
  1847. if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
  1848. clnt_hdl = ipa_get_ep_mapping(channel->client);
  1849. IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
  1850. res = ipa_mhi_disconnect_pipe(clnt_hdl);
  1851. if (res) {
  1852. IPA_MHI_ERR(
  1853. "failed to disconnect pipe %d, err %d\n"
  1854. , clnt_hdl, res);
  1855. goto fail;
  1856. }
  1857. }
  1858. res = ipa_mhi_destroy_channel(channel->client);
  1859. if (res) {
  1860. IPA_MHI_ERR(
  1861. "ipa_mhi_destroy_channel failed %d"
  1862. , res);
  1863. goto fail;
  1864. }
  1865. }
  1866. return 0;
  1867. fail:
  1868. return res;
  1869. }
  1870. /**
  1871. * ipa_mhi_destroy_all_channels() - Destroy MHI IPA channels
  1872. *
  1873. * This function is called by IPA MHI client driver on MHI reset to destroy all
  1874. * IPA MHI channels.
  1875. */
  1876. int ipa_mhi_destroy_all_channels(void)
  1877. {
  1878. int res;
  1879. IPA_MHI_FUNC_ENTRY();
  1880. /* reset all UL and DL acc channels and its accociated event rings */
  1881. res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->ul_channels,
  1882. IPA_MHI_MAX_UL_CHANNELS);
  1883. if (res) {
  1884. IPA_MHI_ERR("ipa_mhi_destroy_channels(ul_channels) failed %d\n",
  1885. res);
  1886. return -EPERM;
  1887. }
  1888. IPA_MHI_DBG("All UL channels are disconnected\n");
  1889. res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->dl_channels,
  1890. IPA_MHI_MAX_DL_CHANNELS);
  1891. if (res) {
  1892. IPA_MHI_ERR("ipa_mhi_destroy_channels(dl_channels) failed %d\n",
  1893. res);
  1894. return -EPERM;
  1895. }
  1896. IPA_MHI_DBG("All DL channels are disconnected\n");
  1897. IPA_MHI_FUNC_EXIT();
  1898. return 0;
  1899. }
/* Remove the IPA MHI debugfs directory tree rooted at 'dent'. */
static void ipa_mhi_debugfs_destroy(void)
{
	/* debugfs_remove_recursive() is NULL-safe, so this is harmless
	 * even if debugfs was never created
	 */
	debugfs_remove_recursive(dent);
}
/*
 * ipa_mhi_deregister_pm() - release both IPA PM handles.
 *
 * Deactivates and deregisters the MHI PM client, then the modem
 * (clock-scaling) PM client, and poisons the stored handles with ~0 so a
 * stale handle is never reused after teardown.
 */
static void ipa_mhi_deregister_pm(void)
{
	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
	ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
	ipa_mhi_client_ctx->pm_hdl = ~0;

	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
	ipa_pm_deregister(ipa_mhi_client_ctx->modem_pm_hdl);
	ipa_mhi_client_ctx->modem_pm_hdl = ~0;
}
/**
 * ipa_mhi_destroy() - Destroy MHI IPA
 *
 * This function is called by MHI client driver on MHI reset to destroy all IPA
 * MHI resources.
 * When this function returns ipa_mhi can re-initialize.
 */
void ipa_mhi_destroy(void)
{
	int res;

	IPA_MHI_FUNC_ENTRY();

	/* idempotent: safe to call when init never ran or destroy already did */
	if (!ipa_mhi_client_ctx) {
		IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n");
		return;
	}

	ipa_deregister_client_callback(IPA_CLIENT_MHI_PROD);

	/* reset all UL and DL acc channels and their associated event rings;
	 * only done on GSI transport - uC transport is cleaned up below
	 */
	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		res = ipa_mhi_destroy_all_channels();
		if (res) {
			IPA_MHI_ERR("ipa_mhi_destroy_all_channels failed %d\n",
				res);
			goto fail;
		}
	}
	IPA_MHI_DBG("All channels are disconnected\n");

	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS) {
		IPA_MHI_DBG("cleanup uC MHI\n");
		ipa_uc_mhi_cleanup();
	}

	/* teardown order matters: PM clients, DMA, debugfs, then the
	 * workqueue and context itself (reverse of ipa_mhi_init())
	 */
	ipa_mhi_deregister_pm();
	ipa_dma_destroy();
	ipa_mhi_debugfs_destroy();
	destroy_workqueue(ipa_mhi_client_ctx->wq);
	kfree(ipa_mhi_client_ctx);
	ipa_mhi_client_ctx = NULL;
	IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n");

	IPA_MHI_FUNC_EXIT();
	return;
fail:
	/* channel teardown failed; hardware state is unknown - halt */
	ipa_assert();
}
  1955. static void ipa_mhi_pm_cb(void *p, enum ipa_pm_cb_event event)
  1956. {
  1957. unsigned long flags;
  1958. IPA_MHI_FUNC_ENTRY();
  1959. if (event != IPA_PM_REQUEST_WAKEUP) {
  1960. IPA_MHI_ERR("Unexpected event %d\n", event);
  1961. WARN_ON(1);
  1962. return;
  1963. }
  1964. IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
  1965. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  1966. if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
  1967. ipa_mhi_notify_wakeup();
  1968. } else if (ipa_mhi_client_ctx->state ==
  1969. IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
  1970. /* wakeup event will be trigger after suspend finishes */
  1971. ipa_mhi_client_ctx->trigger_wakeup = true;
  1972. }
  1973. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1974. IPA_MHI_DBG("EXIT");
  1975. }
  1976. static int ipa_mhi_register_pm(void)
  1977. {
  1978. int res;
  1979. struct ipa_pm_register_params params;
  1980. memset(&params, 0, sizeof(params));
  1981. params.name = "MHI";
  1982. params.callback = ipa_mhi_pm_cb;
  1983. params.group = IPA_PM_GROUP_DEFAULT;
  1984. res = ipa_pm_register(&params, &ipa_mhi_client_ctx->pm_hdl);
  1985. if (res) {
  1986. IPA_MHI_ERR("fail to register with PM %d\n", res);
  1987. return res;
  1988. }
  1989. res = ipa_pm_associate_ipa_cons_to_client(ipa_mhi_client_ctx->pm_hdl,
  1990. IPA_CLIENT_MHI_CONS);
  1991. if (res) {
  1992. IPA_MHI_ERR("fail to associate cons with PM %d\n", res);
  1993. goto fail_pm_cons;
  1994. }
  1995. res = ipa_pm_set_throughput(ipa_mhi_client_ctx->pm_hdl, 1000);
  1996. if (res) {
  1997. IPA_MHI_ERR("fail to set perf profile to PM %d\n", res);
  1998. goto fail_pm_cons;
  1999. }
  2000. /* create a modem client for clock scaling */
  2001. memset(&params, 0, sizeof(params));
  2002. params.name = "MODEM (MHI)";
  2003. params.group = IPA_PM_GROUP_MODEM;
  2004. params.skip_clk_vote = true;
  2005. res = ipa_pm_register(&params, &ipa_mhi_client_ctx->modem_pm_hdl);
  2006. if (res) {
  2007. IPA_MHI_ERR("fail to register with PM %d\n", res);
  2008. goto fail_pm_cons;
  2009. }
  2010. return 0;
  2011. fail_pm_cons:
  2012. ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
  2013. ipa_mhi_client_ctx->pm_hdl = ~0;
  2014. return res;
  2015. }
  2016. /**
  2017. * ipa_mhi_init() - Initialize IPA MHI driver
  2018. * @params: initialization params
  2019. *
  2020. * This function is called by MHI client driver on boot to initialize IPA MHI
  2021. * Driver. When this function returns device can move to READY state.
  2022. * This function is doing the following:
  2023. * - Initialize MHI IPA internal data structures
  2024. * - Register with PM
  2025. * - Initialize debugfs
  2026. *
  2027. * Return codes: 0 : success
  2028. * negative : error
  2029. */
  2030. int ipa_mhi_init(struct ipa_mhi_init_params *params)
  2031. {
  2032. int res;
  2033. IPA_MHI_FUNC_ENTRY();
  2034. if (!params) {
  2035. IPA_MHI_ERR("null args\n");
  2036. return -EINVAL;
  2037. }
  2038. if (!params->notify) {
  2039. IPA_MHI_ERR("null notify function\n");
  2040. return -EINVAL;
  2041. }
  2042. if (ipa_mhi_client_ctx) {
  2043. IPA_MHI_ERR("already initialized\n");
  2044. return -EPERM;
  2045. }
  2046. IPA_MHI_DBG("notify = %pS priv = %pK\n", params->notify, params->priv);
  2047. IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
  2048. params->msi.addr_low, params->msi.addr_hi);
  2049. IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
  2050. params->msi.data, params->msi.mask);
  2051. IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
  2052. IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
  2053. IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
  2054. IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40);
  2055. IPA_MHI_DBG("test_mode=%d\n", params->test_mode);
  2056. /* Initialize context */
  2057. ipa_mhi_client_ctx = kzalloc(sizeof(*ipa_mhi_client_ctx), GFP_KERNEL);
  2058. if (!ipa_mhi_client_ctx) {
  2059. res = -EFAULT;
  2060. goto fail_alloc_ctx;
  2061. }
  2062. ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
  2063. ipa_mhi_client_ctx->cb_notify = params->notify;
  2064. ipa_mhi_client_ctx->cb_priv = params->priv;
  2065. spin_lock_init(&ipa_mhi_client_ctx->state_lock);
  2066. ipa_mhi_client_ctx->msi = params->msi;
  2067. ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
  2068. ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
  2069. ipa_mhi_client_ctx->first_er_idx = params->first_er_idx;
  2070. ipa_mhi_client_ctx->qmi_req_id = 0;
  2071. ipa_mhi_client_ctx->use_ipadma = true;
  2072. ipa_mhi_client_ctx->assert_bit40 = !!params->assert_bit40;
  2073. ipa_mhi_client_ctx->test_mode = params->test_mode;
  2074. ipa_mhi_client_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
  2075. if (!ipa_mhi_client_ctx->wq) {
  2076. IPA_MHI_ERR("failed to create workqueue\n");
  2077. res = -EFAULT;
  2078. goto fail_create_wq;
  2079. }
  2080. res = ipa_dma_init();
  2081. if (res) {
  2082. IPA_MHI_ERR("failed to init ipa dma %d\n", res);
  2083. goto fail_dma_init;
  2084. }
  2085. res = ipa_mhi_register_pm();
  2086. if (res) {
  2087. IPA_MHI_ERR("failed to create PM resources\n");
  2088. res = -EFAULT;
  2089. goto fail_pm;
  2090. }
  2091. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  2092. ipa_mhi_set_state(IPA_MHI_STATE_READY);
  2093. } else {
  2094. /* Initialize uC interface */
  2095. ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
  2096. ipa_mhi_uc_wakeup_request_cb);
  2097. if (ipa_uc_state_check() == 0)
  2098. ipa_mhi_set_state(IPA_MHI_STATE_READY);
  2099. }
  2100. ipa_register_client_callback(&ipa_mhi_set_lock_unlock, NULL,
  2101. IPA_CLIENT_MHI_PROD);
  2102. /* Initialize debugfs */
  2103. ipa_mhi_debugfs_init();
  2104. IPA_MHI_FUNC_EXIT();
  2105. return 0;
  2106. fail_pm:
  2107. ipa_dma_destroy();
  2108. fail_dma_init:
  2109. destroy_workqueue(ipa_mhi_client_ctx->wq);
  2110. fail_create_wq:
  2111. kfree(ipa_mhi_client_ctx);
  2112. ipa_mhi_client_ctx = NULL;
  2113. fail_alloc_ctx:
  2114. return res;
  2115. }
  2116. static void ipa_mhi_cache_dl_ul_sync_info(
  2117. struct ipa_config_req_msg_v01 *config_req)
  2118. {
  2119. ipa_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
  2120. ipa_cached_dl_ul_sync_info.params.UlAccmVal =
  2121. (config_req->ul_accumulation_time_limit_valid) ?
  2122. config_req->ul_accumulation_time_limit : 0;
  2123. ipa_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
  2124. (config_req->ul_msi_event_threshold_valid) ?
  2125. config_req->ul_msi_event_threshold : 0;
  2126. ipa_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
  2127. (config_req->dl_msi_event_threshold_valid) ?
  2128. config_req->dl_msi_event_threshold : 0;
  2129. }
  2130. /**
  2131. * ipa_mhi_handle_ipa_config_req() - hanle IPA CONFIG QMI message
  2132. *
  2133. * This function is called by by IPA QMI service to indicate that IPA CONFIG
  2134. * message was sent from modem. IPA MHI will update this information to IPA uC
  2135. * or will cache it until IPA MHI will be initialized.
  2136. *
  2137. * Return codes: 0 : success
  2138. * negative : error
  2139. */
  2140. int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
  2141. {
  2142. IPA_MHI_FUNC_ENTRY();
  2143. if (ipa_get_transport_type() != IPA_TRANSPORT_TYPE_GSI) {
  2144. ipa_mhi_cache_dl_ul_sync_info(config_req);
  2145. if (ipa_mhi_client_ctx &&
  2146. ipa_mhi_client_ctx->state !=
  2147. IPA_MHI_STATE_INITIALIZED)
  2148. ipa_uc_mhi_send_dl_ul_sync_info(
  2149. &ipa_cached_dl_ul_sync_info);
  2150. }
  2151. IPA_MHI_FUNC_EXIT();
  2152. return 0;
  2153. }
  2154. EXPORT_SYMBOL(ipa_mhi_handle_ipa_config_req);
  2155. int ipa_mhi_is_using_dma(bool *flag)
  2156. {
  2157. IPA_MHI_FUNC_ENTRY();
  2158. if (!ipa_mhi_client_ctx) {
  2159. IPA_MHI_ERR("not initialized\n");
  2160. return -EPERM;
  2161. }
  2162. *flag = ipa_mhi_client_ctx->use_ipadma ? true : false;
  2163. IPA_MHI_FUNC_EXIT();
  2164. return 0;
  2165. }
  2166. EXPORT_SYMBOL(ipa_mhi_is_using_dma);
/*
 * ipa_mhi_get_state_str() - return a printable name for an IPA MHI state.
 * @state: IPA MHI state value.
 *
 * Thin exported wrapper around the MHI_STATE_STR() macro.
 */
const char *ipa_mhi_get_state_str(int state)
{
	return MHI_STATE_STR(state);
}
EXPORT_SYMBOL(ipa_mhi_get_state_str);
  2172. MODULE_LICENSE("GPL v2");
  2173. MODULE_DESCRIPTION("IPA MHI client driver");