ipa_mhi_client.c 65 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include <linux/export.h>
  7. #include <linux/delay.h>
  8. #include <linux/module.h>
  9. #include <linux/mutex.h>
  10. #include <linux/ipa.h>
  11. #include <linux/msm_gsi.h>
  12. #include <linux/ipa_qmi_service_v01.h>
  13. #include <linux/ipa_mhi.h>
  14. #include "../ipa_common_i.h"
  15. #include "../ipa_v3/ipa_pm.h"
  16. #define IPA_MHI_DRV_NAME "ipa_mhi_client"
  17. #define IPA_MHI_DBG(fmt, args...) \
  18. do { \
  19. pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
  20. __func__, __LINE__, ## args); \
  21. IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
  22. IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
  23. IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
  24. IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
  25. } while (0)
  26. #define IPA_MHI_DBG_LOW(fmt, args...) \
  27. do { \
  28. pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
  29. __func__, __LINE__, ## args); \
  30. IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
  31. IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
  32. } while (0)
  33. #define IPA_MHI_ERR(fmt, args...) \
  34. do { \
  35. pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
  36. __func__, __LINE__, ## args); \
  37. IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
  38. IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
  39. IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
  40. IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
  41. } while (0)
/* Trace helpers for function entry/exit. */
#define IPA_MHI_FUNC_ENTRY() \
	IPA_MHI_DBG("ENTRY\n")
#define IPA_MHI_FUNC_EXIT() \
	IPA_MHI_DBG("EXIT\n")

/* Timeout (msec) when waiting for a channel to drain — TODO confirm user. */
#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10

/* Sleep bounds (usec) — presumably a usleep_range() pair used while
 * polling during suspend; the caller is not visible in this chunk.
 */
#define IPA_MHI_SUSPEND_SLEEP_MIN 900
#define IPA_MHI_SUSPEND_SLEEP_MAX 1100

/* Sizes of the ul_channels[]/dl_channels[] arrays in the client context. */
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 2

/* bit #40 in address should be asserted for MHI transfers over pcie */
#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
	((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
/*
 * IPA MHI driver states; legal transitions are enforced by
 * ipa_mhi_set_state().
 */
enum ipa_mhi_state {
	IPA_MHI_STATE_INITIALIZED,	/* driver initialized, uC not ready */
	IPA_MHI_STATE_READY,		/* entered from uC ready callback */
	IPA_MHI_STATE_STARTED,		/* engine started via ipa_mhi_start() */
	IPA_MHI_STATE_SUSPEND_IN_PROGRESS,
	IPA_MHI_STATE_SUSPENDED,
	IPA_MHI_STATE_RESUME_IN_PROGRESS,
	IPA_MHI_STATE_MAX		/* sentinel, not a real state */
};
/* Printable names for enum ipa_mhi_state, indexed by state value. */
static char *ipa_mhi_state_str[] = {
	__stringify(IPA_MHI_STATE_INITIALIZED),
	__stringify(IPA_MHI_STATE_READY),
	__stringify(IPA_MHI_STATE_STARTED),
	__stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS),
	__stringify(IPA_MHI_STATE_SUSPENDED),
	__stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS),
};

/*
 * Map a state value to its name, or "INVALID" when out of range.
 * NOTE: evaluates @state more than once — pass side-effect-free expressions.
 */
#define MHI_STATE_STR(state) \
	(((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \
	ipa_mhi_state_str[(state)] : \
	"INVALID")
/* Copy direction for ipa_mhi_read_write_host(). */
enum ipa_mhi_dma_dir {
	IPA_MHI_DMA_TO_HOST,	/* write a local buffer out to host memory */
	IPA_MHI_DMA_FROM_HOST,	/* read host memory into a local buffer */
};
/**
 * struct ipa_mhi_channel_ctx - MHI Channel context
 * @valid: entry is valid (in use)
 * @id: MHI channel ID
 * @index: channel index (the previous kernel-doc documented a @hdl field
 *	that does not exist in this struct; @index is presumably the
 *	hardware channel index — TODO confirm)
 * @client: IPA Client type mapped to this channel
 * @state: Channel state
 * @stop_in_proc: a channel stop is in progress — NOTE(review): confirm
 * @ch_info: cached GSI channel information
 * @channel_context_addr: host address of this channel's context
 * @ch_ctx_host: local copy of the host channel context
 * @event_context_addr: host address of the event ring context
 * @ev_ctx_host: local copy of the host event ring context
 * @brstmode_enabled: burst mode enabled — NOTE(review): confirm semantics
 * @ch_scratch: GSI channel scratch area
 * @cached_gsi_evt_ring_hdl: cached GSI event ring handle
 */
struct ipa_mhi_channel_ctx {
	bool valid;
	u8 id;
	u8 index;
	enum ipa_client_type client;
	enum ipa_hw_mhi_channel_states state;
	bool stop_in_proc;
	struct gsi_chan_info ch_info;
	u64 channel_context_addr;
	struct ipa_mhi_ch_ctx ch_ctx_host;
	u64 event_context_addr;
	struct ipa_mhi_ev_ctx ev_ctx_host;
	bool brstmode_enabled;
	union __packed gsi_channel_scratch ch_scratch;
	unsigned long cached_gsi_evt_ring_hdl;
};
/**
 * struct ipa_mhi_client_ctx - global IPA MHI client driver context
 * @state: current driver state, protected by @state_lock
 * @state_lock: guards @state, @trigger_wakeup and @wakeup_notified
 * @cb_notify: MHI client callback for READY/DATA_AVAILABLE events
 * @cb_priv: opaque pointer handed back to @cb_notify
 * @trigger_wakeup: wakeup was requested mid-suspend; fire it once the
 *	suspend/resume transition settles (see ipa_mhi_set_state())
 * @wakeup_notified: a wakeup work item was already queued; suppresses
 *	duplicates until the state machine re-arms it
 * @wq: workqueue used for client notifications
 * @ul_channels: uplink (device->host) channel contexts
 * @dl_channels: downlink (host->device) channel contexts
 * @total_channels: number of channels in use — TODO confirm
 * @msi: MSI configuration passed to the uC at start
 * @mmio_addr: MHI MMIO base address passed to the uC
 * @first_ch_idx: first hardware channel index (gsi/uC init param)
 * @first_er_idx: first event ring index (uC init param)
 * @host_ctrl_addr: host control-region address from ipa_mhi_start()
 * @host_data_addr: host data-region address from ipa_mhi_start()
 * @channel_context_array_addr: host address of the channel context array
 * @event_context_array_addr: host address of the event context array
 * @qmi_req_id: QMI request identifier — TODO confirm usage (not in chunk)
 * @use_ipadma: nonzero to copy host memory via the IPA DMA engine rather
 *	than ioremap (runtime-tunable through the "use_ipadma" debugfs file)
 * @assert_bit40: assert address bit #40 on host addresses (PCIe)
 * @test_mode: host memory is local RAM; use phys_to_virt() not ioremap()
 * @pm_hdl: IPA PM client handle
 * @modem_pm_hdl: IPA PM handle for the modem client
 */
struct ipa_mhi_client_ctx {
	enum ipa_mhi_state state;
	spinlock_t state_lock;
	mhi_client_cb cb_notify;
	void *cb_priv;
	bool trigger_wakeup;
	bool wakeup_notified;
	struct workqueue_struct *wq;
	struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS];
	struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS];
	u32 total_channels;
	struct ipa_mhi_msi_info msi;
	u32 mmio_addr;
	u32 first_ch_idx;
	u32 first_er_idx;
	u32 host_ctrl_addr;
	u32 host_data_addr;
	u64 channel_context_array_addr;
	u64 event_context_array_addr;
	u32 qmi_req_id;
	u32 use_ipadma;
	bool assert_bit40;
	bool test_mode;
	u32 pm_hdl;
	u32 modem_pm_hdl;
};
/* Singleton driver context — NOTE(review): allocation site is outside this
 * chunk; presumably set up in ipa_mhi_init(), confirm.
 */
static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx;
/* General-purpose client lock, taken/released via ipa_mhi_set_lock_unlock(). */
static DEFINE_MUTEX(mhi_client_general_mutex);
#ifdef CONFIG_DEBUG_FS
/* Shared scratch buffer for all debugfs read handlers below. */
#define IPA_MHI_MAX_MSG_LEN 512
static char dbg_buff[IPA_MHI_MAX_MSG_LEN];
static struct dentry *dent;	/* root of the "ipa_mhi" debugfs dir */

/* Printable names for enum ipa_hw_mhi_channel_states, indexed by value. */
static char *ipa_mhi_channel_state_str[] = {
	__stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_RUN),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_STOP),
	__stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR),
};

/*
 * Map a channel state to its name, or "INVALID" when out of range.
 * NOTE: evaluates @state more than once — pass side-effect-free expressions.
 */
#define MHI_CH_STATE_STR(state) \
	(((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \
	ipa_mhi_channel_state_str[(state)] : \
	"INVALID")
  147. static int ipa_mhi_set_lock_unlock(bool is_lock)
  148. {
  149. IPA_MHI_DBG("entry\n");
  150. if (is_lock)
  151. mutex_lock(&mhi_client_general_mutex);
  152. else
  153. mutex_unlock(&mhi_client_general_mutex);
  154. IPA_MHI_DBG("exit\n");
  155. return 0;
  156. }
/*
 * ipa_mhi_read_write_host() - copy @size bytes between a local buffer and
 * host (PCIe) memory.
 * @dir: IPA_MHI_DMA_FROM_HOST reads host memory into @dev_addr; otherwise
 *	@dev_addr is written out to @host_addr.
 * @dev_addr: local (kernel virtual) buffer.
 * @host_addr: host physical address.
 * @size: number of bytes to copy.
 *
 * Two transports:
 *  - use_ipadma: bounce through a coherent DMA buffer and the IPA DMA
 *    engine (host_addr is first adjusted for PCIe bit-40 assertion).
 *  - otherwise: map the host address with ioremap() (or phys_to_virt() in
 *    test mode, where "host" memory is local RAM) and memcpy directly.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
	u64 host_addr, int size)
{
	struct ipa_mem_buffer mem;
	int res;
	struct device *pdev;

	IPA_MHI_FUNC_ENTRY();

	if (ipa_mhi_client_ctx->use_ipadma) {
		pdev = ipa_get_dma_dev();
		host_addr = IPA_MHI_CLIENT_HOST_ADDR_COND(host_addr);

		mem.size = size;
		mem.base = dma_alloc_coherent(pdev, mem.size,
			&mem.phys_base, GFP_KERNEL);
		if (!mem.base) {
			IPA_MHI_ERR(
				"dma_alloc_coherent failed, DMA buff size %d\n"
				, mem.size);
			return -ENOMEM;
		}

		res = ipa_dma_enable();
		if (res) {
			IPA_MHI_ERR("failed to enable IPA DMA rc=%d\n", res);
			goto fail_dma_enable;
		}

		if (dir == IPA_MHI_DMA_FROM_HOST) {
			/* host -> bounce buffer -> caller's buffer */
			res = ipa_dma_sync_memcpy(mem.phys_base, host_addr,
				size);
			if (res) {
				IPA_MHI_ERR(
					"ipa_dma_sync_memcpy from host fail%d\n"
					, res);
				goto fail_memcopy;
			}
			memcpy(dev_addr, mem.base, size);
		} else {
			/* caller's buffer -> bounce buffer -> host */
			memcpy(mem.base, dev_addr, size);
			res = ipa_dma_sync_memcpy(host_addr, mem.phys_base,
				size);
			if (res) {
				IPA_MHI_ERR(
					"ipa_dma_sync_memcpy to host fail %d\n"
					, res);
				goto fail_memcopy;
			}
		}
		/* success path reuses the error-label cleanup below */
		goto dma_succeed;
	} else {
		void *host_ptr;

		if (!ipa_mhi_client_ctx->test_mode)
			host_ptr = ioremap(host_addr, size);
		else
			/* test mode: host memory is local, directly mapped */
			host_ptr = phys_to_virt(host_addr);
		if (!host_ptr) {
			IPA_MHI_ERR("ioremap failed for 0x%llx\n", host_addr);
			return -EFAULT;
		}
		if (dir == IPA_MHI_DMA_FROM_HOST)
			memcpy(dev_addr, host_ptr, size);
		else
			memcpy(host_ptr, dev_addr, size);
		if (!ipa_mhi_client_ctx->test_mode)
			iounmap(host_ptr);
	}

	IPA_MHI_FUNC_EXIT();
	return 0;

dma_succeed:
	IPA_MHI_FUNC_EXIT();
	res = 0;
	/* deliberate fall-through: disable DMA and free the bounce buffer */
fail_memcopy:
	if (ipa_dma_disable())
		IPA_MHI_ERR("failed to disable IPA DMA\n");
fail_dma_enable:
	dma_free_coherent(pdev, mem.size, mem.base, mem.phys_base);
	return res;
}
  232. static int ipa_mhi_print_channel_info(struct ipa_mhi_channel_ctx *channel,
  233. char *buff, int len)
  234. {
  235. int nbytes = 0;
  236. if (channel->valid) {
  237. nbytes += scnprintf(&buff[nbytes],
  238. len - nbytes,
  239. "channel idx=%d ch_id=%d client=%d state=%s\n",
  240. channel->index, channel->id, channel->client,
  241. MHI_CH_STATE_STR(channel->state));
  242. nbytes += scnprintf(&buff[nbytes],
  243. len - nbytes,
  244. " ch_ctx=%llx\n",
  245. channel->channel_context_addr);
  246. nbytes += scnprintf(&buff[nbytes],
  247. len - nbytes,
  248. " gsi_evt_ring_hdl=%ld ev_ctx=%llx\n",
  249. channel->cached_gsi_evt_ring_hdl,
  250. channel->event_context_addr);
  251. }
  252. return nbytes;
  253. }
  254. static int ipa_mhi_print_host_channel_ctx_info(
  255. struct ipa_mhi_channel_ctx *channel, char *buff, int len)
  256. {
  257. int res, nbytes = 0;
  258. struct ipa_mhi_ch_ctx ch_ctx_host;
  259. memset(&ch_ctx_host, 0, sizeof(ch_ctx_host));
  260. /* reading ch context from host */
  261. res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
  262. &ch_ctx_host, channel->channel_context_addr,
  263. sizeof(ch_ctx_host));
  264. if (res) {
  265. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  266. "Failed to read from host %d\n", res);
  267. return nbytes;
  268. }
  269. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  270. "ch_id: %d\n", channel->id);
  271. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  272. "chstate: 0x%x\n", ch_ctx_host.chstate);
  273. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  274. "brstmode: 0x%x\n", ch_ctx_host.brstmode);
  275. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  276. "chtype: 0x%x\n", ch_ctx_host.chtype);
  277. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  278. "erindex: 0x%x\n", ch_ctx_host.erindex);
  279. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  280. "rbase: 0x%llx\n", ch_ctx_host.rbase);
  281. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  282. "rlen: 0x%llx\n", ch_ctx_host.rlen);
  283. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  284. "rp: 0x%llx\n", ch_ctx_host.rp);
  285. nbytes += scnprintf(&buff[nbytes], len - nbytes,
  286. "wp: 0x%llx\n", ch_ctx_host.wp);
  287. return nbytes;
  288. }
  289. static ssize_t ipa_mhi_debugfs_stats(struct file *file,
  290. char __user *ubuf,
  291. size_t count,
  292. loff_t *ppos)
  293. {
  294. int nbytes = 0;
  295. int i;
  296. struct ipa_mhi_channel_ctx *channel;
  297. nbytes += scnprintf(&dbg_buff[nbytes],
  298. IPA_MHI_MAX_MSG_LEN - nbytes,
  299. "IPA MHI state: %s\n",
  300. MHI_STATE_STR(ipa_mhi_client_ctx->state));
  301. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  302. channel = &ipa_mhi_client_ctx->ul_channels[i];
  303. nbytes += ipa_mhi_print_channel_info(channel,
  304. &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
  305. }
  306. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  307. channel = &ipa_mhi_client_ctx->dl_channels[i];
  308. nbytes += ipa_mhi_print_channel_info(channel,
  309. &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
  310. }
  311. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  312. }
  313. static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file,
  314. char __user *ubuf,
  315. size_t count,
  316. loff_t *ppos)
  317. {
  318. int nbytes = 0;
  319. nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN);
  320. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  321. }
  322. static ssize_t ipa_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file,
  323. char __user *ubuf,
  324. size_t count,
  325. loff_t *ppos)
  326. {
  327. int i, nbytes = 0;
  328. struct ipa_mhi_channel_ctx *channel;
  329. if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_INITIALIZED ||
  330. ipa_mhi_client_ctx->state == IPA_MHI_STATE_READY) {
  331. nbytes += scnprintf(&dbg_buff[nbytes],
  332. IPA_MHI_MAX_MSG_LEN - nbytes,
  333. "Cannot dump host channel context ");
  334. nbytes += scnprintf(&dbg_buff[nbytes],
  335. IPA_MHI_MAX_MSG_LEN - nbytes,
  336. "before IPA MHI was STARTED\n");
  337. return simple_read_from_buffer(ubuf, count, ppos,
  338. dbg_buff, nbytes);
  339. }
  340. if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
  341. nbytes += scnprintf(&dbg_buff[nbytes],
  342. IPA_MHI_MAX_MSG_LEN - nbytes,
  343. "IPA MHI is suspended, cannot dump channel ctx array");
  344. nbytes += scnprintf(&dbg_buff[nbytes],
  345. IPA_MHI_MAX_MSG_LEN - nbytes,
  346. " from host -PCIe can be in D3 state\n");
  347. return simple_read_from_buffer(ubuf, count, ppos,
  348. dbg_buff, nbytes);
  349. }
  350. nbytes += scnprintf(&dbg_buff[nbytes],
  351. IPA_MHI_MAX_MSG_LEN - nbytes,
  352. "channel contex array - dump from host\n");
  353. nbytes += scnprintf(&dbg_buff[nbytes],
  354. IPA_MHI_MAX_MSG_LEN - nbytes,
  355. "***** UL channels *******\n");
  356. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  357. channel = &ipa_mhi_client_ctx->ul_channels[i];
  358. if (!channel->valid)
  359. continue;
  360. nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
  361. &dbg_buff[nbytes],
  362. IPA_MHI_MAX_MSG_LEN - nbytes);
  363. }
  364. nbytes += scnprintf(&dbg_buff[nbytes],
  365. IPA_MHI_MAX_MSG_LEN - nbytes,
  366. "\n***** DL channels *******\n");
  367. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  368. channel = &ipa_mhi_client_ctx->dl_channels[i];
  369. if (!channel->valid)
  370. continue;
  371. nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
  372. &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
  373. }
  374. return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
  375. }
/* debugfs file operations — read-only handlers defined above. */
const struct file_operations ipa_mhi_stats_ops = {
	.read = ipa_mhi_debugfs_stats,
};
const struct file_operations ipa_mhi_uc_stats_ops = {
	.read = ipa_mhi_debugfs_uc_stats,
};
const struct file_operations ipa_mhi_dump_host_ch_ctx_ops = {
	.read = ipa_mhi_debugfs_dump_host_ch_ctx_arr,
};
  385. static void ipa_mhi_debugfs_init(void)
  386. {
  387. const mode_t read_only_mode = 0444;
  388. const mode_t read_write_mode = 0664;
  389. struct dentry *file;
  390. IPA_MHI_FUNC_ENTRY();
  391. dent = debugfs_create_dir("ipa_mhi", 0);
  392. if (IS_ERR(dent)) {
  393. IPA_MHI_ERR("fail to create folder ipa_mhi\n");
  394. return;
  395. }
  396. file = debugfs_create_file("stats", read_only_mode, dent,
  397. 0, &ipa_mhi_stats_ops);
  398. if (!file || IS_ERR(file)) {
  399. IPA_MHI_ERR("fail to create file stats\n");
  400. goto fail;
  401. }
  402. file = debugfs_create_file("uc_stats", read_only_mode, dent,
  403. 0, &ipa_mhi_uc_stats_ops);
  404. if (!file || IS_ERR(file)) {
  405. IPA_MHI_ERR("fail to create file uc_stats\n");
  406. goto fail;
  407. }
  408. file = debugfs_create_u32("use_ipadma", read_write_mode, dent,
  409. &ipa_mhi_client_ctx->use_ipadma);
  410. if (!file || IS_ERR(file)) {
  411. IPA_MHI_ERR("fail to create file use_ipadma\n");
  412. goto fail;
  413. }
  414. file = debugfs_create_file("dump_host_channel_ctx_array",
  415. read_only_mode, dent, 0, &ipa_mhi_dump_host_ch_ctx_ops);
  416. if (!file || IS_ERR(file)) {
  417. IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n");
  418. goto fail;
  419. }
  420. IPA_MHI_FUNC_EXIT();
  421. return;
  422. fail:
  423. debugfs_remove_recursive(dent);
  424. }
#else
/* Debugfs disabled: provide no-op stubs. */
static void ipa_mhi_debugfs_init(void) {}
static void ipa_mhi_debugfs_destroy(void) {}
#endif /* CONFIG_DEBUG_FS */
/* Cached DL/UL sync info handed to the uC at engine init. */
static union IpaHwMhiDlUlSyncCmdData_t ipa_cached_dl_ul_sync_info;

/* Work items used to notify the MHI client outside atomic context. */
static void ipa_mhi_wq_notify_wakeup(struct work_struct *work);
static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup);
static void ipa_mhi_wq_notify_ready(struct work_struct *work);
static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready);
  434. /**
  435. * ipa_mhi_notify_wakeup() - Schedule work to notify data available
  436. *
  437. * This function will schedule a work to notify data available event.
  438. * In case this function is called more than once, only one notification will
  439. * be sent to MHI client driver. No further notifications will be sent until
  440. * IPA MHI state will become STARTED.
  441. */
  442. static void ipa_mhi_notify_wakeup(void)
  443. {
  444. IPA_MHI_FUNC_ENTRY();
  445. if (ipa_mhi_client_ctx->wakeup_notified) {
  446. IPA_MHI_DBG("wakeup already called\n");
  447. return;
  448. }
  449. queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_wakeup_work);
  450. ipa_mhi_client_ctx->wakeup_notified = true;
  451. IPA_MHI_FUNC_EXIT();
  452. }
  453. /**
  454. * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
  455. *
  456. * This function is called from IPA MHI workqueue to notify
  457. * MHI client driver on data available event.
  458. */
  459. static void ipa_mhi_wq_notify_wakeup(struct work_struct *work)
  460. {
  461. IPA_MHI_FUNC_ENTRY();
  462. ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
  463. IPA_MHI_EVENT_DATA_AVAILABLE, 0);
  464. IPA_MHI_FUNC_EXIT();
  465. }
  466. /**
  467. * ipa_mhi_wq_notify_ready() - Notify MHI client on ready
  468. *
  469. * This function is called from IPA MHI workqueue to notify
  470. * MHI client driver on ready event when IPA uC is loaded
  471. */
  472. static void ipa_mhi_wq_notify_ready(struct work_struct *work)
  473. {
  474. IPA_MHI_FUNC_ENTRY();
  475. ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
  476. IPA_MHI_EVENT_READY, 0);
  477. IPA_MHI_FUNC_EXIT();
  478. }
  479. /**
  480. * ipa_mhi_notify_ready() - Schedule work to notify ready
  481. *
  482. * This function will schedule a work to notify ready event.
  483. */
  484. static void ipa_mhi_notify_ready(void)
  485. {
  486. IPA_MHI_FUNC_ENTRY();
  487. queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_ready_work);
  488. IPA_MHI_FUNC_EXIT();
  489. }
  490. /**
  491. * ipa_mhi_set_state() - Set new state to IPA MHI
  492. * @state: new state
  493. *
  494. * Sets a new state to IPA MHI if possible according to IPA MHI state machine.
  495. * In some state transitions a wakeup request will be triggered.
  496. *
  497. * Returns: 0 on success, -1 otherwise
  498. */
  499. static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
  500. {
  501. unsigned long flags;
  502. int res = -EPERM;
  503. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  504. IPA_MHI_DBG("Current state: %s\n",
  505. MHI_STATE_STR(ipa_mhi_client_ctx->state));
  506. switch (ipa_mhi_client_ctx->state) {
  507. case IPA_MHI_STATE_INITIALIZED:
  508. if (new_state == IPA_MHI_STATE_READY) {
  509. ipa_mhi_notify_ready();
  510. res = 0;
  511. }
  512. break;
  513. case IPA_MHI_STATE_READY:
  514. if (new_state == IPA_MHI_STATE_READY)
  515. res = 0;
  516. if (new_state == IPA_MHI_STATE_STARTED)
  517. res = 0;
  518. break;
  519. case IPA_MHI_STATE_STARTED:
  520. if (new_state == IPA_MHI_STATE_INITIALIZED)
  521. res = 0;
  522. else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
  523. res = 0;
  524. break;
  525. case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
  526. if (new_state == IPA_MHI_STATE_SUSPENDED) {
  527. if (ipa_mhi_client_ctx->trigger_wakeup) {
  528. ipa_mhi_client_ctx->trigger_wakeup = false;
  529. ipa_mhi_notify_wakeup();
  530. }
  531. res = 0;
  532. } else if (new_state == IPA_MHI_STATE_STARTED) {
  533. ipa_mhi_client_ctx->wakeup_notified = false;
  534. ipa_mhi_client_ctx->trigger_wakeup = false;
  535. res = 0;
  536. }
  537. break;
  538. case IPA_MHI_STATE_SUSPENDED:
  539. if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS)
  540. res = 0;
  541. break;
  542. case IPA_MHI_STATE_RESUME_IN_PROGRESS:
  543. if (new_state == IPA_MHI_STATE_SUSPENDED) {
  544. if (ipa_mhi_client_ctx->trigger_wakeup) {
  545. ipa_mhi_client_ctx->trigger_wakeup = false;
  546. ipa_mhi_notify_wakeup();
  547. }
  548. res = 0;
  549. } else if (new_state == IPA_MHI_STATE_STARTED) {
  550. ipa_mhi_client_ctx->trigger_wakeup = false;
  551. ipa_mhi_client_ctx->wakeup_notified = false;
  552. res = 0;
  553. }
  554. break;
  555. default:
  556. IPA_MHI_ERR("Invalid state %d\n", ipa_mhi_client_ctx->state);
  557. WARN_ON(1);
  558. }
  559. if (res)
  560. IPA_MHI_ERR("Invalid state change to %s\n",
  561. MHI_STATE_STR(new_state));
  562. else {
  563. IPA_MHI_DBG("New state change to %s\n",
  564. MHI_STATE_STR(new_state));
  565. ipa_mhi_client_ctx->state = new_state;
  566. }
  567. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  568. return res;
  569. }
/* uC "ready" callback: advance the MHI state machine to READY. */
static void ipa_mhi_uc_ready_cb(void)
{
	IPA_MHI_FUNC_ENTRY();
	ipa_mhi_set_state(IPA_MHI_STATE_READY);
	IPA_MHI_FUNC_EXIT();
}
  576. static void ipa_mhi_uc_wakeup_request_cb(void)
  577. {
  578. unsigned long flags;
  579. IPA_MHI_FUNC_ENTRY();
  580. IPA_MHI_DBG("MHI state: %s\n",
  581. MHI_STATE_STR(ipa_mhi_client_ctx->state));
  582. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  583. if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED)
  584. ipa_mhi_notify_wakeup();
  585. else if (ipa_mhi_client_ctx->state ==
  586. IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
  587. /* wakeup event will be triggered after suspend finishes */
  588. ipa_mhi_client_ctx->trigger_wakeup = true;
  589. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  590. IPA_MHI_FUNC_EXIT();
  591. }
  592. /**
  593. * ipa_mhi_start() - Start IPA MHI engine
  594. * @params: pcie addresses for MHI
  595. *
  596. * This function is called by MHI client driver on MHI engine start for
  597. * handling MHI accelerated channels. This function is called after
  598. * ipa_mhi_init() was called and can be called after MHI reset to restart MHI
  599. * engine. When this function returns device can move to M0 state.
  600. *
  601. * Return codes: 0 : success
  602. * negative : error
  603. */
  604. int ipa_mhi_start(struct ipa_mhi_start_params *params)
  605. {
  606. int res;
  607. struct ipa_mhi_init_engine init_params;
  608. IPA_MHI_FUNC_ENTRY();
  609. if (!params) {
  610. IPA_MHI_ERR("null args\n");
  611. return -EINVAL;
  612. }
  613. if (!ipa_mhi_client_ctx) {
  614. IPA_MHI_ERR("not initialized\n");
  615. return -EPERM;
  616. }
  617. res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
  618. if (res) {
  619. IPA_MHI_ERR("ipa_mhi_set_state %d\n", res);
  620. return res;
  621. }
  622. ipa_mhi_client_ctx->host_ctrl_addr = params->host_ctrl_addr;
  623. ipa_mhi_client_ctx->host_data_addr = params->host_data_addr;
  624. ipa_mhi_client_ctx->channel_context_array_addr =
  625. params->channel_context_array_addr;
  626. ipa_mhi_client_ctx->event_context_array_addr =
  627. params->event_context_array_addr;
  628. IPA_MHI_DBG("host_ctrl_addr 0x%x\n",
  629. ipa_mhi_client_ctx->host_ctrl_addr);
  630. IPA_MHI_DBG("host_data_addr 0x%x\n",
  631. ipa_mhi_client_ctx->host_data_addr);
  632. IPA_MHI_DBG("channel_context_array_addr 0x%llx\n",
  633. ipa_mhi_client_ctx->channel_context_array_addr);
  634. IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
  635. ipa_mhi_client_ctx->event_context_array_addr);
  636. res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
  637. if (res) {
  638. IPA_MHI_ERR("failed activate client %d\n", res);
  639. goto fail_pm_activate;
  640. }
  641. res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  642. if (res) {
  643. IPA_MHI_ERR("failed activate modem client %d\n", res);
  644. goto fail_pm_activate_modem;
  645. }
  646. /* gsi params */
  647. init_params.gsi.first_ch_idx =
  648. ipa_mhi_client_ctx->first_ch_idx;
  649. /* uC params */
  650. init_params.uC.first_ch_idx =
  651. ipa_mhi_client_ctx->first_ch_idx;
  652. init_params.uC.first_er_idx =
  653. ipa_mhi_client_ctx->first_er_idx;
  654. init_params.uC.host_ctrl_addr = params->host_ctrl_addr;
  655. init_params.uC.host_data_addr = params->host_data_addr;
  656. init_params.uC.mmio_addr = ipa_mhi_client_ctx->mmio_addr;
  657. init_params.uC.msi = &ipa_mhi_client_ctx->msi;
  658. init_params.uC.ipa_cached_dl_ul_sync_info =
  659. &ipa_cached_dl_ul_sync_info;
  660. res = ipa_mhi_init_engine(&init_params);
  661. if (res) {
  662. IPA_MHI_ERR("IPA core failed to start MHI %d\n", res);
  663. goto fail_init_engine;
  664. }
  665. IPA_MHI_FUNC_EXIT();
  666. return 0;
  667. fail_init_engine:
  668. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  669. fail_pm_activate_modem:
  670. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
  671. fail_pm_activate:
  672. ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
  673. return res;
  674. }
  675. /**
  676. * ipa_mhi_get_channel_context() - Get corresponding channel context
  677. * @ep: IPA ep
  678. * @channel_id: Channel ID
  679. *
  680. * This function will return the corresponding channel context or allocate new
  681. * one in case channel context for channel does not exist.
  682. */
  683. static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context(
  684. enum ipa_client_type client, u8 channel_id)
  685. {
  686. int ch_idx;
  687. struct ipa_mhi_channel_ctx *channels;
  688. int max_channels;
  689. if (IPA_CLIENT_IS_PROD(client)) {
  690. channels = ipa_mhi_client_ctx->ul_channels;
  691. max_channels = IPA_MHI_MAX_UL_CHANNELS;
  692. } else {
  693. channels = ipa_mhi_client_ctx->dl_channels;
  694. max_channels = IPA_MHI_MAX_DL_CHANNELS;
  695. }
  696. /* find the channel context according to channel id */
  697. for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
  698. if (channels[ch_idx].valid &&
  699. channels[ch_idx].id == channel_id)
  700. return &channels[ch_idx];
  701. }
  702. /* channel context does not exists, allocate a new one */
  703. for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
  704. if (!channels[ch_idx].valid)
  705. break;
  706. }
  707. if (ch_idx == max_channels) {
  708. IPA_MHI_ERR("no more channels available\n");
  709. return NULL;
  710. }
  711. channels[ch_idx].valid = true;
  712. channels[ch_idx].id = channel_id;
  713. channels[ch_idx].index = ipa_mhi_client_ctx->total_channels++;
  714. channels[ch_idx].client = client;
  715. channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
  716. return &channels[ch_idx];
  717. }
  718. /**
  719. * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel
  720. * context
  721. * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe()
  722. *
  723. * This function will return the corresponding channel context or NULL in case
  724. * that channel does not exist.
  725. */
  726. static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl(
  727. u32 clnt_hdl)
  728. {
  729. int ch_idx;
  730. for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
  731. if (ipa_mhi_client_ctx->ul_channels[ch_idx].valid &&
  732. ipa_get_ep_mapping(
  733. ipa_mhi_client_ctx->ul_channels[ch_idx].client)
  734. == clnt_hdl)
  735. return &ipa_mhi_client_ctx->ul_channels[ch_idx];
  736. }
  737. for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
  738. if (ipa_mhi_client_ctx->dl_channels[ch_idx].valid &&
  739. ipa_get_ep_mapping(
  740. ipa_mhi_client_ctx->dl_channels[ch_idx].client)
  741. == clnt_hdl)
  742. return &ipa_mhi_client_ctx->dl_channels[ch_idx];
  743. }
  744. return NULL;
  745. }
/* Log the cached host-side channel context (ch_ctx_host) for @channel */
static void ipa_mhi_dump_ch_ctx(struct ipa_mhi_channel_ctx *channel)
{
	IPA_MHI_DBG("ch_id %d\n", channel->id);
	IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate);
	IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode);
	IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg);
	IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype);
	IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex);
	IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase);
	IPA_MHI_DBG("rlen 0x%llx\n", channel->ch_ctx_host.rlen);
	IPA_MHI_DBG("rp 0x%llx\n", channel->ch_ctx_host.rp);
	IPA_MHI_DBG("wp 0x%llx\n", channel->ch_ctx_host.wp);
}
/* Log the cached host-side event ring context (ev_ctx_host) for @channel */
static void ipa_mhi_dump_ev_ctx(struct ipa_mhi_channel_ctx *channel)
{
	IPA_MHI_DBG("ch_id %d event id %d\n", channel->id,
		channel->ch_ctx_host.erindex);
	IPA_MHI_DBG("intmodc 0x%x\n", channel->ev_ctx_host.intmodc);
	IPA_MHI_DBG("intmodt 0x%x\n", channel->ev_ctx_host.intmodt);
	IPA_MHI_DBG("ertype 0x%x\n", channel->ev_ctx_host.ertype);
	IPA_MHI_DBG("msivec 0x%x\n", channel->ev_ctx_host.msivec);
	IPA_MHI_DBG("rbase 0x%llx\n", channel->ev_ctx_host.rbase);
	IPA_MHI_DBG("rlen 0x%llx\n", channel->ev_ctx_host.rlen);
	IPA_MHI_DBG("rp 0x%llx\n", channel->ev_ctx_host.rp);
	IPA_MHI_DBG("wp 0x%llx\n", channel->ev_ctx_host.wp);
}
/*
 * ipa_mhi_read_ch_ctx() - refresh channel and event contexts from host memory
 * @channel: channel whose cached contexts should be (re)read
 *
 * DMAs the channel context from the host channel-context array into
 * channel->ch_ctx_host, derives the event-context address from the erindex
 * field just read, then DMAs that event context into channel->ev_ctx_host.
 * Both contexts are dumped to the debug log on success.
 *
 * Return: 0 on success, negative value from ipa_mhi_read_write_host() on
 * DMA failure.
 */
static int ipa_mhi_read_ch_ctx(struct ipa_mhi_channel_ctx *channel)
{
	int res;

	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
		&channel->ch_ctx_host, channel->channel_context_addr,
		sizeof(channel->ch_ctx_host));
	if (res) {
		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
		return res;
	}
	ipa_mhi_dump_ch_ctx(channel);

	/* event context location depends on the erindex we just fetched */
	channel->event_context_addr =
		ipa_mhi_client_ctx->event_context_array_addr +
		channel->ch_ctx_host.erindex * sizeof(struct ipa_mhi_ev_ctx);
	IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id,
		channel->event_context_addr);

	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
		&channel->ev_ctx_host, channel->event_context_addr,
		sizeof(channel->ev_ctx_host));
	if (res) {
		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
		return res;
	}
	ipa_mhi_dump_ev_ctx(channel);

	return 0;
}
/*
 * GSI event-ring error callback registered via ipa_mhi_connect_pipe().
 * Every event-ring error is treated as fatal: the error is logged and the
 * driver asserts. notify->user_data carries the channel context supplied
 * at connect time.
 */
static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify)
{
	struct ipa_mhi_channel_ctx *channel = notify->user_data;

	IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
		channel->id, channel->client, channel->state);
	switch (notify->evt_id) {
	case GSI_EVT_OUT_OF_BUFFERS_ERR:
		IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_EVT_OUT_OF_RESOURCES_ERR:
		IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
		IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_EVT_EVT_RING_EMPTY_ERR:
		IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n");
		break;
	default:
		IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
	}
	IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
	/* event-ring errors are unrecoverable */
	ipa_assert();
}
/*
 * GSI channel error callback registered via ipa_mhi_connect_pipe().
 * Every channel error is treated as fatal: the error is logged and the
 * driver asserts. notify->chan_user_data carries the channel context
 * supplied at connect time.
 */
static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify)
{
	struct ipa_mhi_channel_ctx *channel = notify->chan_user_data;

	IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
		channel->id, channel->client, channel->state);
	switch (notify->evt_id) {
	case GSI_CHAN_INVALID_TRE_ERR:
		IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
		break;
	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
		IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
		IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
		IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
		IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_CHAN_HWO_1_ERR:
		IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n");
		break;
	default:
		IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
	}
	IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
	/* channel errors are unrecoverable */
	ipa_assert();
}
  852. static bool ipa_mhi_gsi_channel_empty(struct ipa_mhi_channel_ctx *channel)
  853. {
  854. IPA_MHI_FUNC_ENTRY();
  855. if (!channel->stop_in_proc) {
  856. IPA_MHI_DBG("Channel is not in STOP_IN_PROC\n");
  857. return true;
  858. }
  859. if (ipa_mhi_stop_gsi_channel(channel->client)) {
  860. channel->stop_in_proc = false;
  861. return true;
  862. }
  863. return false;
  864. }
/**
 * ipa_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink
 * @msecs: timeout to wait
 *
 * This function will poll until there are no packets pending in uplink
 * channels or timeout occurred.
 *
 * Return code: true - no pending packets in uplink channels
 * false - timeout occurred
 */
static bool ipa_mhi_wait_for_ul_empty_timeout(unsigned int msecs)
{
	unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
	unsigned long jiffies_start = jiffies;
	bool empty = false;
	int i;

	IPA_MHI_FUNC_ENTRY();
	while (!empty) {
		/* assume empty; any non-drained valid channel clears it */
		empty = true;
		for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
			if (!ipa_mhi_client_ctx->ul_channels[i].valid)
				continue;
			if (ipa_get_transport_type() ==
				IPA_TRANSPORT_TYPE_GSI)
				empty &= ipa_mhi_gsi_channel_empty(
					&ipa_mhi_client_ctx->ul_channels[i]);
			else
				empty &= ipa_mhi_sps_channel_empty(
				ipa_mhi_client_ctx->ul_channels[i].client);
		}

		/*
		 * NOTE(review): on timeout this breaks with empty possibly
		 * still false; the DBG text reads "finished waiting" in both
		 * cases — the return value is what callers must check.
		 */
		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
			IPA_MHI_DBG("finished waiting for UL empty\n");
			break;
		}

		/* back off between polls only in the single-UL GSI case */
		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI &&
		    IPA_MHI_MAX_UL_CHANNELS == 1)
			usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
				IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
	}

	IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty");
	IPA_MHI_FUNC_EXIT();
	return empty;
}
  908. static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source)
  909. {
  910. struct ipa_enable_force_clear_datapath_req_msg_v01 req;
  911. int i;
  912. int res;
  913. IPA_MHI_FUNC_ENTRY();
  914. memset(&req, 0, sizeof(req));
  915. req.request_id = request_id;
  916. req.source_pipe_bitmask = 0;
  917. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  918. if (!ipa_mhi_client_ctx->ul_channels[i].valid)
  919. continue;
  920. req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping(
  921. ipa_mhi_client_ctx->ul_channels[i].client);
  922. }
  923. if (throttle_source) {
  924. req.throttle_source_valid = 1;
  925. req.throttle_source = 1;
  926. }
  927. IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
  928. req.request_id, req.source_pipe_bitmask,
  929. req.throttle_source);
  930. res = ipa_qmi_enable_force_clear_datapath_send(&req);
  931. if (res) {
  932. IPA_MHI_ERR(
  933. "ipa_qmi_enable_force_clear_datapath_send failed %d\n"
  934. , res);
  935. return res;
  936. }
  937. IPA_MHI_FUNC_EXIT();
  938. return 0;
  939. }
  940. static int ipa_mhi_disable_force_clear(u32 request_id)
  941. {
  942. struct ipa_disable_force_clear_datapath_req_msg_v01 req;
  943. int res;
  944. IPA_MHI_FUNC_ENTRY();
  945. memset(&req, 0, sizeof(req));
  946. req.request_id = request_id;
  947. IPA_MHI_DBG("req_id=0x%x\n", req.request_id);
  948. res = ipa_qmi_disable_force_clear_datapath_send(&req);
  949. if (res) {
  950. IPA_MHI_ERR(
  951. "ipa_qmi_disable_force_clear_datapath_send failed %d\n"
  952. , res);
  953. return res;
  954. }
  955. IPA_MHI_FUNC_EXIT();
  956. return 0;
  957. }
  958. static void ipa_mhi_set_holb_on_dl_channels(bool enable,
  959. struct ipa_ep_cfg_holb old_holb[])
  960. {
  961. int i;
  962. struct ipa_ep_cfg_holb ep_holb;
  963. int ep_idx;
  964. int res;
  965. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  966. if (!ipa_mhi_client_ctx->dl_channels[i].valid)
  967. continue;
  968. if (ipa_mhi_client_ctx->dl_channels[i].state ==
  969. IPA_HW_MHI_CHANNEL_STATE_INVALID)
  970. continue;
  971. ep_idx = ipa_get_ep_mapping(
  972. ipa_mhi_client_ctx->dl_channels[i].client);
  973. if (-1 == ep_idx) {
  974. IPA_MHI_ERR("Client %u is not mapped\n",
  975. ipa_mhi_client_ctx->dl_channels[i].client);
  976. ipa_assert();
  977. return;
  978. }
  979. memset(&ep_holb, 0, sizeof(ep_holb));
  980. if (enable) {
  981. ipa_get_holb(ep_idx, &old_holb[i]);
  982. ep_holb.en = 1;
  983. ep_holb.tmr_val = 0;
  984. } else {
  985. ep_holb = old_holb[i];
  986. }
  987. res = ipa_cfg_ep_holb(ep_idx, &ep_holb);
  988. if (res) {
  989. IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res);
  990. ipa_assert();
  991. return;
  992. }
  993. }
  994. }
/*
 * ipa_mhi_suspend_gsi_channel() - issue a GSI stop on @channel's endpoint
 *
 * -GSI_STATUS_AGAIN and -GSI_STATUS_TIMED_OUT are not treated as failures:
 * they mean the stop is still in progress, so the channel is flagged
 * stop_in_proc and callers must poll (see ipa_mhi_gsi_channel_empty())
 * until it drains. Any other non-zero result is a hard failure.
 *
 * Return: 0 when the channel is stopped or stopping, -EFAULT on error.
 */
static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel)
{
	int clnt_hdl;
	int res;

	IPA_MHI_FUNC_ENTRY();
	clnt_hdl = ipa_get_ep_mapping(channel->client);
	if (clnt_hdl < 0)
		return -EFAULT;

	res = ipa_stop_gsi_channel(clnt_hdl);
	if (res != 0 && res != -GSI_STATUS_AGAIN &&
	    res != -GSI_STATUS_TIMED_OUT) {
		IPA_MHI_ERR("GSI stop channel failed %d\n", res);
		return -EFAULT;
	}

	/* check if channel was stopped completely */
	/*
	 * NOTE(review): stop_in_proc is only ever set here and cleared on
	 * resume or once the channel drains — it is never force-cleared on a
	 * fully-completed stop (res == 0); presumably it is already false at
	 * this point — verify against callers.
	 */
	if (res)
		channel->stop_in_proc = true;

	IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ?
		"STOP_IN_PROC" : "STOP");

	IPA_MHI_FUNC_EXIT();
	return 0;
}
/*
 * ipa_mhi_reset_ul_channel() - stop and reset an uplink channel
 * @channel: UL channel context
 *
 * Sequence:
 *  1. Stop the channel (GSI stop, or uC reset for the non-GSI transport).
 *  2. Wait for the UL path to drain. If it does not become empty, enable
 *     the QMI force-clear datapath, drain again (GSI) or flush via
 *     HOLB-drop + tag process + SPS pipe disable (non-GSI), then disable
 *     force-clear. Failures in this recovery path assert.
 *  3. Reset the channel in HW via ipa_mhi_reset_channel_internal().
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
{
	int res;
	bool empty;
	struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];

	IPA_MHI_FUNC_ENTRY();
	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		res = ipa_mhi_suspend_gsi_channel(channel);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n",
				res);
			return res;
		}
	} else {
		res = ipa_uc_mhi_reset_channel(channel->index);
		if (res) {
			IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
				res);
			return res;
		}
	}
	empty = ipa_mhi_wait_for_ul_empty_timeout(
			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
	if (!empty) {
		IPA_MHI_DBG("%s not empty\n",
			(ipa_get_transport_type() ==
				IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM");
		res = ipa_mhi_enable_force_clear(
				ipa_mhi_client_ctx->qmi_req_id, false);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n",
				res);
			ipa_assert();
			return res;
		}
		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
			empty = ipa_mhi_wait_for_ul_empty_timeout(
				IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
			IPA_MHI_DBG("empty=%d\n", empty);
		} else {
			/* enable packet drop on all DL channels */
			ipa_mhi_set_holb_on_dl_channels(true, old_ep_holb);
			ipa_generate_tag_process();
			/* disable packet drop on all DL channels */
			ipa_mhi_set_holb_on_dl_channels(false, old_ep_holb);

			res = ipa_disable_sps_pipe(channel->client);
			if (res) {
				IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
				ipa_assert();
				return res;
			}
		}
		res =
		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n",
				res);
			ipa_assert();
			return res;
		}
		/* each force-clear round must use a fresh QMI request id */
		ipa_mhi_client_ctx->qmi_req_id++;
	}
	res = ipa_mhi_reset_channel_internal(channel->client);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_reset_ul_channel_internal failed %d\n"
			, res);
		return res;
	}

	IPA_MHI_FUNC_EXIT();
	return 0;
}
  1088. static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
  1089. {
  1090. int res;
  1091. IPA_MHI_FUNC_ENTRY();
  1092. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1093. res = ipa_mhi_suspend_gsi_channel(channel);
  1094. if (res) {
  1095. IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n"
  1096. , res);
  1097. return res;
  1098. }
  1099. res = ipa_mhi_reset_channel_internal(channel->client);
  1100. if (res) {
  1101. IPA_MHI_ERR(
  1102. "ipa_mhi_reset_ul_channel_internal failed %d\n"
  1103. , res);
  1104. return res;
  1105. }
  1106. } else {
  1107. res = ipa_mhi_reset_channel_internal(channel->client);
  1108. if (res) {
  1109. IPA_MHI_ERR(
  1110. "ipa_mhi_reset_ul_channel_internal failed %d\n"
  1111. , res);
  1112. return res;
  1113. }
  1114. res = ipa_uc_mhi_reset_channel(channel->index);
  1115. if (res) {
  1116. IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
  1117. res);
  1118. ipa_mhi_start_channel_internal(channel->client);
  1119. return res;
  1120. }
  1121. }
  1122. IPA_MHI_FUNC_EXIT();
  1123. return 0;
  1124. }
  1125. static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel)
  1126. {
  1127. int res;
  1128. IPA_MHI_FUNC_ENTRY();
  1129. if (IPA_CLIENT_IS_PROD(channel->client))
  1130. res = ipa_mhi_reset_ul_channel(channel);
  1131. else
  1132. res = ipa_mhi_reset_dl_channel(channel);
  1133. if (res) {
  1134. IPA_MHI_ERR("failed to reset channel error %d\n", res);
  1135. return res;
  1136. }
  1137. channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
  1138. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1139. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1140. &channel->state, channel->channel_context_addr +
  1141. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1142. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1143. if (res) {
  1144. IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
  1145. return res;
  1146. }
  1147. }
  1148. IPA_MHI_FUNC_EXIT();
  1149. return 0;
  1150. }
  1151. /**
  1152. * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
  1153. * MHI channel
  1154. * @in: connect parameters
  1155. * @clnt_hdl: [out] client handle for this pipe
  1156. *
  1157. * This function is called by MHI client driver on MHI channel start.
  1158. * This function is called after MHI engine was started.
  1159. *
  1160. * Return codes: 0 : success
  1161. * negative : error
  1162. */
  1163. int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
  1164. {
  1165. int res;
  1166. unsigned long flags;
  1167. struct ipa_mhi_channel_ctx *channel = NULL;
  1168. IPA_MHI_FUNC_ENTRY();
  1169. if (!in || !clnt_hdl) {
  1170. IPA_MHI_ERR("NULL args\n");
  1171. return -EINVAL;
  1172. }
  1173. if (in->sys.client >= IPA_CLIENT_MAX) {
  1174. IPA_MHI_ERR("bad param client:%d\n", in->sys.client);
  1175. return -EINVAL;
  1176. }
  1177. if (!IPA_CLIENT_IS_MHI(in->sys.client)) {
  1178. IPA_MHI_ERR(
  1179. "Invalid MHI client, client: %d\n", in->sys.client);
  1180. return -EINVAL;
  1181. }
  1182. IPA_MHI_DBG("channel=%d\n", in->channel_id);
  1183. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  1184. if (!ipa_mhi_client_ctx ||
  1185. ipa_mhi_client_ctx->state != IPA_MHI_STATE_STARTED) {
  1186. IPA_MHI_ERR("IPA MHI was not started\n");
  1187. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1188. return -EINVAL;
  1189. }
  1190. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1191. channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id);
  1192. if (!channel) {
  1193. IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
  1194. return -EINVAL;
  1195. }
  1196. if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID &&
  1197. channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
  1198. IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
  1199. return -EFAULT;
  1200. }
  1201. channel->channel_context_addr =
  1202. ipa_mhi_client_ctx->channel_context_array_addr +
  1203. channel->id * sizeof(struct ipa_mhi_ch_ctx);
  1204. /* for event context address index needs to read from host */
  1205. IPA_MHI_DBG("client %d channelIndex %d channelID %d, state %d\n",
  1206. channel->client, channel->index, channel->id, channel->state);
  1207. IPA_MHI_DBG("channel_context_addr 0x%llx cached_gsi_evt_ring_hdl %lu\n",
  1208. channel->channel_context_addr,
  1209. channel->cached_gsi_evt_ring_hdl);
  1210. IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
  1211. mutex_lock(&mhi_client_general_mutex);
  1212. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1213. struct ipa_mhi_connect_params_internal internal;
  1214. IPA_MHI_DBG("reading ch/ev context from host\n");
  1215. res = ipa_mhi_read_ch_ctx(channel);
  1216. if (res) {
  1217. IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
  1218. goto fail_start_channel;
  1219. }
  1220. internal.channel_id = in->channel_id;
  1221. internal.sys = &in->sys;
  1222. internal.start.gsi.state = channel->state;
  1223. internal.start.gsi.msi = &ipa_mhi_client_ctx->msi;
  1224. internal.start.gsi.ev_ctx_host = &channel->ev_ctx_host;
  1225. internal.start.gsi.event_context_addr =
  1226. channel->event_context_addr;
  1227. internal.start.gsi.ch_ctx_host = &channel->ch_ctx_host;
  1228. internal.start.gsi.channel_context_addr =
  1229. channel->channel_context_addr;
  1230. internal.start.gsi.ch_err_cb = ipa_mhi_gsi_ch_err_cb;
  1231. internal.start.gsi.channel = (void *)channel;
  1232. internal.start.gsi.ev_err_cb = ipa_mhi_gsi_ev_err_cb;
  1233. internal.start.gsi.assert_bit40 =
  1234. ipa_mhi_client_ctx->assert_bit40;
  1235. internal.start.gsi.mhi = &channel->ch_scratch.mhi;
  1236. internal.start.gsi.cached_gsi_evt_ring_hdl =
  1237. &channel->cached_gsi_evt_ring_hdl;
  1238. internal.start.gsi.evchid = channel->index;
  1239. res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
  1240. if (res) {
  1241. IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
  1242. goto fail_connect_pipe;
  1243. }
  1244. channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
  1245. channel->brstmode_enabled =
  1246. channel->ch_scratch.mhi.burst_mode_enabled;
  1247. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1248. &channel->state, channel->channel_context_addr +
  1249. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1250. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1251. if (res) {
  1252. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1253. mutex_unlock(&mhi_client_general_mutex);
  1254. IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
  1255. return res;
  1256. }
  1257. } else {
  1258. struct ipa_mhi_connect_params_internal internal;
  1259. internal.channel_id = in->channel_id;
  1260. internal.sys = &in->sys;
  1261. internal.start.uC.index = channel->index;
  1262. internal.start.uC.id = channel->id;
  1263. internal.start.uC.state = channel->state;
  1264. res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
  1265. if (res) {
  1266. IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
  1267. goto fail_connect_pipe;
  1268. }
  1269. channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
  1270. }
  1271. mutex_unlock(&mhi_client_general_mutex);
  1272. if (!in->sys.keep_ipa_awake)
  1273. IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
  1274. IPA_MHI_FUNC_EXIT();
  1275. return 0;
  1276. fail_connect_pipe:
  1277. mutex_unlock(&mhi_client_general_mutex);
  1278. ipa_mhi_reset_channel(channel);
  1279. fail_start_channel:
  1280. IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
  1281. return -EPERM;
  1282. }
  1283. /**
  1284. * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
  1285. * MHI channel
  1286. * @clnt_hdl: client handle for this pipe
  1287. *
  1288. * This function is called by MHI client driver on MHI channel reset.
  1289. * This function is called after MHI channel was started.
  1290. * This function is doing the following:
  1291. * - Send command to uC/GSI to reset corresponding MHI channel
  1292. * - Configure IPA EP control
  1293. *
  1294. * Return codes: 0 : success
  1295. * negative : error
  1296. */
  1297. int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
  1298. {
  1299. int res;
  1300. enum ipa_client_type client;
  1301. static struct ipa_mhi_channel_ctx *channel;
  1302. IPA_MHI_FUNC_ENTRY();
  1303. if (!ipa_mhi_client_ctx) {
  1304. IPA_MHI_ERR("IPA MHI was not initialized\n");
  1305. return -EINVAL;
  1306. }
  1307. client = ipa_get_client_mapping(clnt_hdl);
  1308. if (!IPA_CLIENT_IS_MHI(client)) {
  1309. IPA_MHI_ERR("invalid IPA MHI client, client: %d\n", client);
  1310. return -EINVAL;
  1311. }
  1312. channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
  1313. if (!channel) {
  1314. IPA_MHI_ERR("invalid clnt index\n");
  1315. return -EINVAL;
  1316. }
  1317. IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl));
  1318. res = ipa_mhi_reset_channel(channel);
  1319. if (res) {
  1320. IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
  1321. goto fail_reset_channel;
  1322. }
  1323. mutex_lock(&mhi_client_general_mutex);
  1324. res = ipa_disconnect_mhi_pipe(clnt_hdl);
  1325. if (res) {
  1326. IPA_MHI_ERR(
  1327. "IPA core driver failed to disconnect the pipe hdl %d, res %d"
  1328. , clnt_hdl, res);
  1329. goto fail_disconnect_pipe;
  1330. }
  1331. mutex_unlock(&mhi_client_general_mutex);
  1332. IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
  1333. IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
  1334. IPA_MHI_FUNC_EXIT();
  1335. return 0;
  1336. fail_disconnect_pipe:
  1337. mutex_unlock(&mhi_client_general_mutex);
  1338. fail_reset_channel:
  1339. IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
  1340. return res;
  1341. }
  1342. static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels,
  1343. int max_channels)
  1344. {
  1345. int i;
  1346. int res;
  1347. IPA_MHI_FUNC_ENTRY();
  1348. for (i = 0; i < max_channels; i++) {
  1349. if (!channels[i].valid)
  1350. continue;
  1351. if (channels[i].state !=
  1352. IPA_HW_MHI_CHANNEL_STATE_RUN)
  1353. continue;
  1354. IPA_MHI_DBG("suspending channel %d\n",
  1355. channels[i].id);
  1356. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  1357. res = ipa_mhi_suspend_gsi_channel(
  1358. &channels[i]);
  1359. else
  1360. res = ipa_uc_mhi_suspend_channel(
  1361. channels[i].index);
  1362. if (res) {
  1363. IPA_MHI_ERR("failed to suspend channel %d error %d\n",
  1364. i, res);
  1365. return res;
  1366. }
  1367. channels[i].state =
  1368. IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
  1369. }
  1370. IPA_MHI_FUNC_EXIT();
  1371. return 0;
  1372. }
  1373. static int ipa_mhi_stop_event_update_channels(
  1374. struct ipa_mhi_channel_ctx *channels, int max_channels)
  1375. {
  1376. int i;
  1377. int res;
  1378. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  1379. return 0;
  1380. IPA_MHI_FUNC_ENTRY();
  1381. for (i = 0; i < max_channels; i++) {
  1382. if (!channels[i].valid)
  1383. continue;
  1384. if (channels[i].state !=
  1385. IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
  1386. continue;
  1387. IPA_MHI_DBG("stop update event channel %d\n",
  1388. channels[i].id);
  1389. res = ipa_uc_mhi_stop_event_update_channel(
  1390. channels[i].index);
  1391. if (res) {
  1392. IPA_MHI_ERR("failed stop event channel %d error %d\n",
  1393. i, res);
  1394. return res;
  1395. }
  1396. }
  1397. IPA_MHI_FUNC_EXIT();
  1398. return 0;
  1399. }
/*
 * ipa_mhi_check_pending_packets_from_host() - are there host-posted TREs the
 * device has not yet consumed on any valid UL channel?
 *
 * Compares the device-side read pointer (ipa_mhi_query_ch_info) with the
 * host-side write pointer (re-read from host memory via
 * ipa_mhi_read_ch_ctx). A query or DMA failure also returns true, which
 * callers treat as "not safe to suspend".
 *
 * Return: true if packets are pending (or state could not be read),
 * false when all valid UL channels are fully consumed.
 */
static bool ipa_mhi_check_pending_packets_from_host(void)
{
	int i;
	int res;
	struct ipa_mhi_channel_ctx *channel;

	IPA_MHI_FUNC_ENTRY();
	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
		channel = &ipa_mhi_client_ctx->ul_channels[i];
		if (!channel->valid)
			continue;

		res = ipa_mhi_query_ch_info(channel->client,
			&channel->ch_info);
		if (res) {
			IPA_MHI_ERR("gsi_query_channel_info failed\n");
			return true;
		}
		res = ipa_mhi_read_ch_ctx(channel);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
			return true;
		}

		/* device rp lagging host wp means unconsumed TREs remain */
		if (channel->ch_info.rp != channel->ch_ctx_host.wp) {
			IPA_MHI_DBG("There are pending packets from host\n");
			IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n",
				channel->ch_info.rp, channel->ch_ctx_host.wp);

			return true;
		}
	}

	IPA_MHI_FUNC_EXIT();
	return false;
}
  1431. static int ipa_mhi_resume_channels(bool LPTransitionRejected,
  1432. struct ipa_mhi_channel_ctx *channels, int max_channels)
  1433. {
  1434. int i;
  1435. int res;
  1436. struct ipa_mhi_channel_ctx *channel;
  1437. IPA_MHI_FUNC_ENTRY();
  1438. for (i = 0; i < max_channels; i++) {
  1439. if (!channels[i].valid)
  1440. continue;
  1441. if (channels[i].state !=
  1442. IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
  1443. continue;
  1444. channel = &channels[i];
  1445. IPA_MHI_DBG("resuming channel %d\n", channel->id);
  1446. res = ipa_mhi_resume_channels_internal(channel->client,
  1447. LPTransitionRejected, channel->brstmode_enabled,
  1448. channel->ch_scratch, channel->index);
  1449. if (res) {
  1450. IPA_MHI_ERR("failed to resume channel %d error %d\n",
  1451. i, res);
  1452. return res;
  1453. }
  1454. channel->stop_in_proc = false;
  1455. channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
  1456. }
  1457. IPA_MHI_FUNC_EXIT();
  1458. return 0;
  1459. }
/**
 * ipa_mhi_suspend_ul() - Suspend MHI accelerated up link channels
 * @force:
 * false: in case of data pending in IPA, MHI channels will not be
 * suspended and function will fail.
 * true: in case of data pending in IPA, make sure no further access from
 * IPA to PCIe is possible. In this case suspend cannot fail.
 * @empty: [out] true when the UL path drained within the timeout
 * @force_clear: [out] true when QMI force-clear was engaged (and, on
 * success, already disengaged again) during this call
 *
 * This function is called by MHI client driver on MHI suspend.
 * This function is called after MHI channel was started.
 * When this function returns device can move to M1/M2/M3/D3cold state.
 *
 * Return codes: 0 : success
 * negative : error (-EAGAIN means "retry: data still pending")
 */
static int ipa_mhi_suspend_ul(bool force, bool *empty, bool *force_clear)
{
	int res;

	*force_clear = false;

	res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
		IPA_MHI_MAX_UL_CHANNELS);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
		goto fail_suspend_ul_channel;
	}

	*empty = ipa_mhi_wait_for_ul_empty_timeout(
			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);

	if (!*empty) {
		if (force) {
			/* forced suspend: force-clear the datapath and
			 * drain once more; failure here is fatal
			 */
			res = ipa_mhi_enable_force_clear(
				ipa_mhi_client_ctx->qmi_req_id, false);
			if (res) {
				IPA_MHI_ERR("failed to enable force clear\n");
				ipa_assert();
				return res;
			}
			*force_clear = true;
			IPA_MHI_DBG("force clear datapath enabled\n");

			*empty = ipa_mhi_wait_for_ul_empty_timeout(
				IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
			IPA_MHI_DBG("empty=%d\n", *empty);
			if (!*empty && ipa_get_transport_type()
				== IPA_TRANSPORT_TYPE_GSI) {
				IPA_MHI_ERR("Failed to suspend UL channels\n");
				/* in test mode report -EAGAIN instead of
				 * asserting, so tests can exercise retry
				 */
				if (ipa_mhi_client_ctx->test_mode) {
					res = -EAGAIN;
					goto fail_suspend_ul_channel;
				}

				ipa_assert();
			}
		} else {
			/* non-forced suspend must not drop data: bail out */
			IPA_MHI_DBG("IPA not empty\n");
			res = -EAGAIN;
			goto fail_suspend_ul_channel;
		}
	}

	if (*force_clear) {
		res =
		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
		if (res) {
			IPA_MHI_ERR("failed to disable force clear\n");
			ipa_assert();
			return res;
		}
		IPA_MHI_DBG("force clear datapath disabled\n");
		/* each force-clear round must use a fresh QMI request id */
		ipa_mhi_client_ctx->qmi_req_id++;
	}

	/* GSI + non-forced: abort if the host posted more TREs meanwhile */
	if (!force && ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		if (ipa_mhi_check_pending_packets_from_host()) {
			res = -EAGAIN;
			goto fail_suspend_ul_channel;
		}
	}

	res = ipa_mhi_stop_event_update_channels(
		ipa_mhi_client_ctx->ul_channels, IPA_MHI_MAX_UL_CHANNELS);
	if (res) {
		IPA_MHI_ERR(
			"ipa_mhi_stop_event_update_ul_channels failed %d\n",
			res);
		goto fail_suspend_ul_channel;
	}

	return 0;

fail_suspend_ul_channel:
	return res;
}
  1546. static bool ipa_mhi_has_open_aggr_frame(void)
  1547. {
  1548. struct ipa_mhi_channel_ctx *channel;
  1549. int i;
  1550. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  1551. channel = &ipa_mhi_client_ctx->dl_channels[i];
  1552. if (!channel->valid)
  1553. continue;
  1554. if (ipa_has_open_aggr_frame(channel->client))
  1555. return true;
  1556. }
  1557. return false;
  1558. }
  1559. static void ipa_mhi_update_host_ch_state(bool update_rp)
  1560. {
  1561. int i;
  1562. int res;
  1563. struct ipa_mhi_channel_ctx *channel;
  1564. for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
  1565. channel = &ipa_mhi_client_ctx->ul_channels[i];
  1566. if (!channel->valid)
  1567. continue;
  1568. if (update_rp) {
  1569. res = ipa_mhi_query_ch_info(channel->client,
  1570. &channel->ch_info);
  1571. if (res) {
  1572. IPA_MHI_ERR("gsi_query_channel_info failed\n");
  1573. ipa_assert();
  1574. return;
  1575. }
  1576. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1577. &channel->ch_info.rp,
  1578. channel->channel_context_addr +
  1579. offsetof(struct ipa_mhi_ch_ctx, rp),
  1580. sizeof(channel->ch_info.rp));
  1581. if (res) {
  1582. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1583. ipa_assert();
  1584. return;
  1585. }
  1586. }
  1587. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1588. &channel->state, channel->channel_context_addr +
  1589. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1590. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1591. if (res) {
  1592. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1593. ipa_assert();
  1594. return;
  1595. }
  1596. IPA_MHI_DBG("Updated UL CH=%d state to %s on host\n",
  1597. i, MHI_CH_STATE_STR(channel->state));
  1598. }
  1599. for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
  1600. channel = &ipa_mhi_client_ctx->dl_channels[i];
  1601. if (!channel->valid)
  1602. continue;
  1603. if (update_rp) {
  1604. res = ipa_mhi_query_ch_info(channel->client,
  1605. &channel->ch_info);
  1606. if (res) {
  1607. IPA_MHI_ERR("gsi_query_channel_info failed\n");
  1608. ipa_assert();
  1609. return;
  1610. }
  1611. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1612. &channel->ch_info.rp,
  1613. channel->channel_context_addr +
  1614. offsetof(struct ipa_mhi_ch_ctx, rp),
  1615. sizeof(channel->ch_info.rp));
  1616. if (res) {
  1617. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1618. ipa_assert();
  1619. return;
  1620. }
  1621. }
  1622. res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
  1623. &channel->state, channel->channel_context_addr +
  1624. offsetof(struct ipa_mhi_ch_ctx, chstate),
  1625. sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
  1626. if (res) {
  1627. IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
  1628. ipa_assert();
  1629. return;
  1630. }
  1631. IPA_MHI_DBG("Updated DL CH=%d state to %s on host\n",
  1632. i, MHI_CH_STATE_STR(channel->state));
  1633. }
  1634. }
  1635. static int ipa_mhi_suspend_dl(bool force)
  1636. {
  1637. int res;
  1638. res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
  1639. IPA_MHI_MAX_DL_CHANNELS);
  1640. if (res) {
  1641. IPA_MHI_ERR(
  1642. "ipa_mhi_suspend_channels for dl failed %d\n", res);
  1643. goto fail_suspend_dl_channel;
  1644. }
  1645. res = ipa_mhi_stop_event_update_channels
  1646. (ipa_mhi_client_ctx->dl_channels,
  1647. IPA_MHI_MAX_DL_CHANNELS);
  1648. if (res) {
  1649. IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
  1650. goto fail_stop_event_update_dl_channel;
  1651. }
  1652. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  1653. if (ipa_mhi_has_open_aggr_frame()) {
  1654. IPA_MHI_DBG("There is an open aggr frame\n");
  1655. if (force) {
  1656. ipa_mhi_client_ctx->trigger_wakeup = true;
  1657. } else {
  1658. res = -EAGAIN;
  1659. goto fail_stop_event_update_dl_channel;
  1660. }
  1661. }
  1662. }
  1663. return 0;
  1664. fail_stop_event_update_dl_channel:
  1665. ipa_mhi_resume_channels(true,
  1666. ipa_mhi_client_ctx->dl_channels,
  1667. IPA_MHI_MAX_DL_CHANNELS);
  1668. fail_suspend_dl_channel:
  1669. return res;
  1670. }
/**
 * ipa_mhi_suspend() - Suspend MHI accelerated channels
 * @force:
 *	false: in case of data pending in IPA, MHI channels will not be
 *	suspended and function will fail.
 *	true: in case of data pending in IPA, make sure no further access from
 *	IPA to PCIe is possible. In this case suspend cannot fail.
 *
 * This function is called by MHI client driver on MHI suspend.
 * This function is called after MHI channel was started.
 * When this function returns device can move to M1/M2/M3/D3cold state.
 *
 * Sequence: DL suspend -> UL suspend -> host channel-state sync (GSI) ->
 * PM client deactivation -> state SUSPENDED. On any failure the already
 * completed steps are unwound and state returns to STARTED.
 *
 * Return codes: 0	  : success
 *		 negative : error
 */
int ipa_mhi_suspend(bool force)
{
	int res;
	/* out-params of ipa_mhi_suspend_ul(); force_clear is consumed by
	 * the error path below. Both are only meaningful once
	 * ipa_mhi_suspend_ul() has run (it is the only goto source that
	 * reaches the force_clear check).
	 */
	bool empty;
	bool force_clear;

	IPA_MHI_FUNC_ENTRY();

	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
		return res;
	}

	/* DL first so the device stops pushing data toward the host. */
	res = ipa_mhi_suspend_dl(force);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_dl failed %d\n", res);
		goto fail_suspend_dl_channel;
	}

	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);

	res = ipa_mhi_suspend_ul(force, &empty, &force_clear);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_ul failed %d\n", res);
		goto fail_suspend_ul_channel;
	}

	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
		ipa_mhi_update_host_ch_state(true);

	/*
	 * hold IPA clocks and release them after all
	 * IPA PM clients are deactivated to make sure tag process
	 * will not start
	 */
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
	if (res) {
		IPA_MHI_ERR("fail to deactivate client %d\n", res);
		goto fail_deactivate_pm;
	}

	res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
	if (res) {
		IPA_MHI_ERR("fail to deactivate client %d\n", res);
		goto fail_deactivate_modem_pm;
	}

	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);

	/* Skip the pre-gating tag process if the pipes never drained. */
	if (!empty)
		ipa_set_tag_process_before_gating(false);

	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
		goto fail_release_cons;
	}

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPA_MHI_FUNC_EXIT();
	return 0;

	/* Unwind in reverse order; later labels fall through to earlier
	 * cleanup stages.
	 */
fail_release_cons:
	ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_deactivate_modem_pm:
	ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_deactivate_pm:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail_suspend_ul_channel:
	ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels,
		IPA_MHI_MAX_UL_CHANNELS);
	/*
	 * NOTE(review): when ipa_mhi_suspend_ul() succeeded after using
	 * force-clear, it already disabled force-clear and bumped
	 * qmi_req_id, yet force_clear is still true here. Reaching this
	 * point from the PM-failure labels therefore issues a second
	 * disable with the new req id — confirm with the QMI service
	 * whether that is benign or a latent double-disable.
	 */
	if (force_clear) {
		if (
		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id)) {
			IPA_MHI_ERR("failed to disable force clear\n");
			ipa_assert();
		}
		IPA_MHI_DBG("force clear datapath disabled\n");
		ipa_mhi_client_ctx->qmi_req_id++;
	}
fail_suspend_dl_channel:
	ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->dl_channels,
		IPA_MHI_MAX_DL_CHANNELS);
	ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
	return res;
}
  1761. /**
  1762. * ipa_mhi_resume() - Resume MHI accelerated channels
  1763. *
  1764. * This function is called by MHI client driver on MHI resume.
  1765. * This function is called after MHI channel was suspended.
  1766. * When this function returns device can move to M0 state.
  1767. * This function is doing the following:
  1768. * - Send command to uC/GSI to resume corresponding MHI channel
  1769. * - Activate PM clients
  1770. * - Resume data to IPA
  1771. *
  1772. * Return codes: 0 : success
  1773. * negative : error
  1774. */
  1775. int ipa_mhi_resume(void)
  1776. {
  1777. int res;
  1778. IPA_MHI_FUNC_ENTRY();
  1779. res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
  1780. if (res) {
  1781. IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
  1782. return res;
  1783. }
  1784. res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
  1785. if (res) {
  1786. IPA_MHI_ERR("fail to activate client %d\n", res);
  1787. goto fail_pm_activate;
  1788. }
  1789. res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  1790. if (res) {
  1791. IPA_MHI_ERR("fail to activate client %d\n", res);
  1792. goto fail_pm_activate_modem;
  1793. }
  1794. /* resume all UL channels */
  1795. res = ipa_mhi_resume_channels(false,
  1796. ipa_mhi_client_ctx->ul_channels,
  1797. IPA_MHI_MAX_UL_CHANNELS);
  1798. if (res) {
  1799. IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
  1800. goto fail_resume_ul_channels;
  1801. }
  1802. res = ipa_mhi_resume_channels(false,
  1803. ipa_mhi_client_ctx->dl_channels,
  1804. IPA_MHI_MAX_DL_CHANNELS);
  1805. if (res) {
  1806. IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
  1807. res);
  1808. goto fail_resume_dl_channels;
  1809. }
  1810. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
  1811. ipa_mhi_update_host_ch_state(false);
  1812. res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
  1813. if (res) {
  1814. IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
  1815. goto fail_set_state;
  1816. }
  1817. IPA_MHI_FUNC_EXIT();
  1818. return 0;
  1819. fail_set_state:
  1820. ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
  1821. IPA_MHI_MAX_DL_CHANNELS);
  1822. fail_resume_dl_channels:
  1823. ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
  1824. IPA_MHI_MAX_UL_CHANNELS);
  1825. fail_resume_ul_channels:
  1826. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  1827. fail_pm_activate_modem:
  1828. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
  1829. fail_pm_activate:
  1830. ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
  1831. return res;
  1832. }
  1833. static int ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
  1834. int num_of_channels)
  1835. {
  1836. struct ipa_mhi_channel_ctx *channel;
  1837. int i, res;
  1838. u32 clnt_hdl;
  1839. for (i = 0; i < num_of_channels; i++) {
  1840. channel = &channels[i];
  1841. if (!channel->valid)
  1842. continue;
  1843. if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID)
  1844. continue;
  1845. if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
  1846. clnt_hdl = ipa_get_ep_mapping(channel->client);
  1847. IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
  1848. res = ipa_mhi_disconnect_pipe(clnt_hdl);
  1849. if (res) {
  1850. IPA_MHI_ERR(
  1851. "failed to disconnect pipe %d, err %d\n"
  1852. , clnt_hdl, res);
  1853. goto fail;
  1854. }
  1855. }
  1856. res = ipa_mhi_destroy_channel(channel->client);
  1857. if (res) {
  1858. IPA_MHI_ERR(
  1859. "ipa_mhi_destroy_channel failed %d"
  1860. , res);
  1861. goto fail;
  1862. }
  1863. }
  1864. return 0;
  1865. fail:
  1866. return res;
  1867. }
  1868. /**
  1869. * ipa_mhi_destroy_all_channels() - Destroy MHI IPA channels
  1870. *
  1871. * This function is called by IPA MHI client driver on MHI reset to destroy all
  1872. * IPA MHI channels.
  1873. */
  1874. int ipa_mhi_destroy_all_channels(void)
  1875. {
  1876. int res;
  1877. IPA_MHI_FUNC_ENTRY();
  1878. /* reset all UL and DL acc channels and its accociated event rings */
  1879. res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->ul_channels,
  1880. IPA_MHI_MAX_UL_CHANNELS);
  1881. if (res) {
  1882. IPA_MHI_ERR("ipa_mhi_destroy_channels(ul_channels) failed %d\n",
  1883. res);
  1884. return -EPERM;
  1885. }
  1886. IPA_MHI_DBG("All UL channels are disconnected\n");
  1887. res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->dl_channels,
  1888. IPA_MHI_MAX_DL_CHANNELS);
  1889. if (res) {
  1890. IPA_MHI_ERR("ipa_mhi_destroy_channels(dl_channels) failed %d\n",
  1891. res);
  1892. return -EPERM;
  1893. }
  1894. IPA_MHI_DBG("All DL channels are disconnected\n");
  1895. IPA_MHI_FUNC_EXIT();
  1896. return 0;
  1897. }
/* Remove the IPA MHI debugfs directory and everything below it. */
static void ipa_mhi_debugfs_destroy(void)
{
	debugfs_remove_recursive(dent);
}
  1902. static void ipa_mhi_deregister_pm(void)
  1903. {
  1904. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
  1905. ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
  1906. ipa_mhi_client_ctx->pm_hdl = ~0;
  1907. ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
  1908. ipa_pm_deregister(ipa_mhi_client_ctx->modem_pm_hdl);
  1909. ipa_mhi_client_ctx->modem_pm_hdl = ~0;
  1910. }
/**
 * ipa_mhi_destroy() - Destroy MHI IPA
 *
 * This function is called by MHI client driver on MHI reset to destroy all IPA
 * MHI resources.
 * When this function returns ipa_mhi can re-initialize.
 *
 * Teardown order matters: channels are destroyed before the PM clients,
 * DMA, debugfs and the workqueue are released, and the context is freed
 * last so the helpers above can still dereference it.
 */
void ipa_mhi_destroy(void)
{
	int res;

	IPA_MHI_FUNC_ENTRY();

	/* Idempotent: safe to call when init never ran or destroy already
	 * completed.
	 */
	if (!ipa_mhi_client_ctx) {
		IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n");
		return;
	}

	ipa_deregister_client_callback(IPA_CLIENT_MHI_PROD);

	/* reset all UL and DL acc channels and its accociated event rings */
	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
		res = ipa_mhi_destroy_all_channels();
		if (res) {
			IPA_MHI_ERR("ipa_mhi_destroy_all_channels failed %d\n",
				res);
			goto fail;
		}
	}
	IPA_MHI_DBG("All channels are disconnected\n");

	/* On SPS transport the uC owns the MHI state; clean it up there. */
	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS) {
		IPA_MHI_DBG("cleanup uC MHI\n");
		ipa_uc_mhi_cleanup();
	}

	ipa_mhi_deregister_pm();
	ipa_dma_destroy();
	ipa_mhi_debugfs_destroy();
	destroy_workqueue(ipa_mhi_client_ctx->wq);
	kfree(ipa_mhi_client_ctx);
	ipa_mhi_client_ctx = NULL;	/* allows a future re-init */
	IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n");

	IPA_MHI_FUNC_EXIT();
	return;
fail:
	/* Channel teardown failure leaves hardware in an unknown state. */
	ipa_assert();
}
  1953. static void ipa_mhi_pm_cb(void *p, enum ipa_pm_cb_event event)
  1954. {
  1955. unsigned long flags;
  1956. IPA_MHI_FUNC_ENTRY();
  1957. if (event != IPA_PM_REQUEST_WAKEUP) {
  1958. IPA_MHI_ERR("Unexpected event %d\n", event);
  1959. WARN_ON(1);
  1960. return;
  1961. }
  1962. IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
  1963. spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
  1964. if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
  1965. ipa_mhi_notify_wakeup();
  1966. } else if (ipa_mhi_client_ctx->state ==
  1967. IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
  1968. /* wakeup event will be trigger after suspend finishes */
  1969. ipa_mhi_client_ctx->trigger_wakeup = true;
  1970. }
  1971. spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
  1972. IPA_MHI_DBG("EXIT");
  1973. }
  1974. static int ipa_mhi_register_pm(void)
  1975. {
  1976. int res;
  1977. struct ipa_pm_register_params params;
  1978. memset(&params, 0, sizeof(params));
  1979. params.name = "MHI";
  1980. params.callback = ipa_mhi_pm_cb;
  1981. params.group = IPA_PM_GROUP_DEFAULT;
  1982. res = ipa_pm_register(&params, &ipa_mhi_client_ctx->pm_hdl);
  1983. if (res) {
  1984. IPA_MHI_ERR("fail to register with PM %d\n", res);
  1985. return res;
  1986. }
  1987. res = ipa_pm_associate_ipa_cons_to_client(ipa_mhi_client_ctx->pm_hdl,
  1988. IPA_CLIENT_MHI_CONS);
  1989. if (res) {
  1990. IPA_MHI_ERR("fail to associate cons with PM %d\n", res);
  1991. goto fail_pm_cons;
  1992. }
  1993. res = ipa_pm_set_throughput(ipa_mhi_client_ctx->pm_hdl, 1000);
  1994. if (res) {
  1995. IPA_MHI_ERR("fail to set perf profile to PM %d\n", res);
  1996. goto fail_pm_cons;
  1997. }
  1998. /* create a modem client for clock scaling */
  1999. memset(&params, 0, sizeof(params));
  2000. params.name = "MODEM (MHI)";
  2001. params.group = IPA_PM_GROUP_MODEM;
  2002. params.skip_clk_vote = true;
  2003. res = ipa_pm_register(&params, &ipa_mhi_client_ctx->modem_pm_hdl);
  2004. if (res) {
  2005. IPA_MHI_ERR("fail to register with PM %d\n", res);
  2006. goto fail_pm_cons;
  2007. }
  2008. return 0;
  2009. fail_pm_cons:
  2010. ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
  2011. ipa_mhi_client_ctx->pm_hdl = ~0;
  2012. return res;
  2013. }
  2014. /**
  2015. * ipa_mhi_init() - Initialize IPA MHI driver
  2016. * @params: initialization params
  2017. *
  2018. * This function is called by MHI client driver on boot to initialize IPA MHI
  2019. * Driver. When this function returns device can move to READY state.
  2020. * This function is doing the following:
  2021. * - Initialize MHI IPA internal data structures
  2022. * - Register with PM
  2023. * - Initialize debugfs
  2024. *
  2025. * Return codes: 0 : success
  2026. * negative : error
  2027. */
  2028. int ipa_mhi_init(struct ipa_mhi_init_params *params)
  2029. {
  2030. int res;
  2031. IPA_MHI_FUNC_ENTRY();
  2032. if (!params) {
  2033. IPA_MHI_ERR("null args\n");
  2034. return -EINVAL;
  2035. }
  2036. if (!params->notify) {
  2037. IPA_MHI_ERR("null notify function\n");
  2038. return -EINVAL;
  2039. }
  2040. if (ipa_mhi_client_ctx) {
  2041. IPA_MHI_ERR("already initialized\n");
  2042. return -EPERM;
  2043. }
  2044. IPA_MHI_DBG("notify = %pS priv = %pK\n", params->notify, params->priv);
  2045. IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
  2046. params->msi.addr_low, params->msi.addr_hi);
  2047. IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
  2048. params->msi.data, params->msi.mask);
  2049. IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
  2050. IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
  2051. IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
  2052. IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40);
  2053. IPA_MHI_DBG("test_mode=%d\n", params->test_mode);
  2054. /* Initialize context */
  2055. ipa_mhi_client_ctx = kzalloc(sizeof(*ipa_mhi_client_ctx), GFP_KERNEL);
  2056. if (!ipa_mhi_client_ctx) {
  2057. res = -EFAULT;
  2058. goto fail_alloc_ctx;
  2059. }
  2060. ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
  2061. ipa_mhi_client_ctx->cb_notify = params->notify;
  2062. ipa_mhi_client_ctx->cb_priv = params->priv;
  2063. spin_lock_init(&ipa_mhi_client_ctx->state_lock);
  2064. ipa_mhi_client_ctx->msi = params->msi;
  2065. ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
  2066. ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
  2067. ipa_mhi_client_ctx->first_er_idx = params->first_er_idx;
  2068. ipa_mhi_client_ctx->qmi_req_id = 0;
  2069. ipa_mhi_client_ctx->use_ipadma = true;
  2070. ipa_mhi_client_ctx->assert_bit40 = !!params->assert_bit40;
  2071. ipa_mhi_client_ctx->test_mode = params->test_mode;
  2072. ipa_mhi_client_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
  2073. if (!ipa_mhi_client_ctx->wq) {
  2074. IPA_MHI_ERR("failed to create workqueue\n");
  2075. res = -EFAULT;
  2076. goto fail_create_wq;
  2077. }
  2078. res = ipa_dma_init();
  2079. if (res) {
  2080. IPA_MHI_ERR("failed to init ipa dma %d\n", res);
  2081. goto fail_dma_init;
  2082. }
  2083. res = ipa_mhi_register_pm();
  2084. if (res) {
  2085. IPA_MHI_ERR("failed to create PM resources\n");
  2086. res = -EFAULT;
  2087. goto fail_pm;
  2088. }
  2089. if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
  2090. ipa_mhi_set_state(IPA_MHI_STATE_READY);
  2091. } else {
  2092. /* Initialize uC interface */
  2093. ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
  2094. ipa_mhi_uc_wakeup_request_cb);
  2095. if (ipa_uc_state_check() == 0)
  2096. ipa_mhi_set_state(IPA_MHI_STATE_READY);
  2097. }
  2098. ipa_register_client_callback(&ipa_mhi_set_lock_unlock, NULL,
  2099. IPA_CLIENT_MHI_PROD);
  2100. /* Initialize debugfs */
  2101. ipa_mhi_debugfs_init();
  2102. IPA_MHI_FUNC_EXIT();
  2103. return 0;
  2104. fail_pm:
  2105. ipa_dma_destroy();
  2106. fail_dma_init:
  2107. destroy_workqueue(ipa_mhi_client_ctx->wq);
  2108. fail_create_wq:
  2109. kfree(ipa_mhi_client_ctx);
  2110. ipa_mhi_client_ctx = NULL;
  2111. fail_alloc_ctx:
  2112. return res;
  2113. }
  2114. static void ipa_mhi_cache_dl_ul_sync_info(
  2115. struct ipa_config_req_msg_v01 *config_req)
  2116. {
  2117. ipa_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
  2118. ipa_cached_dl_ul_sync_info.params.UlAccmVal =
  2119. (config_req->ul_accumulation_time_limit_valid) ?
  2120. config_req->ul_accumulation_time_limit : 0;
  2121. ipa_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
  2122. (config_req->ul_msi_event_threshold_valid) ?
  2123. config_req->ul_msi_event_threshold : 0;
  2124. ipa_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
  2125. (config_req->dl_msi_event_threshold_valid) ?
  2126. config_req->dl_msi_event_threshold : 0;
  2127. }
  2128. /**
  2129. * ipa_mhi_handle_ipa_config_req() - hanle IPA CONFIG QMI message
  2130. *
  2131. * This function is called by by IPA QMI service to indicate that IPA CONFIG
  2132. * message was sent from modem. IPA MHI will update this information to IPA uC
  2133. * or will cache it until IPA MHI will be initialized.
  2134. *
  2135. * Return codes: 0 : success
  2136. * negative : error
  2137. */
  2138. int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
  2139. {
  2140. IPA_MHI_FUNC_ENTRY();
  2141. if (ipa_get_transport_type() != IPA_TRANSPORT_TYPE_GSI) {
  2142. ipa_mhi_cache_dl_ul_sync_info(config_req);
  2143. if (ipa_mhi_client_ctx &&
  2144. ipa_mhi_client_ctx->state !=
  2145. IPA_MHI_STATE_INITIALIZED)
  2146. ipa_uc_mhi_send_dl_ul_sync_info(
  2147. &ipa_cached_dl_ul_sync_info);
  2148. }
  2149. IPA_MHI_FUNC_EXIT();
  2150. return 0;
  2151. }
  2152. int ipa_mhi_is_using_dma(bool *flag)
  2153. {
  2154. IPA_MHI_FUNC_ENTRY();
  2155. if (!ipa_mhi_client_ctx) {
  2156. IPA_MHI_ERR("not initialized\n");
  2157. return -EPERM;
  2158. }
  2159. *flag = ipa_mhi_client_ctx->use_ipadma ? true : false;
  2160. IPA_MHI_FUNC_EXIT();
  2161. return 0;
  2162. }
  2163. EXPORT_SYMBOL(ipa_mhi_is_using_dma);
/* Map an IPA MHI state value to its human-readable name. */
const char *ipa_mhi_get_state_str(int state)
{
	return MHI_STATE_STR(state);
}
EXPORT_SYMBOL(ipa_mhi_get_state_str);
  2169. MODULE_LICENSE("GPL v2");
  2170. MODULE_DESCRIPTION("IPA MHI client driver");