  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  4. *
  5. */
  6. #include <linux/delay.h>
  7. #include <linux/device.h>
  8. #include <linux/dma-direction.h>
  9. #include <linux/dma-mapping.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/list.h>
  12. #include <linux/mhi.h>
  13. #include <linux/module.h>
  14. #include <linux/skbuff.h>
  15. #include <linux/slab.h>
  16. #include "internal.h"
  17. int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
  18. void __iomem *base, u32 offset, u32 *out)
  19. {
  20. return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
  21. }
  22. int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
  23. void __iomem *base, u32 offset,
  24. u32 mask, u32 *out)
  25. {
  26. u32 tmp;
  27. int ret;
  28. ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
  29. if (ret)
  30. return ret;
  31. *out = (tmp & mask) >> __ffs(mask);
  32. return 0;
  33. }
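/*
 * Repeatedly read a masked register field until it matches @val or the
 * controller's timeout_ms budget is exhausted, sleeping @delayus between
 * reads. Returns 0 on match, a read error, or -ETIMEDOUT.
 */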
  34. int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
  35. void __iomem *base, u32 offset,
  36. u32 mask, u32 val, u32 delayus)
  37. {
  38. int ret;
  39. u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
  40. while (retry--) {
  41. ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
  42. if (ret)
  43. return ret;
  44. if (out == val)
  45. return 0;
  46. fsleep(delayus);
  47. }
  48. return -ETIMEDOUT;
  49. }
  50. void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
  51. u32 offset, u32 val)
  52. {
  53. mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
  54. }
  55. int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
  56. void __iomem *base, u32 offset, u32 mask,
  57. u32 val)
  58. {
  59. int ret;
  60. u32 tmp;
  61. ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
  62. if (ret)
  63. return ret;
  64. tmp &= ~mask;
  65. tmp |= (val << __ffs(mask));
  66. mhi_write_reg(mhi_cntrl, base, offset, tmp);
  67. return 0;
  68. }
  69. void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
  70. dma_addr_t db_val)
  71. {
  72. mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
  73. mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
  74. }
  75. void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
  76. struct db_cfg *db_cfg,
  77. void __iomem *db_addr,
  78. dma_addr_t db_val)
  79. {
  80. if (db_cfg->db_mode) {
  81. db_cfg->db_val = db_val;
  82. mhi_write_db(mhi_cntrl, db_addr, db_val);
  83. db_cfg->db_mode = 0;
  84. }
  85. }
  86. void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
  87. struct db_cfg *db_cfg,
  88. void __iomem *db_addr,
  89. dma_addr_t db_val)
  90. {
  91. db_cfg->db_val = db_val;
  92. mhi_write_db(mhi_cntrl, db_addr, db_val);
  93. }
  94. void mhi_ring_er_db(struct mhi_event *mhi_event)
  95. {
  96. struct mhi_ring *ring = &mhi_event->ring;
  97. mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
  98. ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
  99. }
  100. void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
  101. {
  102. dma_addr_t db;
  103. struct mhi_ring *ring = &mhi_cmd->ring;
  104. db = ring->iommu_base + (ring->wp - ring->base);
  105. *ring->ctxt_wp = cpu_to_le64(db);
  106. mhi_write_db(mhi_cntrl, ring->db_addr, db);
  107. }
  108. void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
  109. struct mhi_chan *mhi_chan)
  110. {
  111. struct mhi_ring *ring = &mhi_chan->tre_ring;
  112. dma_addr_t db;
  113. db = ring->iommu_base + (ring->wp - ring->base);
  114. /*
  115. * Writes to the new ring element must be visible to the hardware
  116. * before letting the hardware know there is a new element to fetch.
  117. */
  118. dma_wmb();
  119. *ring->ctxt_wp = cpu_to_le64(db);
  120. mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
  121. ring->db_addr, db);
  122. }
  123. enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
  124. {
  125. u32 exec;
  126. int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
  127. return (ret) ? MHI_EE_MAX : exec;
  128. }
  129. EXPORT_SYMBOL_GPL(mhi_get_exec_env);
  130. enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
  131. {
  132. u32 state;
  133. int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
  134. MHISTATUS_MHISTATE_MASK, &state);
  135. return ret ? MHI_STATE_MAX : state;
  136. }
  137. EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
  138. void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
  139. {
  140. if (mhi_cntrl->reset) {
  141. mhi_cntrl->reset(mhi_cntrl);
  142. return;
  143. }
  144. /* Generic MHI SoC reset */
  145. mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
  146. MHI_SOC_RESET_REQ);
  147. }
  148. EXPORT_SYMBOL_GPL(mhi_soc_reset);
  149. int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
  150. struct mhi_buf_info *buf_info)
  151. {
  152. buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
  153. buf_info->v_addr, buf_info->len,
  154. buf_info->dir);
  155. if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
  156. return -ENOMEM;
  157. return 0;
  158. }
  159. int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
  160. struct mhi_buf_info *buf_info)
  161. {
  162. void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
  163. &buf_info->p_addr, GFP_ATOMIC);
  164. if (!buf)
  165. return -ENOMEM;
  166. if (buf_info->dir == DMA_TO_DEVICE)
  167. memcpy(buf, buf_info->v_addr, buf_info->len);
  168. buf_info->bb_addr = buf;
  169. return 0;
  170. }
  171. void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
  172. struct mhi_buf_info *buf_info)
  173. {
  174. dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
  175. buf_info->dir);
  176. }
  177. void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
  178. struct mhi_buf_info *buf_info)
  179. {
  180. if (buf_info->dir == DMA_FROM_DEVICE)
  181. memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
  182. dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
  183. buf_info->bb_addr, buf_info->p_addr);
  184. }
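/*
 * Number of free elements in a ring. One slot is always left unused so that
 * a full ring (WP one element behind RP) can be distinguished from an empty
 * ring (WP == RP); when WP is ahead of RP, the free space wraps around the
 * end of the ring and is counted in two segments.
 */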
  185. static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
  186. struct mhi_ring *ring)
  187. {
  188. int nr_el;
  189. if (ring->wp < ring->rp) {
  190. nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
  191. } else {
  192. nr_el = (ring->rp - ring->base) / ring->el_size;
  193. nr_el += ((ring->base + ring->len - ring->wp) /
  194. ring->el_size) - 1;
  195. }
  196. return nr_el;
  197. }
  198. void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
  199. {
  200. return (addr - ring->iommu_base) + ring->base;
  201. }
  202. dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr)
  203. {
  204. return (addr - ring->base) + ring->iommu_base;
  205. }
  206. static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
  207. struct mhi_ring *ring)
  208. {
  209. ring->wp += ring->el_size;
  210. if (ring->wp >= (ring->base + ring->len))
  211. ring->wp = ring->base;
  212. /* smp update */
  213. smp_wmb();
  214. }
  215. static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
  216. struct mhi_ring *ring)
  217. {
  218. ring->rp += ring->el_size;
  219. if (ring->rp >= (ring->base + ring->len))
  220. ring->rp = ring->base;
  221. /* smp update */
  222. smp_wmb();
  223. }
  224. int mhi_destroy_device(struct device *dev, void *data)
  225. {
  226. struct mhi_chan *ul_chan, *dl_chan;
  227. struct mhi_device *mhi_dev;
  228. struct mhi_controller *mhi_cntrl;
  229. enum mhi_ee_type ee = MHI_EE_MAX;
  230. if (dev->bus != &mhi_bus_type)
  231. return 0;
  232. mhi_dev = to_mhi_device(dev);
  233. mhi_cntrl = mhi_dev->mhi_cntrl;
  234. /* Only destroy virtual devices that are attached to the bus */
  235. if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
  236. return 0;
  237. ul_chan = mhi_dev->ul_chan;
  238. dl_chan = mhi_dev->dl_chan;
  239. /*
  240. * If an execution environment is specified, remove only those devices
  241. * that were started in it, based on each channel's ee_mask, as we move
  242. * on to a different execution environment.
  243. */
  244. if (data)
  245. ee = *(enum mhi_ee_type *)data;
  246. /*
  247. * For the suspend and resume case, this function will get called
  248. * without mhi_unregister_controller(). Hence, we need to drop the
  249. * references to mhi_dev created for ul and dl channels. We can
  250. * be sure that there will be no instances of mhi_dev left after
  251. * this.
  252. */
  253. if (ul_chan) {
  254. if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
  255. return 0;
  256. put_device(&ul_chan->mhi_dev->dev);
  257. }
  258. if (dl_chan) {
  259. if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
  260. return 0;
  261. put_device(&dl_chan->mhi_dev->dev);
  262. }
  263. MHI_VERB(dev, "destroy device for chan:%s\n",
  264. mhi_dev->name);
  265. /* Notify the client and remove the device from MHI bus */
  266. device_del(dev);
  267. put_device(dev);
  268. return 0;
  269. }
  270. int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
  271. enum dma_data_direction dir)
  272. {
  273. struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
  274. struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
  275. mhi_dev->ul_chan : mhi_dev->dl_chan;
  276. struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
  277. return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
  278. }
  279. EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
  280. void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
  281. {
  282. struct mhi_driver *mhi_drv;
  283. if (!mhi_dev->dev.driver)
  284. return;
  285. mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
  286. if (mhi_drv->status_cb)
  287. mhi_drv->status_cb(mhi_dev, cb_reason);
  288. }
  289. EXPORT_SYMBOL_GPL(mhi_notify);
  290. /* Bind MHI channels to MHI devices */
  291. void mhi_create_devices(struct mhi_controller *mhi_cntrl)
  292. {
  293. struct mhi_chan *mhi_chan;
  294. struct mhi_device *mhi_dev;
  295. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  296. int i, ret;
  297. mhi_chan = mhi_cntrl->mhi_chan;
  298. for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
  299. if (!mhi_chan->configured || mhi_chan->mhi_dev ||
  300. !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
  301. continue;
  302. mhi_dev = mhi_alloc_device(mhi_cntrl);
  303. if (IS_ERR(mhi_dev))
  304. return;
  305. mhi_dev->dev_type = MHI_DEVICE_XFER;
  306. switch (mhi_chan->dir) {
  307. case DMA_TO_DEVICE:
  308. mhi_dev->ul_chan = mhi_chan;
  309. mhi_dev->ul_chan_id = mhi_chan->chan;
  310. mhi_dev->ul_event_id = mhi_chan->er_index;
  311. break;
  312. case DMA_NONE:
  313. fallthrough;
  314. case DMA_BIDIRECTIONAL:
  315. mhi_dev->ul_chan_id = mhi_chan->chan;
  316. mhi_dev->ul_event_id = mhi_chan->er_index;
  317. fallthrough;
  318. case DMA_FROM_DEVICE:
  319. /* We use dl_chan as offload channels */
  320. mhi_dev->dl_chan = mhi_chan;
  321. mhi_dev->dl_chan_id = mhi_chan->chan;
  322. mhi_dev->dl_event_id = mhi_chan->er_index;
  323. break;
  324. default:
  325. MHI_ERR(dev, "Direction not supported\n");
  326. put_device(&mhi_dev->dev);
  327. return;
  328. }
  329. get_device(&mhi_dev->dev);
  330. mhi_chan->mhi_dev = mhi_dev;
  331. /* Check next channel if it matches */
  332. if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
  333. if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
  334. i++;
  335. mhi_chan++;
  336. if (mhi_chan->dir == DMA_TO_DEVICE) {
  337. mhi_dev->ul_chan = mhi_chan;
  338. mhi_dev->ul_chan_id = mhi_chan->chan;
  339. mhi_dev->ul_event_id = mhi_chan->er_index;
  340. } else {
  341. mhi_dev->dl_chan = mhi_chan;
  342. mhi_dev->dl_chan_id = mhi_chan->chan;
  343. mhi_dev->dl_event_id = mhi_chan->er_index;
  344. }
  345. get_device(&mhi_dev->dev);
  346. mhi_chan->mhi_dev = mhi_dev;
  347. }
  348. }
  349. /* Channel name is the same for both UL and DL */
  350. mhi_dev->name = mhi_chan->name;
  351. dev_set_name(&mhi_dev->dev, "%s_%s",
  352. dev_name(&mhi_cntrl->mhi_dev->dev),
  353. mhi_dev->name);
  354. /* Init wakeup source if available */
  355. if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
  356. device_init_wakeup(&mhi_dev->dev, true);
  357. ret = device_add(&mhi_dev->dev);
  358. if (ret)
  359. put_device(&mhi_dev->dev);
  360. }
  361. }
  362. void mhi_process_sleeping_events(struct mhi_controller *mhi_cntrl)
  363. {
  364. struct mhi_event *mhi_event;
  365. struct mhi_event_ctxt *er_ctxt;
  366. struct mhi_ring *ev_ring;
  367. int i;
  368. mhi_event = mhi_cntrl->mhi_event;
  369. for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
  370. if (mhi_event->offload_ev || mhi_event->priority !=
  371. MHI_ER_PRIORITY_HI_SLEEP)
  372. continue;
  373. er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
  374. ev_ring = &mhi_event->ring;
  375. /* Only proceed if event ring has pending events */
  376. if (ev_ring->rp == mhi_to_virtual(ev_ring, er_ctxt->rp))
  377. continue;
  378. queue_work(mhi_cntrl->hiprio_wq, &mhi_event->work);
  379. }
  380. }
  381. irqreturn_t mhi_irq_handler(int irq_number, void *priv)
  382. {
  383. struct mhi_event *mhi_event = priv;
  384. struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
  385. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  386. struct mhi_event_ctxt *er_ctxt;
  387. struct mhi_ring *ev_ring = &mhi_event->ring;
  388. dma_addr_t ptr;
  389. void *dev_rp;
  390. /*
  391. * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq()
  392. * and by that time mhi_ctxt would have been freed. So check for the existence of mhi_ctxt
  393. * before handling the IRQs.
  394. */
  395. if (!mhi_cntrl->mhi_ctxt) {
  396. dev_dbg(&mhi_cntrl->mhi_dev->dev,
  397. "mhi_ctxt has been freed\n");
  398. return IRQ_HANDLED;
  399. }
  400. er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
  401. ptr = le64_to_cpu(er_ctxt->rp);
  402. if (!is_valid_ring_ptr(ev_ring, ptr)) {
  403. MHI_ERR(dev,
  404. "Event ring rp points outside of the event ring\n");
  405. return IRQ_HANDLED;
  406. }
  407. dev_rp = mhi_to_virtual(ev_ring, ptr);
  408. /* Only proceed if event ring has pending events */
  409. if (ev_ring->rp == dev_rp)
  410. return IRQ_HANDLED;
  411. /* For client managed event ring, notify pending data */
  412. if (mhi_event->cl_manage) {
  413. struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
  414. struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
  415. if (mhi_dev)
  416. mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
  417. return IRQ_HANDLED;
  418. }
  419. switch (mhi_event->priority) {
  420. case MHI_ER_PRIORITY_HI_NOSLEEP:
  421. tasklet_hi_schedule(&mhi_event->task);
  422. break;
  423. case MHI_ER_PRIORITY_DEFAULT_NOSLEEP:
  424. tasklet_schedule(&mhi_event->task);
  425. break;
  426. case MHI_ER_PRIORITY_HI_SLEEP:
  427. queue_work(mhi_cntrl->hiprio_wq, &mhi_event->work);
  428. break;
  429. default:
  430. MHI_VERB(dev, "skip unknown priority event\n");
  431. break;
  432. }
  433. return IRQ_HANDLED;
  434. }
  435. irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
  436. {
  437. struct mhi_controller *mhi_cntrl = priv;
  438. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  439. enum mhi_state state;
  440. enum mhi_pm_state pm_state = 0;
  441. enum mhi_ee_type ee;
  442. write_lock_irq(&mhi_cntrl->pm_lock);
  443. if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
  444. write_unlock_irq(&mhi_cntrl->pm_lock);
  445. goto exit_intvec;
  446. }
  447. state = mhi_get_mhi_state(mhi_cntrl);
  448. ee = mhi_get_exec_env(mhi_cntrl);
  449. MHI_VERB(dev, "local ee: %s state: %s device ee: %s state: %s\n",
  450. TO_MHI_EXEC_STR(mhi_cntrl->ee),
  451. mhi_state_str(mhi_cntrl->dev_state),
  452. TO_MHI_EXEC_STR(ee), mhi_state_str(state));
  453. if (state == MHI_STATE_SYS_ERR) {
  454. MHI_VERB(dev, "System error detected\n");
  455. pm_state = mhi_tryset_pm_state(mhi_cntrl,
  456. MHI_PM_SYS_ERR_DETECT);
  457. }
  458. write_unlock_irq(&mhi_cntrl->pm_lock);
  459. if (pm_state != MHI_PM_SYS_ERR_DETECT)
  460. goto exit_intvec;
  461. switch (ee) {
  462. case MHI_EE_RDDM:
  463. /* proceed if power down is not already in progress */
  464. if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
  465. /* notify critical clients with early notifications */
  466. mhi_report_error(mhi_cntrl);
  467. mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
  468. mhi_cntrl->ee = ee;
  469. wake_up_all(&mhi_cntrl->state_event);
  470. }
  471. break;
  472. case MHI_EE_PBL:
  473. case MHI_EE_EDL:
  474. case MHI_EE_PTHRU:
  475. mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
  476. mhi_cntrl->ee = ee;
  477. wake_up_all(&mhi_cntrl->state_event);
  478. mhi_pm_sys_err_handler(mhi_cntrl);
  479. break;
  480. default:
  481. wake_up_all(&mhi_cntrl->state_event);
  482. mhi_pm_sys_err_handler(mhi_cntrl);
  483. break;
  484. }
  485. exit_intvec:
  486. return IRQ_HANDLED;
  487. }
  488. irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
  489. {
  490. struct mhi_controller *mhi_cntrl = dev;
  491. /* Wake up events waiting for state change */
  492. wake_up_all(&mhi_cntrl->state_event);
  493. return IRQ_WAKE_THREAD;
  494. }
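/*
 * Recycle one event ring element in place: advance WP past the element just
 * consumed and publish it to the device through the context WP, then advance
 * the local RP to match. Both pointers wrap at the end of the ring.
 */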
  495. static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
  496. struct mhi_ring *ring)
  497. {
  498. /* Update the WP */
  499. ring->wp += ring->el_size;
  500. if (ring->wp >= (ring->base + ring->len))
  501. ring->wp = ring->base;
  502. *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
  503. /* Update the RP */
  504. ring->rp += ring->el_size;
  505. if (ring->rp >= (ring->base + ring->len))
  506. ring->rp = ring->base;
  507. /* Update to all cores */
  508. smp_wmb();
  509. }
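/*
 * Handle a transfer completion event for a channel. For EOT/EOB/OVERFLOW
 * completions, walk the transfer ring from the local read pointer up to the
 * element the event points at, unmapping each buffer (unless the client
 * pre-mapped it) and invoking the client's xfer_cb. For OOB and DB_MODE
 * completions, switch the channel back to doorbell mode and ring the channel
 * doorbell if elements are still queued.
 */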
  510. static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
  511. struct mhi_ring_element *event,
  512. struct mhi_chan *mhi_chan)
  513. {
  514. struct mhi_ring *buf_ring, *tre_ring;
  515. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  516. struct mhi_result result;
  517. unsigned long flags = 0;
  518. u32 ev_code;
  519. ev_code = MHI_TRE_GET_EV_CODE(event);
  520. buf_ring = &mhi_chan->buf_ring;
  521. tre_ring = &mhi_chan->tre_ring;
  522. result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
  523. -EOVERFLOW : 0;
  524. /*
  525. * If it's a DB event, we need to take the lock as a writer with
  526. * interrupts disabled, because we have to update the doorbell
  527. * register and there is a chance that another thread is doing
  528. * the same.
  529. */
  530. if (ev_code >= MHI_EV_CC_OOB)
  531. write_lock_irqsave(&mhi_chan->lock, flags);
  532. else
  533. read_lock_bh(&mhi_chan->lock);
  534. if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
  535. goto end_process_tx_event;
  536. switch (ev_code) {
  537. case MHI_EV_CC_OVERFLOW:
  538. case MHI_EV_CC_EOB:
  539. case MHI_EV_CC_EOT:
  540. {
  541. dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
  542. struct mhi_ring_element *local_rp, *ev_tre;
  543. void *dev_rp;
  544. struct mhi_buf_info *buf_info;
  545. u16 xfer_len;
  546. if (!is_valid_ring_ptr(tre_ring, ptr)) {
  547. MHI_ERR(dev,
  548. "Event element points outside of the tre ring\n");
  549. break;
  550. }
  551. /* Get the TRB this event points to */
  552. ev_tre = mhi_to_virtual(tre_ring, ptr);
  553. dev_rp = ev_tre + 1;
  554. if (dev_rp >= (tre_ring->base + tre_ring->len))
  555. dev_rp = tre_ring->base;
  556. result.dir = mhi_chan->dir;
  557. local_rp = tre_ring->rp;
  558. while (local_rp != dev_rp) {
  559. buf_info = buf_ring->rp;
  560. /* If it's the last TRE, get length from the event */
  561. if (local_rp == ev_tre)
  562. xfer_len = MHI_TRE_GET_EV_LEN(event);
  563. else
  564. xfer_len = buf_info->len;
  565. /* Unmap if it's not pre-mapped by client */
  566. if (likely(!buf_info->pre_mapped))
  567. mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
  568. result.buf_addr = buf_info->cb_buf;
  569. /* truncate to buf len if xfer_len is larger */
  570. result.bytes_xferd =
  571. min_t(u16, xfer_len, buf_info->len);
  572. mhi_del_ring_element(mhi_cntrl, buf_ring);
  573. mhi_del_ring_element(mhi_cntrl, tre_ring);
  574. local_rp = tre_ring->rp;
  575. read_unlock_bh(&mhi_chan->lock);
  576. /* notify client */
  577. mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
  578. if (mhi_chan->dir == DMA_TO_DEVICE)
  579. atomic_dec(&mhi_cntrl->pending_pkts);
  580. /*
  581. * Recycle the buffer if it was pre-allocated. If requeueing it
  582. * fails, there is not much we can do apart from dropping the
  583. * packet.
  584. */
  585. if (mhi_chan->pre_alloc) {
  586. if (mhi_queue_buf(mhi_chan->mhi_dev,
  587. mhi_chan->dir,
  588. buf_info->cb_buf,
  589. buf_info->len, MHI_EOT)) {
  590. MHI_ERR(dev,
  591. "Error recycling buffer for chan:%d\n",
  592. mhi_chan->chan);
  593. kfree(buf_info->cb_buf);
  594. }
  595. }
  596. read_lock_bh(&mhi_chan->lock);
  597. }
  598. break;
  599. } /* CC_EOT */
  600. case MHI_EV_CC_OOB:
  601. case MHI_EV_CC_DB_MODE:
  602. {
  603. unsigned long pm_lock_flags;
  604. mhi_chan->db_cfg.db_mode = 1;
  605. read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
  606. if (tre_ring->wp != tre_ring->rp &&
  607. MHI_DB_ACCESS_VALID(mhi_cntrl)) {
  608. mhi_ring_chan_db(mhi_cntrl, mhi_chan);
  609. }
  610. read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
  611. break;
  612. }
  613. case MHI_EV_CC_BAD_TRE:
  614. default:
  615. MHI_ERR(dev, "Unknown event 0x%x\n", ev_code);
  616. break;
  617. } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
  618. end_process_tx_event:
  619. if (ev_code >= MHI_EV_CC_OOB)
  620. write_unlock_irqrestore(&mhi_chan->lock, flags);
  621. else
  622. read_unlock_bh(&mhi_chan->lock);
  623. return 0;
  624. }
  625. static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
  626. struct mhi_ring_element *event,
  627. struct mhi_chan *mhi_chan)
  628. {
  629. struct mhi_ring *buf_ring, *tre_ring;
  630. struct mhi_buf_info *buf_info;
  631. struct mhi_result result;
  632. int ev_code;
  633. u32 cookie; /* offset to local descriptor */
  634. u16 xfer_len;
  635. buf_ring = &mhi_chan->buf_ring;
  636. tre_ring = &mhi_chan->tre_ring;
  637. ev_code = MHI_TRE_GET_EV_CODE(event);
  638. cookie = MHI_TRE_GET_EV_COOKIE(event);
  639. xfer_len = MHI_TRE_GET_EV_LEN(event);
  640. /* Received an out-of-bounds cookie */
  641. WARN_ON(cookie >= buf_ring->len);
  642. buf_info = buf_ring->base + cookie;
  643. result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
  644. -EOVERFLOW : 0;
  645. /* truncate to buf len if xfer_len is larger */
  646. result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
  647. result.buf_addr = buf_info->cb_buf;
  648. result.dir = mhi_chan->dir;
  649. read_lock_bh(&mhi_chan->lock);
  650. if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
  651. goto end_process_rsc_event;
  652. WARN_ON(!buf_info->used);
  653. /* notify the client */
  654. mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
  655. /*
  656. * Note: We're arbitrarily incrementing RP even though the completion
  657. * packet we just processed might not correspond to it. We can do this
  658. * because the device is guaranteed to cache descriptors in the order it
  659. * receives them, so even though this completion event is for a different
  660. * descriptor, we can reuse all descriptors up to and including it.
  661. * Example:
  662. * The transfer ring has descriptors: A, B, C, D
  663. * The last descriptor the host queued is D (WP) and the first
  664. * descriptor the host queued is A (RP).
  665. * The completion event we just serviced is for descriptor C.
  666. * We can then safely queue new descriptors to replace A, B, and C
  667. * even though the host has not received completions for A and B.
  668. */
  669. mhi_del_ring_element(mhi_cntrl, tre_ring);
  670. buf_info->used = false;
  671. end_process_rsc_event:
  672. read_unlock_bh(&mhi_chan->lock);
  673. return 0;
  674. }
  675. static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
  676. struct mhi_ring_element *tre)
  677. {
  678. dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
  679. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  680. struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
  681. struct mhi_ring *mhi_ring = &cmd_ring->ring;
  682. struct mhi_ring_element *cmd_pkt;
  683. struct mhi_chan *mhi_chan;
  684. u32 chan;
  685. if (!is_valid_ring_ptr(mhi_ring, ptr)) {
  686. MHI_ERR(dev,
  687. "Event element points outside of the cmd ring\n");
  688. return;
  689. }
  690. cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
  691. if (cmd_pkt != mhi_ring->rp)
  692. panic("Out of order cmd completion: 0x%llx. Expected: 0x%llx\n",
  693. cmd_pkt, mhi_ring->rp);
  694. if (MHI_TRE_GET_CMD_TYPE(cmd_pkt) == MHI_CMD_SFR_CFG) {
  695. mhi_misc_cmd_completion(mhi_cntrl, MHI_CMD_SFR_CFG,
  696. MHI_TRE_GET_EV_CODE(tre));
  697. goto exit_cmd_completion;
  698. }
  699. chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
  700. if (chan < mhi_cntrl->max_chan &&
  701. mhi_cntrl->mhi_chan[chan].configured) {
  702. mhi_chan = &mhi_cntrl->mhi_chan[chan];
  703. write_lock_bh(&mhi_chan->lock);
  704. mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
  705. complete(&mhi_chan->completion);
  706. write_unlock_bh(&mhi_chan->lock);
  707. } else {
  708. MHI_ERR(dev,
  709. "Completion packet for invalid channel ID: %d\n", chan);
  710. }
  711. exit_cmd_completion:
  712. mhi_del_ring_element(mhi_cntrl, mhi_ring);
  713. }
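/*
 * Process events on the control event ring: bandwidth requests, MHI state
 * change events, execution environment changes, command completions and any
 * transfer completions routed here. Returns the number of events consumed,
 * or -EIO if register access was lost or the device-side read pointer is
 * invalid.
 */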
  714. int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
  715. struct mhi_event *mhi_event,
  716. u32 event_quota)
  717. {
  718. struct mhi_ring_element *dev_rp, *local_rp;
  719. struct mhi_ring *ev_ring = &mhi_event->ring;
  720. struct mhi_event_ctxt *er_ctxt =
  721. &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
  722. struct mhi_chan *mhi_chan;
  723. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  724. u32 chan;
  725. int count = 0;
  726. dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
  727. /*
  728. * This is a quick check to avoid unnecessary event processing
  729. * in case MHI is already in error state, but it's still possible
  730. * to transition to error state while processing events
  731. */
  732. if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
  733. return -EIO;
  734. if (!is_valid_ring_ptr(ev_ring, ptr)) {
  735. MHI_ERR(dev,
  736. "Event ring rp points outside of the event ring\n");
  737. return -EIO;
  738. }
  739. dev_rp = mhi_to_virtual(ev_ring, ptr);
  740. local_rp = ev_ring->rp;
  741. while (dev_rp != local_rp) {
  742. enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
  743. MHI_VERB(dev, "RP:0x%llx Processing Event:0x%llx 0x%08x 0x%08x\n",
  744. (u64)mhi_to_physical(ev_ring, local_rp),
  745. local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
  746. switch (type) {
  747. case MHI_PKT_TYPE_BW_REQ_EVENT:
  748. {
  749. struct mhi_link_info *link_info;
  750. link_info = &mhi_cntrl->mhi_link_info;
  751. write_lock_irq(&mhi_cntrl->pm_lock);
  752. link_info->target_link_speed =
  753. MHI_TRE_GET_EV_LINKSPEED(local_rp);
  754. link_info->target_link_width =
  755. MHI_TRE_GET_EV_LINKWIDTH(local_rp);
  756. write_unlock_irq(&mhi_cntrl->pm_lock);
  757. MHI_VERB(dev, "Received BW_REQ event\n");
  758. mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
  759. break;
  760. }
  761. case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
  762. {
  763. enum mhi_state new_state;
  764. new_state = MHI_TRE_GET_EV_STATE(local_rp);
  765. MHI_VERB(dev, "State change event to state: %s\n",
  766. mhi_state_str(new_state));
  767. switch (new_state) {
  768. case MHI_STATE_M0:
  769. mhi_pm_m0_transition(mhi_cntrl);
  770. break;
  771. case MHI_STATE_M1:
  772. mhi_pm_m1_transition(mhi_cntrl);
  773. break;
  774. case MHI_STATE_M3:
  775. mhi_pm_m3_transition(mhi_cntrl);
  776. break;
  777. case MHI_STATE_SYS_ERR:
  778. {
  779. enum mhi_pm_state pm_state;
  780. MHI_VERB(dev, "System error detected\n");
  781. write_lock_irq(&mhi_cntrl->pm_lock);
  782. pm_state = mhi_tryset_pm_state(mhi_cntrl,
  783. MHI_PM_SYS_ERR_DETECT);
  784. write_unlock_irq(&mhi_cntrl->pm_lock);
  785. if (pm_state == MHI_PM_SYS_ERR_DETECT)
  786. mhi_pm_sys_err_handler(mhi_cntrl);
  787. break;
  788. }
  789. default:
  790. MHI_ERR(dev, "Invalid state: %s\n",
  791. mhi_state_str(new_state));
  792. }
  793. break;
  794. }
  795. case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
  796. mhi_process_cmd_completion(mhi_cntrl, local_rp);
  797. break;
  798. case MHI_PKT_TYPE_EE_EVENT:
  799. {
  800. enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
  801. enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
  802. MHI_VERB(dev, "Received EE event: %s\n",
  803. TO_MHI_EXEC_STR(event));
  804. switch (event) {
  805. case MHI_EE_SBL:
  806. st = DEV_ST_TRANSITION_SBL;
  807. break;
  808. case MHI_EE_WFW:
  809. case MHI_EE_AMSS:
  810. st = DEV_ST_TRANSITION_MISSION_MODE;
  811. break;
  812. case MHI_EE_FP:
  813. st = DEV_ST_TRANSITION_FP;
  814. break;
  815. case MHI_EE_RDDM:
  816. mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
  817. write_lock_irq(&mhi_cntrl->pm_lock);
  818. mhi_cntrl->ee = event;
  819. write_unlock_irq(&mhi_cntrl->pm_lock);
  820. wake_up_all(&mhi_cntrl->state_event);
  821. break;
  822. default:
  823. MHI_ERR(dev,
  824. "Unhandled EE event: 0x%x\n", type);
  825. }
  826. if (st != DEV_ST_TRANSITION_MAX)
  827. mhi_queue_state_transition(mhi_cntrl, st);
  828. break;
  829. }
  830. case MHI_PKT_TYPE_TX_EVENT:
  831. chan = MHI_TRE_GET_EV_CHID(local_rp);
  832. WARN_ON(chan >= mhi_cntrl->max_chan);
  833. /*
  834. * Only process the event ring elements whose channel
  835. * ID is within the maximum supported range.
  836. */
  837. if (chan < mhi_cntrl->max_chan) {
  838. mhi_chan = &mhi_cntrl->mhi_chan[chan];
  839. if (!mhi_chan->configured)
  840. break;
  841. parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
  842. event_quota--;
  843. }
  844. break;
  845. default:
  846. MHI_ERR(dev, "Unhandled event type: %d\n", type);
  847. break;
  848. }
  849. mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
  850. local_rp = ev_ring->rp;
  851. ptr = le64_to_cpu(er_ctxt->rp);
  852. if (!is_valid_ring_ptr(ev_ring, ptr)) {
  853. MHI_ERR(dev,
  854. "Event ring rp points outside of the event ring\n");
  855. return -EIO;
  856. }
  857. dev_rp = mhi_to_virtual(ev_ring, ptr);
  858. count++;
  859. }
  860. read_lock_bh(&mhi_cntrl->pm_lock);
  861. if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
  862. mhi_ring_er_db(mhi_event);
  863. read_unlock_bh(&mhi_cntrl->pm_lock);
  864. return count;
  865. }
  866. int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
  867. struct mhi_event *mhi_event,
  868. u32 event_quota)
  869. {
  870. struct mhi_ring_element *dev_rp, *local_rp;
  871. struct mhi_ring *ev_ring = &mhi_event->ring;
  872. struct mhi_event_ctxt *er_ctxt =
  873. &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
  874. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  875. int count = 0;
  876. u32 chan;
  877. struct mhi_chan *mhi_chan;
  878. dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
  879. if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
  880. return -EIO;
  881. if (!is_valid_ring_ptr(ev_ring, ptr)) {
  882. MHI_ERR(dev,
  883. "Event ring rp points outside of the event ring\n");
  884. return -EIO;
  885. }
  886. dev_rp = mhi_to_virtual(ev_ring, ptr);
  887. local_rp = ev_ring->rp;
  888. while (dev_rp != local_rp && event_quota > 0) {
  889. enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
  890. MHI_VERB(dev, "Processing Event:0x%llx 0x%08x 0x%08x\n",
  891. local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
  892. chan = MHI_TRE_GET_EV_CHID(local_rp);
  893. WARN_ON(chan >= mhi_cntrl->max_chan);
  894. /*
  895. * Only process the event ring elements whose channel
  896. * ID is within the maximum supported range.
  897. */
  898. if (chan < mhi_cntrl->max_chan &&
  899. mhi_cntrl->mhi_chan[chan].configured) {
  900. mhi_chan = &mhi_cntrl->mhi_chan[chan];
  901. if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
  902. parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
  903. event_quota--;
  904. } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
  905. parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
  906. event_quota--;
  907. }
  908. }
  909. mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
  910. local_rp = ev_ring->rp;
  911. ptr = le64_to_cpu(er_ctxt->rp);
  912. if (!is_valid_ring_ptr(ev_ring, ptr)) {
  913. MHI_ERR(dev,
  914. "Event ring rp points outside of the event ring\n");
  915. return -EIO;
  916. }
  917. dev_rp = mhi_to_virtual(ev_ring, ptr);
  918. count++;
  919. }
  920. read_lock_bh(&mhi_cntrl->pm_lock);
  921. if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
  922. mhi_ring_er_db(mhi_event);
  923. read_unlock_bh(&mhi_cntrl->pm_lock);
  924. return count;
  925. }
  926. void mhi_ev_task(unsigned long data)
  927. {
  928. struct mhi_event *mhi_event = (struct mhi_event *)data;
  929. struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
  930. /* process all pending events */
  931. spin_lock_bh(&mhi_event->lock);
  932. mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
  933. spin_unlock_bh(&mhi_event->lock);
  934. }
  935. void mhi_ctrl_ev_task(unsigned long data)
  936. {
  937. struct mhi_event *mhi_event = (struct mhi_event *)data;
  938. struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
  939. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  940. enum mhi_state state;
  941. enum mhi_pm_state pm_state = 0;
  942. int ret;
  943. /*
  944. * We can check PM state w/o a lock here because there is no way
  945. * the PM state can change from reg access valid to no access while
  946. * this thread is executing.
  947. */
  948. if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
  949. /*
  950. * We may have a pending event but are not allowed to
  951. * process it since we are probably in a suspended state,
  952. * so trigger a resume.
  953. */
  954. mhi_trigger_resume(mhi_cntrl);
  955. return;
  956. }
  957. /* Process ctrl events */
  958. ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
  959. /*
  960. * We received an IRQ but have no events to process; maybe the device
  961. * went to SYS_ERR state. Check the state to confirm.
  962. */
  963. if (!ret) {
  964. write_lock_irq(&mhi_cntrl->pm_lock);
  965. state = mhi_get_mhi_state(mhi_cntrl);
  966. if (state == MHI_STATE_SYS_ERR) {
  967. MHI_VERB(dev, "System error detected\n");
  968. pm_state = mhi_tryset_pm_state(mhi_cntrl,
  969. MHI_PM_SYS_ERR_DETECT);
  970. }
  971. write_unlock_irq(&mhi_cntrl->pm_lock);
  972. if (pm_state == MHI_PM_SYS_ERR_DETECT)
  973. mhi_pm_sys_err_handler(mhi_cntrl);
  974. }
  975. }
  976. void mhi_process_ev_work(struct work_struct *work)
  977. {
  978. struct mhi_event *mhi_event = container_of(work, struct mhi_event,
  979. work);
  980. struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
  981. struct device *dev = mhi_cntrl->cntrl_dev;
  982. MHI_VERB(dev, "Enter with pm_state:%s MHI_STATE:%s ee:%s\n",
  983. to_mhi_pm_state_str(mhi_cntrl->pm_state),
  984. mhi_state_str(mhi_cntrl->dev_state),
  985. TO_MHI_EXEC_STR(mhi_cntrl->ee));
  986. if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
  987. return;
  988. mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
  989. }
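/*
 * A ring is considered full when advancing WP by one element would make it
 * collide with RP, i.e. one slot is always kept empty.
 */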
  990. static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
  991. struct mhi_ring *ring)
  992. {
  993. void *tmp = ring->wp + ring->el_size;
  994. if (tmp >= (ring->base + ring->len))
  995. tmp = ring->base;
  996. return (tmp == ring->rp);
  997. }
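/*
 * Common queuing path for skb, DMA and raw buffer submissions: refuse in PM
 * error states, bail with -EAGAIN if the transfer ring is full, generate a
 * TRE for the buffer, then wake the device out of suspend if needed and ring
 * the channel doorbell while holding pm_lock for reading.
 */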
  998. static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
  999. enum dma_data_direction dir, enum mhi_flags mflags)
  1000. {
  1001. struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
  1002. struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
  1003. mhi_dev->dl_chan;
  1004. struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
  1005. unsigned long flags;
  1006. int ret;
  1007. if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
  1008. return -EIO;
  1009. ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
  1010. if (unlikely(ret))
  1011. return -EAGAIN;
  1012. ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
  1013. if (unlikely(ret))
  1014. return ret;
  1015. /* Let controller mark last busy for runtime PM framework if needed */
  1016. if (mhi_cntrl->runtime_last_busy)
  1017. mhi_cntrl->runtime_last_busy(mhi_cntrl);
  1018. read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
  1019. /* trigger M3 exit if necessary */
  1020. if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
  1021. mhi_trigger_resume(mhi_cntrl);
  1022. /* Assert dev_wake (to exit/prevent M1/M2) */
  1023. mhi_cntrl->wake_toggle(mhi_cntrl);
  1024. if (mhi_chan->dir == DMA_TO_DEVICE)
  1025. atomic_inc(&mhi_cntrl->pending_pkts);
  1026. if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
  1027. mhi_ring_chan_db(mhi_cntrl, mhi_chan);
  1028. read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
  1029. return ret;
  1030. }
  1031. int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
  1032. struct sk_buff *skb, size_t len, enum mhi_flags mflags)
  1033. {
  1034. struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
  1035. mhi_dev->dl_chan;
  1036. struct mhi_buf_info buf_info = { };
  1037. buf_info.v_addr = skb->data;
  1038. buf_info.cb_buf = skb;
  1039. buf_info.len = len;
  1040. if (unlikely(mhi_chan->pre_alloc))
  1041. return -EINVAL;
  1042. return mhi_queue(mhi_dev, &buf_info, dir, mflags);
  1043. }
  1044. EXPORT_SYMBOL_GPL(mhi_queue_skb);
  1045. int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
  1046. struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
  1047. {
  1048. struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
  1049. mhi_dev->dl_chan;
  1050. struct mhi_buf_info buf_info = { };
  1051. buf_info.p_addr = mhi_buf->dma_addr;
  1052. buf_info.cb_buf = mhi_buf;
  1053. buf_info.pre_mapped = true;
  1054. buf_info.len = len;
  1055. if (unlikely(mhi_chan->pre_alloc))
  1056. return -EINVAL;
  1057. return mhi_queue(mhi_dev, &buf_info, dir, mflags);
  1058. }
  1059. EXPORT_SYMBOL_GPL(mhi_queue_dma);
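/*
 * Build one transfer ring element for @info: record buffer bookkeeping in
 * the buffer ring, map the buffer unless it was pre-mapped, fill in the TRE
 * pointer, length and flag dwords (BEI is set when interrupt moderation is
 * enabled for the channel), and advance both ring write pointers under the
 * channel lock.
 */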
  1060. int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
  1061. struct mhi_buf_info *info, enum mhi_flags flags)
  1062. {
  1063. struct device *dev = &mhi_chan->mhi_dev->dev;
  1064. struct mhi_ring *buf_ring, *tre_ring;
  1065. struct mhi_ring_element *mhi_tre;
  1066. struct mhi_buf_info *buf_info;
  1067. int eot, eob, chain, bei;
  1068. int ret;
  1069. /* Protect accesses for reading and incrementing WP */
  1070. write_lock_bh(&mhi_chan->lock);
  1071. buf_ring = &mhi_chan->buf_ring;
  1072. tre_ring = &mhi_chan->tre_ring;
  1073. buf_info = buf_ring->wp;
  1074. WARN_ON(buf_info->used);
  1075. buf_info->pre_mapped = info->pre_mapped;
  1076. if (info->pre_mapped)
  1077. buf_info->p_addr = info->p_addr;
  1078. else
  1079. buf_info->v_addr = info->v_addr;
  1080. buf_info->cb_buf = info->cb_buf;
  1081. buf_info->wp = tre_ring->wp;
  1082. buf_info->dir = mhi_chan->dir;
  1083. buf_info->len = info->len;
  1084. if (!info->pre_mapped) {
  1085. ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
  1086. if (ret) {
  1087. write_unlock_bh(&mhi_chan->lock);
  1088. return ret;
  1089. }
  1090. }
  1091. eob = !!(flags & MHI_EOB);
  1092. eot = !!(flags & MHI_EOT);
  1093. chain = !!(flags & MHI_CHAIN);
  1094. bei = !!(mhi_chan->intmod);
  1095. mhi_tre = tre_ring->wp;
  1096. mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
  1097. mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
  1098. mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
  1099. MHI_VERB_EXTRA(dev, "Chan: %d WP: 0x%llx TRE: 0x%llx 0x%08x 0x%08x\n",
  1100. mhi_chan->chan, (u64)mhi_to_physical(tre_ring, mhi_tre),
  1101. mhi_tre->ptr, mhi_tre->dword[0], mhi_tre->dword[1]);
  1102. /* increment WP */
  1103. mhi_add_ring_element(mhi_cntrl, tre_ring);
  1104. mhi_add_ring_element(mhi_cntrl, buf_ring);
  1105. write_unlock_bh(&mhi_chan->lock);
  1106. return 0;
  1107. }
  1108. int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
  1109. void *buf, size_t len, enum mhi_flags mflags)
  1110. {
  1111. struct mhi_buf_info buf_info = { };
  1112. buf_info.v_addr = buf;
  1113. buf_info.cb_buf = buf;
  1114. buf_info.len = len;
  1115. return mhi_queue(mhi_dev, &buf_info, dir, mflags);
  1116. }
  1117. EXPORT_SYMBOL_GPL(mhi_queue_buf);
  1118. bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
  1119. {
  1120. struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
  1121. struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
  1122. mhi_dev->ul_chan : mhi_dev->dl_chan;
  1123. struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
  1124. return mhi_is_ring_full(mhi_cntrl, tre_ring);
  1125. }
  1126. EXPORT_SYMBOL_GPL(mhi_queue_is_full);
  1127. int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
  1128. struct mhi_chan *mhi_chan,
  1129. enum mhi_cmd_type cmd)
  1130. {
  1131. struct mhi_ring_element *cmd_tre = NULL;
  1132. struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
  1133. struct mhi_ring *ring = &mhi_cmd->ring;
  1134. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  1135. int chan = 0;
  1136. if (mhi_chan)
  1137. chan = mhi_chan->chan;
  1138. spin_lock_bh(&mhi_cmd->lock);
  1139. if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
  1140. spin_unlock_bh(&mhi_cmd->lock);
  1141. return -ENOMEM;
  1142. }
  1143. /* prepare the cmd tre */
  1144. cmd_tre = ring->wp;
  1145. switch (cmd) {
  1146. case MHI_CMD_RESET_CHAN:
  1147. cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
  1148. cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
  1149. cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
  1150. break;
  1151. case MHI_CMD_STOP_CHAN:
  1152. cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
  1153. cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
  1154. cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
  1155. break;
  1156. case MHI_CMD_START_CHAN:
  1157. cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
  1158. cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
  1159. cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
  1160. break;
  1161. case MHI_CMD_SFR_CFG:
  1162. mhi_misc_cmd_configure(mhi_cntrl, MHI_CMD_SFR_CFG,
  1163. &cmd_tre->ptr, &cmd_tre->dword[0],
  1164. &cmd_tre->dword[1]);
  1165. break;
  1166. default:
  1167. MHI_ERR(dev, "Command not supported\n");
  1168. break;
  1169. }
  1170. /* queue to hardware */
  1171. mhi_add_ring_element(mhi_cntrl, ring);
  1172. read_lock_bh(&mhi_cntrl->pm_lock);
  1173. if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
  1174. mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
  1175. read_unlock_bh(&mhi_cntrl->pm_lock);
  1176. spin_unlock_bh(&mhi_cmd->lock);
  1177. return 0;
  1178. }
  1179. static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
  1180. struct mhi_chan *mhi_chan,
  1181. enum mhi_ch_state_type to_state)
  1182. {
  1183. struct device *dev = &mhi_chan->mhi_dev->dev;
  1184. enum mhi_cmd_type cmd = MHI_CMD_NOP;
  1185. int ret;
  1186. MHI_VERB(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
  1187. TO_CH_STATE_TYPE_STR(to_state));
  1188. switch (to_state) {
  1189. case MHI_CH_STATE_TYPE_RESET:
  1190. write_lock_irq(&mhi_chan->lock);
  1191. if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
  1192. mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
  1193. mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
  1194. write_unlock_irq(&mhi_chan->lock);
  1195. return -EINVAL;
  1196. }
  1197. mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
  1198. write_unlock_irq(&mhi_chan->lock);
  1199. cmd = MHI_CMD_RESET_CHAN;
  1200. break;
  1201. case MHI_CH_STATE_TYPE_STOP:
  1202. if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
  1203. return -EINVAL;
  1204. cmd = MHI_CMD_STOP_CHAN;
  1205. break;
  1206. case MHI_CH_STATE_TYPE_START:
  1207. if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
  1208. mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
  1209. return -EINVAL;
  1210. cmd = MHI_CMD_START_CHAN;
  1211. break;
  1212. default:
  1213. MHI_ERR(dev, "%d: Channel state update to %s not allowed\n",
  1214. mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
  1215. return -EINVAL;
  1216. }
  1217. /* bring host and device out of suspended states */
  1218. ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
  1219. if (ret)
  1220. return ret;
  1221. mhi_cntrl->runtime_get(mhi_cntrl);
  1222. reinit_completion(&mhi_chan->completion);
  1223. ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
  1224. if (ret) {
  1225. MHI_ERR(dev, "%d: Failed to send %s channel command\n",
  1226. mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
  1227. goto exit_channel_update;
  1228. }
  1229. ret = wait_for_completion_timeout(&mhi_chan->completion,
  1230. msecs_to_jiffies(mhi_cntrl->timeout_ms));
  1231. if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
  1232. MHI_ERR(dev,
  1233. "%d: Failed to receive %s channel command completion\n",
  1234. mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
  1235. ret = -EIO;
  1236. goto exit_channel_update;
  1237. }
  1238. ret = 0;
  1239. if (to_state != MHI_CH_STATE_TYPE_RESET) {
  1240. write_lock_irq(&mhi_chan->lock);
  1241. mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
  1242. MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
  1243. write_unlock_irq(&mhi_chan->lock);
  1244. }
  1245. MHI_VERB(dev, "%d: Channel state change to %s successful\n",
  1246. mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
  1247. exit_channel_update:
  1248. mhi_cntrl->runtime_put(mhi_cntrl);
  1249. mhi_device_put(mhi_cntrl->mhi_dev);
  1250. return ret;
  1251. }
  1252. static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
  1253. struct mhi_chan *mhi_chan)
  1254. {
  1255. int ret;
  1256. struct device *dev = &mhi_chan->mhi_dev->dev;
  1257. mutex_lock(&mhi_chan->mutex);
  1258. if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
  1259. MHI_VERB(dev, "Current EE: %s Required EE Mask: 0x%x\n",
  1260. TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
  1261. goto exit_unprepare_channel;
  1262. }
  1263. /* no more processing events for this channel */
  1264. ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
  1265. MHI_CH_STATE_TYPE_RESET);
  1266. if (ret)
  1267. MHI_ERR(dev, "%d: Failed to reset channel, still resetting\n",
  1268. mhi_chan->chan);
  1269. exit_unprepare_channel:
  1270. write_lock_irq(&mhi_chan->lock);
  1271. mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
  1272. write_unlock_irq(&mhi_chan->lock);
  1273. if (!mhi_chan->offload_ch) {
  1274. mhi_reset_chan(mhi_cntrl, mhi_chan);
  1275. mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
  1276. }
  1277. MHI_VERB(dev, "%d: successfully reset\n", mhi_chan->chan);
  1278. mutex_unlock(&mhi_chan->mutex);
  1279. }
  1280. int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
  1281. struct mhi_chan *mhi_chan, unsigned int flags)
  1282. {
  1283. int ret = 0;
  1284. struct device *dev = &mhi_chan->mhi_dev->dev;
  1285. if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
  1286. MHI_ERR(dev, "Current EE: %s Required EE Mask: 0x%x\n",
  1287. TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
  1288. return -ENOTCONN;
  1289. }
  1290. mutex_lock(&mhi_chan->mutex);
  1291. /* Check if the client manages the channel context for offload channels */
  1292. if (!mhi_chan->offload_ch) {
  1293. ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
  1294. if (ret)
  1295. goto error_init_chan;
  1296. }
  1297. ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
  1298. MHI_CH_STATE_TYPE_START);
  1299. if (ret)
  1300. goto error_pm_state;
  1301. if (mhi_chan->dir == DMA_FROM_DEVICE)
  1302. mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
  1303. /* Pre-allocate buffer for xfer ring */
  1304. if (mhi_chan->pre_alloc) {
  1305. int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
  1306. &mhi_chan->tre_ring);
  1307. size_t len = mhi_cntrl->buffer_len;
  1308. while (nr_el--) {
  1309. void *buf;
  1310. struct mhi_buf_info info = { };
  1311. buf = kmalloc(len, GFP_KERNEL);
  1312. if (!buf) {
  1313. ret = -ENOMEM;
  1314. goto error_pre_alloc;
  1315. }
  1316. /* Prepare transfer descriptors */
  1317. info.v_addr = buf;
  1318. info.cb_buf = buf;
  1319. info.len = len;
  1320. ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
  1321. if (ret) {
  1322. kfree(buf);
  1323. goto error_pre_alloc;
  1324. }
  1325. }
  1326. read_lock_bh(&mhi_cntrl->pm_lock);
  1327. if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
  1328. read_lock_irq(&mhi_chan->lock);
  1329. mhi_ring_chan_db(mhi_cntrl, mhi_chan);
  1330. read_unlock_irq(&mhi_chan->lock);
  1331. }
  1332. read_unlock_bh(&mhi_cntrl->pm_lock);
  1333. }
  1334. mutex_unlock(&mhi_chan->mutex);
  1335. return 0;
  1336. error_pm_state:
  1337. if (!mhi_chan->offload_ch)
  1338. mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
  1339. error_init_chan:
  1340. mutex_unlock(&mhi_chan->mutex);
  1341. return ret;
  1342. error_pre_alloc:
  1343. mutex_unlock(&mhi_chan->mutex);
  1344. mhi_unprepare_channel(mhi_cntrl, mhi_chan);
  1345. return ret;
  1346. }
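/*
 * Walk the unprocessed portion of the event ring and rewrite events that
 * belong to @chan as MHI_PKT_TYPE_STALE_EVENT, so that later event
 * processing ignores completions for a channel being reset.
 */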
  1347. static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
  1348. struct mhi_event *mhi_event,
  1349. struct mhi_event_ctxt *er_ctxt,
  1350. int chan)
  1351. {
  1352. struct mhi_ring_element *dev_rp, *local_rp;
  1353. struct mhi_ring *ev_ring;
  1354. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  1355. unsigned long flags;
  1356. dma_addr_t ptr;
  1357. MHI_VERB(dev, "Marking all events for chan: %d as stale\n", chan);
  1358. ev_ring = &mhi_event->ring;
  1359. /* Mark all pending events related to the channel as stale */
  1360. spin_lock_irqsave(&mhi_event->lock, flags);
  1361. ptr = le64_to_cpu(er_ctxt->rp);
  1362. if (!is_valid_ring_ptr(ev_ring, ptr)) {
  1363. MHI_ERR(dev,
  1364. "Event ring rp points outside of the event ring\n");
  1365. dev_rp = ev_ring->rp;
  1366. } else {
  1367. dev_rp = mhi_to_virtual(ev_ring, ptr);
  1368. }
  1369. local_rp = ev_ring->rp;
  1370. while (dev_rp != local_rp) {
  1371. if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
  1372. chan == MHI_TRE_GET_EV_CHID(local_rp))
  1373. local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
  1374. MHI_PKT_TYPE_STALE_EVENT);
  1375. local_rp++;
  1376. if (local_rp == (ev_ring->base + ev_ring->len))
  1377. local_rp = ev_ring->base;
  1378. }
  1379. MHI_VERB(dev, "Finished marking events as stale events\n");
  1380. spin_unlock_irqrestore(&mhi_event->lock, flags);
  1381. }
  1382. static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
  1383. struct mhi_chan *mhi_chan)
  1384. {
  1385. struct mhi_ring *buf_ring, *tre_ring;
  1386. struct mhi_result result;
  1387. /* Reset any pending buffers */
  1388. buf_ring = &mhi_chan->buf_ring;
  1389. tre_ring = &mhi_chan->tre_ring;
  1390. result.transaction_status = -ENOTCONN;
  1391. result.bytes_xferd = 0;
  1392. while (tre_ring->rp != tre_ring->wp) {
  1393. struct mhi_buf_info *buf_info = buf_ring->rp;
  1394. if (mhi_chan->dir == DMA_TO_DEVICE)
  1395. atomic_dec(&mhi_cntrl->pending_pkts);
  1396. if (!buf_info->pre_mapped)
  1397. mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
  1398. mhi_del_ring_element(mhi_cntrl, buf_ring);
  1399. mhi_del_ring_element(mhi_cntrl, tre_ring);
  1400. if (mhi_chan->pre_alloc) {
  1401. kfree(buf_info->cb_buf);
  1402. } else {
  1403. result.buf_addr = buf_info->cb_buf;
  1404. mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
  1405. }
  1406. }
  1407. }
  1408. void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
  1409. {
  1410. struct mhi_event *mhi_event;
  1411. struct mhi_event_ctxt *er_ctxt;
  1412. int chan = mhi_chan->chan;
  1413. /* Nothing to reset, client doesn't queue buffers */
  1414. if (mhi_chan->offload_ch)
  1415. return;
  1416. read_lock_bh(&mhi_cntrl->pm_lock);
  1417. mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
  1418. er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
  1419. mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
  1420. mhi_reset_data_chan(mhi_cntrl, mhi_chan);
  1421. read_unlock_bh(&mhi_cntrl->pm_lock);
  1422. }
  1423. static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
  1424. {
  1425. int ret, dir;
  1426. struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
  1427. struct mhi_chan *mhi_chan;
  1428. for (dir = 0; dir < 2; dir++) {
  1429. mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
  1430. if (!mhi_chan)
  1431. continue;
  1432. ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
  1433. if (ret)
  1434. goto error_open_chan;
  1435. }
  1436. return 0;
  1437. error_open_chan:
  1438. for (--dir; dir >= 0; dir--) {
  1439. mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
  1440. if (!mhi_chan)
  1441. continue;
  1442. mhi_unprepare_channel(mhi_cntrl, mhi_chan);
  1443. }
  1444. return ret;
  1445. }
  1446. int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
  1447. {
  1448. return __mhi_prepare_for_transfer(mhi_dev, 0);
  1449. }
  1450. EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
  1451. int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
  1452. {
  1453. return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
  1454. }
  1455. EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
  1456. void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
  1457. {
  1458. struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
  1459. struct mhi_chan *mhi_chan;
  1460. int dir;
  1461. /* Get out of suspended state */
  1462. mhi_cntrl->runtime_get(mhi_cntrl);
  1463. for (dir = 0; dir < 2; dir++) {
  1464. mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
  1465. if (!mhi_chan)
  1466. continue;
  1467. mhi_unprepare_channel(mhi_cntrl, mhi_chan);
  1468. }
  1469. /* Allow suspend */
  1470. mhi_cntrl->runtime_put(mhi_cntrl);
  1471. }
  1472. EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
  1473. static int mhi_update_transfer_state(struct mhi_device *mhi_dev,
  1474. enum mhi_ch_state_type to_state)
  1475. {
  1476. struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
  1477. struct mhi_chan *mhi_chan;
  1478. int dir, ret;
  1479. for (dir = 0; dir < 2; dir++) {
  1480. mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
  1481. if (!mhi_chan)
  1482. continue;
  1483. /*
  1484. * Bail out if one of the channels fails as client will reset
  1485. * both upon failure
  1486. */
  1487. mutex_lock(&mhi_chan->mutex);
  1488. ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, to_state);
  1489. if (ret) {
  1490. mutex_unlock(&mhi_chan->mutex);
  1491. return ret;
  1492. }
  1493. mutex_unlock(&mhi_chan->mutex);
  1494. }
  1495. return 0;
  1496. }
  1497. int mhi_stop_transfer(struct mhi_device *mhi_dev)
  1498. {
  1499. return mhi_update_transfer_state(mhi_dev, MHI_CH_STATE_TYPE_STOP);
  1500. }
  1501. EXPORT_SYMBOL(mhi_stop_transfer);
  1502. int mhi_start_transfer(struct mhi_device *mhi_dev)
  1503. {
  1504. return mhi_update_transfer_state(mhi_dev, MHI_CH_STATE_TYPE_START);
  1505. }
  1506. EXPORT_SYMBOL(mhi_start_transfer);
  1507. int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
  1508. {
  1509. struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
  1510. struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
  1511. struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
  1512. int ret;
  1513. spin_lock_bh(&mhi_event->lock);
  1514. ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
  1515. spin_unlock_bh(&mhi_event->lock);
  1516. return ret;
  1517. }
  1518. EXPORT_SYMBOL_GPL(mhi_poll);