pm.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static const struct mhi_pm_transitions dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};

enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}
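
/*
 * Illustrative sketch (not part of the original file): callers in this file
 * take pm_lock for writing, attempt the transition, and then compare the
 * returned state against the requested one to learn whether the move was
 * accepted. A hypothetical caller detecting a SYS_ERR might look like:
 *
 *	enum mhi_pm_state cur_state;
 *
 *	write_lock_irq(&mhi_cntrl->pm_lock);
 *	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT);
 *	write_unlock_irq(&mhi_cntrl->pm_lock);
 *	if (cur_state != MHI_PM_SYS_ERR_DETECT)
 *		return;
 *
 * The return value is the resulting pm_state, not an error code, which is
 * why every caller checks it against the state it asked for.
 */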
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	if (state == MHI_STATE_RESET) {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_RESET_MASK, 1);
	} else {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_MHISTATE_MASK, state);
	}

	if (ret)
		MHI_ERR(dev, "Failed to set MHI state to: %s\n",
			mhi_state_str(state));
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Add event ring elements and ring er db */
static void mhi_setup_event_rings(struct mhi_controller *mhi_cntrl, bool add_el)
{
	struct mhi_event *mhi_event;
	int i;
	bool skip_er_setup;

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		/* skip HW event ring setup in ready state */
		if (mhi_cntrl->dev_state == MHI_STATE_READY)
			skip_er_setup = mhi_event->hw_ring;
		else
			skip_er_setup = !mhi_event->hw_ring;

		/* if no er element to add, ring all er dbs */
		if (add_el && skip_er_setup)
			continue;

		if (add_el) {
			ring->wp = ring->base + ring->len - ring->el_size;
			*ring->ctxt_wp =
				ring->iommu_base + ring->len - ring->el_size;
			/* Update all cores */
			smp_wmb();
		}

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret = -EINVAL;

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		MHI_ERR(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, interval_us);
	if (ret) {
		MHI_ERR(dev, "Device failed to clear MHI Reset\n");
		return ret;
	}

	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, 1, interval_us);
	if (ret) {
		MHI_ERR(dev, "Device failed to enter MHI Ready\n");
		return ret;
	}

	MHI_VERB(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		MHI_ERR(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		MHI_ERR(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		MHI_ERR(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* add SW event ring elements and ring SW event ring dbs */
	mhi_setup_event_rings(mhi_cntrl, true);

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		MHI_ERR(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		mhi_setup_event_rings(mhi_cntrl, false);

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);

		/* ring misc doorbells for certain controllers */
		mhi_misc_dbs_pending(mhi_cntrl);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
		    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;
		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			MHI_VERB(dev,
				 "Exiting M2, pending_pkts: %d dev_wake: %d\n",
				 atomic_read(&mhi_cntrl->pending_pkts),
				 atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		MHI_ERR(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
	int ret;

	MHI_VERB(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	mhi_cntrl->ee = ee;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	mhi_reset_reg_write_q(mhi_cntrl);

	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
			      mhi_destroy_device);
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings and ring HW event ring dbs */
	mhi_setup_event_rings(mhi_cntrl, true);

	read_unlock_bh(&mhi_cntrl->pm_lock);

	mhi_process_sleeping_events(mhi_cntrl);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	mhi_misc_mission_mode(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	MHI_VERB(dev, "Processing disable transition with PM state: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mhi_reset_reg_write_q(mhi_cntrl);

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory. */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		/* Skip MHI RESET if in RDDM state */
		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
			goto skip_mhi_reset;

		MHI_VERB(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, 0, 25000);
		if (ret)
			MHI_ERR(dev, "Device failed to clear MHI Reset\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);

		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
			/* wait for ready to be set */
			ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
						 MHISTATUS,
						 MHISTATUS_READY_MASK, 1, 25000);
			if (ret)
				MHI_ERR(dev, "Device failed to enter READY state\n");
		}
	}

skip_mhi_reset:
	MHI_VERB(dev,
		 "Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
		if (mhi_event->priority == MHI_ER_PRIORITY_HI_SLEEP)
			cancel_work_sync(&mhi_event->work);
		else
			tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_misc_disable(mhi_cntrl);

	MHI_VERB(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	MHI_VERB(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	MHI_VERB(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		MHI_ERR(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	MHI_VERB(dev, "Exiting with PM state: %s, MHI state: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	enum dev_st_transition next_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	MHI_VERB(dev, "Transitioning from PM state: %s to: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		MHI_ERR(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		MHI_VERB(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
			MHI_ERR(dev, "Device failed to exit MHI Reset state\n");
			goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	MHI_VERB(dev,
		 "Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		if (mhi_event->priority == MHI_ER_PRIORITY_HI_SLEEP)
			cancel_work_sync(&mhi_event->work);
		else
			tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_misc_disable(mhi_cntrl);

	MHI_VERB(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	MHI_VERB(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	MHI_VERB(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Transition to next state */
	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (cur_state != MHI_PM_POR) {
			MHI_ERR(dev, "Error moving to state %s from %s\n",
				to_mhi_pm_state_str(MHI_PM_POR),
				to_mhi_pm_state_str(cur_state));
			goto exit_sys_error_transition;
		}
		next_state = DEV_ST_TRANSITION_PBL;
	} else {
		next_state = DEV_ST_TRANSITION_READY;
	}

	mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
	MHI_VERB(dev, "Exiting with PM state: %s, MHI state: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}
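
/*
 * Illustrative note (not part of the original file): a device state change is
 * requested by queueing it and letting the high-priority worker run it, e.g.:
 *
 *	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
 *
 * The item is picked up by mhi_pm_st_worker() below, which dispatches it to
 * mhi_pm_sys_error_transition(). The READY, PBL and DISABLE transitions
 * elsewhere in this file are queued the same way.
 */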
/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		MHI_VERB(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		MHI_VERB(dev, "Handling state transition: %s\n",
			 TO_DEV_STATE_TRANS_STR(itr->state));
		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_process_sleeping_events(mhi_cntrl);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			if (mhi_cntrl->fbc_download)
				mhi_download_amss_image(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_FP:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_FP;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

static bool mhi_in_rddm(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM
	    && mhi_is_active(mhi_cntrl)) {
		mhi_cntrl->ee = MHI_EE_RDDM;
		MHI_ERR(dev, "RDDM event occurred!\n");

		/* notify critical clients with early notifications */
		mhi_report_error(mhi_cntrl);

		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
		wake_up_all(&mhi_cntrl->state_event);

		return true;
	}

	return false;
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	/* finish reg writes */
	mhi_force_reg_write(mhi_cntrl);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		MHI_ERR(dev,
			"Could not enter M0/M1 state");
		return -EIO;
	}

	/* finish any reg writes before setting M3 */
	mhi_force_reg_write(mhi_cntrl);

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	MHI_VERB(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		MHI_ERR(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	MHI_VERB(dev, "Waiting for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret) {
		mhi_debug_reg_dump(mhi_cntrl);
		panic("Timedout waiting for M3 ACK");
		return -EIO;
	} else if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		MHI_ERR(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	MHI_VERB(dev, "Entered with PM state: %s, MHI state: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 mhi_state_str(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
		dev_warn(dev, "Resuming from non M3 state (%s)\n",
			 mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
		return -EINVAL;
	}

	if (mhi_in_rddm(mhi_cntrl))
		return 0;

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		MHI_LOG(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		if (mhi_in_rddm(mhi_cntrl))
			return 0;

		MHI_ERR(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, false);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, true);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
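
/*
 * Illustrative sketch (not part of the original file): a controller driver
 * typically wires mhi_pm_suspend() and mhi_pm_resume() into its own system
 * PM callbacks. The names foo_suspend/foo_resume and foo_get_mhi() below are
 * hypothetical placeholders, not part of this API:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = foo_get_mhi(dev);
 *
 *		return mhi_pm_suspend(mhi_cntrl);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return mhi_pm_resume(foo_get_mhi(dev));
 *	}
 *
 * mhi_pm_suspend() may return -EBUSY while votes or packets are pending, so
 * the caller should be prepared to retry or abort its suspend.
 * mhi_pm_resume_force() follows the same call pattern as mhi_pm_resume().
 */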
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	mhi_force_reg_write(mhi_cntrl);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			if (mhi_cntrl->db_access & MHI_PM_M2)
				mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			else
				mhi_write_offload_wakedb(mhi_cntrl, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			if (mhi_cntrl->db_access & MHI_PM_M2)
				mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			else
				mhi_write_offload_wakedb(mhi_cntrl, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		if (mhi_cntrl->db_access & MHI_PM_M2)
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		else
			mhi_write_offload_wakedb(mhi_cntrl, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}
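
/*
 * Illustrative note (not part of the original file): wake_get()/wake_put()
 * are reference counted and must stay balanced. Callers in this file follow
 * the pattern below, holding pm_lock for reading across the device access:
 *
 *	read_lock_bh(&mhi_cntrl->pm_lock);
 *	mhi_cntrl->wake_get(mhi_cntrl, true);
 *	... ring doorbells / access the device ...
 *	mhi_cntrl->wake_put(mhi_cntrl, false);
 *	read_unlock_bh(&mhi_cntrl->pm_lock);
 *
 * Only the first get rings the wake doorbell and only the last put clears it,
 * which is what the dev_wake atomic counter above implements.
 */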
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret, i;

	MHI_LOG(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	/* Setup BHI INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
		MHI_ERR(dev, "%s is not a valid EE for power on\n",
			TO_MHI_EXEC_STR(current_ee));
		ret = -EIO;
		goto error_exit;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	MHI_VERB(dev, "Attempting power on with EE: %s, state: %s\n",
		 TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));

	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, 0, interval_us);
		if (ret) {
			MHI_LOG(dev, "Failed to reset MHI due to syserr state\n");
			goto error_exit;
		}

		/*
		 * device clears INTVEC as part of RESET processing,
		 * re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* IRQs have been requested during probe, so we just need to enable them. */
	enable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		enable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	MHI_LOG(dev, "Power on setup success\n");

	return 0;

error_exit:
	mhi_cntrl->pm_state = MHI_PM_DISABLE;
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_cntrl->pm_state;
	if (cur_state == MHI_PM_DISABLE) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return; /* Already powered down */
	}

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		MHI_ERR(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	disable_irq(mhi_cntrl->irq[0]);

	if (mhi_cntrl->fbc_image)
		mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret && !mhi_cntrl->rddm_image)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);
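
/*
 * Illustrative sketch (not part of the original file): a controller driver
 * normally brings the device up once registration is done and tears it down
 * on removal. On the probe path:
 *
 *	ret = mhi_sync_power_up(mhi_cntrl);
 *	if (ret)
 *		return ret;
 *
 * and on the remove path:
 *
 *	mhi_power_down(mhi_cntrl, true);
 *
 * mhi_sync_power_up() blocks until the device reaches mission mode or the
 * timeout expires; mhi_async_power_up() can be used instead when the caller
 * does not need to wait. Passing graceful = false to mhi_power_down() forces
 * the link-down error path instead of the shutdown state machine.
 */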
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	MHI_VERB(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
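
/*
 * Illustrative sketch (not part of the original file): an MHI client driver
 * votes to keep the device in M0 around a burst of transfers and drops the
 * vote afterwards. The error label is a hypothetical placeholder:
 *
 *	ret = mhi_device_get_sync(mhi_dev);
 *	if (ret)
 *		goto err;
 *	... queue and complete transfers ...
 *	mhi_device_put(mhi_dev);
 *
 * mhi_device_get() is the non-blocking variant; both must be balanced with
 * mhi_device_put() so the dev_wake vote can drop back to zero and allow the
 * device to re-enter low power states.
 */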