// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
	[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
	[MHI_EE_AMSS] = "MISSION MODE",
	[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
	[MHI_EE_WFW] = "WLAN FIRMWARE",
	[MHI_EE_PTHRU] = "PASS THROUGH",
	[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
	[MHI_EE_FP] = "FLASH PROGRAMMER",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
	[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	[MHI_CH_STATE_TYPE_RESET] = "RESET",
	[MHI_CH_STATE_TYPE_STOP] = "STOP",
	[MHI_CH_STATE_TYPE_START] = "START",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POWER ON RESET",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};
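
/*
 * The state argument is a bitmask: __fls() maps its highest set bit to an
 * index into mhi_pm_state_str, and a zero state or an out-of-range index
 * reads back as "Invalid State".
 */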
const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			  mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
				     i, mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static ssize_t soc_reset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_soc_reset(mhi_cntrl);
	return count;
}
static DEVICE_ATTR_WO(soc_reset);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	&dev_attr_soc_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* MHI protocol requires the transfer ring to be aligned with ring length */
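/*
 * A worked example of the over-allocation below, assuming a power-of-two
 * ring length: for len = 0x1000, alloc_size is 0x1FFF, so the DMA buffer
 * always contains a 0x1000-aligned window of 0x1000 bytes. iommu_base is
 * dma_handle rounded up to the next len boundary and base is the CPU
 * address at the same offset into the pre-aligned buffer.
 */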
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
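
/*
 * IRQ layout: irq[0] is the BHI/controller interrupt, handled by
 * mhi_intvec_handler with mhi_intvec_threaded_handler as its threaded
 * half; each non-offload event ring uses irq[mhi_event->irq]. All vectors
 * are requested here but left disabled until power up.
 */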
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;
	/*
	 * IRQs should be enabled during mhi_async_power_up(), so disable them explicitly here.
	 * Due to the use of IRQF_SHARED flag as default while requesting IRQs, we assume that
	 * IRQ_NOAUTOEN is not applicable.
	 */
	disable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			MHI_ERR(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			MHI_ERR(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}

		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}
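
/*
 * Allocate the context structures shared with the device: one channel
 * context per channel, one event ring context per event ring and one
 * command ring context per command ring, all in coherent DMA memory,
 * plus the rings themselves for non-offload event and command rings.
 */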
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
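
/*
 * Program the MMIO registers with the context base addresses set up above,
 * and derive the wake, channel and event ring doorbell addresses from the
 * CHDBOFF/ERDBOFF offsets reported by the device.
 */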
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICTRLBASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{0, 0}
	};

	MHI_VERB(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
	if (ret) {
		MHI_ERR(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
		return -ERANGE;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		MHI_ERR(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
		return -ERANGE;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		MHI_ERR(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		MHI_ERR(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	mhi_misc_init_mmio(mhi_cntrl);

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_wmb();
}
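
/*
 * Allocate the per-channel rings: the transfer (TRE) ring lives in coherent
 * DMA memory and is exposed to the device through the channel context,
 * while the buffer ring is host-only bookkeeping allocated with vzalloc().
 */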
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}
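
/*
 * Translate the controller-provided event ring configuration into the
 * mhi_event array: ring size, IRQ vector, interrupt moderation, doorbell
 * mode and the event processing handler for each ring.
 */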
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				MHI_ERR(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		mhi_event->priority = event_cfg->priority;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		case MHI_ER_BW_SCALE:
			mhi_event->process_event = mhi_process_misc_bw_ev_ring;
			break;
		case MHI_ER_TIMESYNC:
			mhi_event->process_event = mhi_process_misc_tsync_ev_ring;
			break;
		default:
			MHI_ERR(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}
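
/*
 * Translate the controller-provided channel configuration into the mhi_chan
 * array, validating direction, doorbell mode and event ring assignment for
 * each channel.
 */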
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid possible memory allocation failures, vzalloc is used
	 * here
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			MHI_ERR(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so the host can queue more buffers
		 * than the transfer ring length. For example, RSC channels
		 * should have a larger local channel length than the
		 * transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. So, if it is not defined then assign the channel
		 * direction to chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * If the MHI host allocates buffers, then the channel
		 * direction should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			MHI_ERR(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be offload
		 * channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			MHI_ERR(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				MHI_ERR(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	if (config->bhie_offset)
		mhi_cntrl->bhie = mhi_cntrl->regs + config->bhie_offset;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
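
/*
 * Register a controller with the MHI bus: parse the configuration, allocate
 * command/event/channel bookkeeping, set up IRQs, create the controller
 * mhi_device and expose the debugfs and sysfs entries.
 */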
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	/* Initialise BHI and BHIe offsets */
	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->priority == MHI_ER_PRIORITY_HI_SLEEP)
			INIT_WORK(&mhi_event->work, mhi_process_ev_work);
		else
			tasklet_init(&mhi_event->task,
				     (mhi_event->data_type == MHI_ER_CTRL) ?
				     mhi_ctrl_ev_task : mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto err_destroy_wq;

	mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
	mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
	mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
	mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto err_ida_free;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_setup_irq;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	mhi_cntrl->mhi_dev = mhi_dev;

	ret = mhi_misc_register_controller(mhi_cntrl);
	if (ret) {
		dev_err(mhi_cntrl->cntrl_dev,
			"Could not enable miscellaneous features\n");
		mhi_cntrl->mhi_dev = NULL;
		goto err_ida_free;
	}

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_misc_release;

	ret = mhi_misc_sysfs_create(mhi_cntrl);
	if (ret)
		goto err_release_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	device_del(&mhi_dev->dev);
err_misc_release:
	mhi_misc_unregister_controller(mhi_cntrl);
error_setup_irq:
	mhi_deinit_free_irq(mhi_cntrl);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_deinit_free_irq(mhi_cntrl);
	mhi_misc_unregister_controller(mhi_cntrl);
	mhi_misc_sysfs_destroy(mhi_cntrl);

	/* Free the memory the controller wanted to preserve for BHIe images */
	if (mhi_cntrl->img_pre_alloc) {
		mhi_cntrl->img_pre_alloc = false;
		if (mhi_cntrl->fbc_image)
			mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image);
		if (mhi_cntrl->rddm_image)
			mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image);
	}

	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);
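
/*
 * Allocate the device context and discover the BHI/BHIe register offsets
 * before power up; when the controller specifies an rddm_size, also clear
 * the BHIE RX vector registers and prepare the RDDM image table.
 */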
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off, offset;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		MHI_ERR(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		MHI_ERR(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -EINVAL;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (!mhi_cntrl->bhie && (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size)) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			MHI_ERR(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			MHI_ERR(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -EINVAL;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}

	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		if (!((uintptr_t)(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS) & 0x0f)) {
			memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
				  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
				  4);
		} else {
			for (offset = BHIE_RXVECADDR_LOW_OFFS;
			     offset <= BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS;
			     offset += 4) {
				mhi_write_reg(mhi_cntrl, mhi_cntrl->bhie, offset, 0);
				if (!((uintptr_t)(mhi_cntrl->bhie + offset + 4) & 0x0f))
					break;
			}
			if (offset <= BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS)
				memset_io(mhi_cntrl->bhie + offset + 4,
					  0, BHIE_RXVECSTATUS_OFFS - offset);
		}
		/*
		 * Allocate RDDM table for debugging purposes, if specified
		 */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    &mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->rddm_image)
		mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image);

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. pci device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}
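
/*
 * Bus probe: bring the device out of LPM, validate that the client driver
 * provides the callbacks required by the channels it binds to, then call
 * the client's probe().
 */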
static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}
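
/*
 * Bus remove: move both channels to a suspended state, reset non-offload
 * channels, call the client's remove(), then tear down any channel context
 * that was still enabled and drop leftover device wake votes.
 */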
static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    mhi_chan->ch_state != MHI_CH_STATE_DISABLED &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}
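
/*
 * Client drivers match on channel name: the device name assigned when the
 * channel device was created is compared against each entry in the driver's
 * id_table.
 */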
static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
};

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_misc_init();
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_misc_exit();
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");