// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "internal.h"

static const char * const mhi_log_level_str[MHI_MSG_LVL_MAX] = {
	[MHI_MSG_LVL_VERBOSE] = "Verbose",
	[MHI_MSG_LVL_INFO] = "Info",
	[MHI_MSG_LVL_ERROR] = "Error",
	[MHI_MSG_LVL_CRITICAL] = "Critical",
	[MHI_MSG_LVL_MASK_ALL] = "Mask all",
};

#define TO_MHI_LOG_LEVEL_STR(level) ((level >= MHI_MSG_LVL_MAX || \
				      !mhi_log_level_str[level]) ? \
				     "Mask all" : mhi_log_level_str[level])

#define MHI_NUMERIC_DEVICE_ID(dev, domain, bus, slot) \
	((dev & 0xFFFF) << 16 | (domain & 0xF) << 12 | (bus & 0xFF) << 4 | \
	 (slot & 0xF))

#define MHI_DTR_CHANNEL 19

struct mhi_bus mhi_bus;

void mhi_misc_init(void)
{
	mutex_init(&mhi_bus.lock);
	INIT_LIST_HEAD(&mhi_bus.controller_list);
}

void mhi_misc_exit(void)
{
	mutex_destroy(&mhi_bus.lock);
}

static void *mhi_misc_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}

static dma_addr_t mhi_misc_to_physical(struct mhi_ring *ring, void *addr)
{
	return (addr - ring->base) + ring->iommu_base;
}

static ssize_t time_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	u64 t_host, t_device;
	int ret;

	ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device);
	if (ret) {
		MHI_ERR(dev, "Failed to obtain time, ret:%d\n", ret);
		return scnprintf(buf, PAGE_SIZE,
				 "Request failed or feature unsupported\n");
	}

	return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (ticks)\n",
			 t_host, t_device);
}
static DEVICE_ATTR_RO(time);
static void mhi_time_async_cb(struct mhi_device *mhi_dev, u32 sequence,
			      u64 local_time, u64 remote_time)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device *dev = &mhi_dev->dev;

	MHI_LOG(dev, "Time response: seq:%x local: %llu remote: %llu (ticks)\n",
		sequence, local_time, remote_time);
}

static ssize_t time_async_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	u32 seq = get_random_u32();
	int ret;

	if (!seq)
		seq = 1;

	ret = mhi_get_remote_time(mhi_dev, seq, &mhi_time_async_cb);
	if (ret) {
		MHI_ERR(dev, "Failed to request time, seq:%x, ret:%d\n", seq, ret);
		return scnprintf(buf, PAGE_SIZE,
				 "Request failed or feature unsupported\n");
	}

	return scnprintf(buf, PAGE_SIZE,
			 "Requested time asynchronously with seq:%x\n", seq);
}
static DEVICE_ATTR_RO(time_async);
static struct attribute *mhi_tsync_attrs[] = {
	&dev_attr_time.attr,
	&dev_attr_time_async.attr,
	NULL,
};

static const struct attribute_group mhi_tsync_group = {
	.attrs = mhi_tsync_attrs,
};

static ssize_t log_level_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_private *mhi_priv = dev_get_drvdata(&mhi_cntrl->mhi_dev->dev);

	if (!mhi_priv)
		return -EIO;

	return scnprintf(buf, PAGE_SIZE, "IPC log level begins from: %s\n",
			 TO_MHI_LOG_LEVEL_STR(mhi_priv->log_lvl));
}

static ssize_t log_level_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_private *mhi_priv = dev_get_drvdata(&mhi_cntrl->mhi_dev->dev);
	enum MHI_DEBUG_LEVEL log_level;

	if (kstrtou32(buf, 0, &log_level) < 0)
		return -EINVAL;

	if (!mhi_priv)
		return -EIO;

	mhi_priv->log_lvl = log_level;

	MHI_LOG(dev, "IPC log level changed to: %s\n",
		TO_MHI_LOG_LEVEL_STR(log_level));

	return count;
}
static DEVICE_ATTR_RW(log_level);

static struct attribute *mhi_misc_attrs[] = {
	&dev_attr_log_level.attr,
	NULL,
};

static const struct attribute_group mhi_misc_group = {
	.attrs = mhi_misc_attrs,
};

void mhi_force_reg_write(struct mhi_controller *mhi_cntrl)
{
	struct mhi_private *mhi_priv =
		dev_get_drvdata(&mhi_cntrl->mhi_dev->dev);

	if (!(mhi_cntrl->db_access & MHI_PM_M2))
		flush_work(&mhi_priv->reg_write_work);
}

void mhi_reset_reg_write_q(struct mhi_controller *mhi_cntrl)
{
	struct mhi_private *mhi_priv =
		dev_get_drvdata(&mhi_cntrl->mhi_dev->dev);

	if (mhi_cntrl->db_access & MHI_PM_M2)
		return;

	cancel_work_sync(&mhi_priv->reg_write_work);
	memset(mhi_priv->reg_write_q, 0,
	       sizeof(struct reg_write_info) * REG_WRITE_QUEUE_LEN);
	mhi_priv->read_idx = 0;
	atomic_set(&mhi_priv->write_idx, -1);
}
static void mhi_reg_write_enqueue(struct mhi_private *mhi_priv,
				  void __iomem *reg_addr, u32 val)
{
	u32 q_index = atomic_inc_return(&mhi_priv->write_idx);

	q_index = q_index & (REG_WRITE_QUEUE_LEN - 1);

	if (mhi_priv->reg_write_q[q_index].valid)
		panic("queue full idx %d", q_index);

	mhi_priv->reg_write_q[q_index].reg_addr = reg_addr;
	mhi_priv->reg_write_q[q_index].val = val;

	/*
	 * Prevent reordering so that val is set before valid is set to true.
	 * This stops the offload worker running on another core from writing
	 * a stale value to the register once it sees valid == true.
	 */
	smp_wmb();

	mhi_priv->reg_write_q[q_index].valid = true;

	/*
	 * Make sure the valid flag is visible to other cores so the offload
	 * worker does not skip this register write.
	 */
	smp_wmb();
}
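
/*
 * Doorbell/register writes below are queued and flushed from the ordered,
 * high-priority offload workqueue, so callers do not block while the worker
 * keeps the PCIe link out of L1 and posts the writes.
 */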
void mhi_write_reg_offload(struct mhi_controller *mhi_cntrl,
			   void __iomem *base,
			   u32 offset,
			   u32 val)
{
	struct mhi_private *mhi_priv =
		dev_get_drvdata(&mhi_cntrl->mhi_dev->dev);

	mhi_reg_write_enqueue(mhi_priv, base + offset, val);
	queue_work(mhi_priv->offload_wq, &mhi_priv->reg_write_work);
}

void mhi_write_offload_wakedb(struct mhi_controller *mhi_cntrl, int db_val)
{
	mhi_write_reg_offload(mhi_cntrl, mhi_cntrl->wake_db, 4,
			      upper_32_bits(db_val));
	mhi_write_reg_offload(mhi_cntrl, mhi_cntrl->wake_db, 0,
			      lower_32_bits(db_val));
}

void mhi_reg_write_work(struct work_struct *w)
{
	struct mhi_private *mhi_priv = container_of(w,
						    struct mhi_private,
						    reg_write_work);
	struct mhi_controller *mhi_cntrl = mhi_priv->mhi_cntrl;
	struct pci_dev *parent = to_pci_dev(mhi_cntrl->cntrl_dev);
	struct reg_write_info *info =
		&mhi_priv->reg_write_q[mhi_priv->read_idx];

	if (!info->valid)
		return;

	if (!mhi_is_active(mhi_cntrl))
		return;

	if (msm_pcie_prevent_l1(parent))
		return;

	while (info->valid) {
		if (!mhi_is_active(mhi_cntrl))
			break;

		writel_relaxed(info->val, info->reg_addr);
		info->valid = false;
		mhi_priv->read_idx =
			(mhi_priv->read_idx + 1) &
			(REG_WRITE_QUEUE_LEN - 1);
		info = &mhi_priv->reg_write_q[mhi_priv->read_idx];
	}

	msm_pcie_allow_l1(parent);
}

int mhi_misc_sysfs_create(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret = 0;

	ret = sysfs_create_group(&dev->kobj, &mhi_misc_group);
	if (ret) {
		MHI_ERR(dev, "Failed to create misc sysfs group\n");
		return ret;
	}

	ret = sysfs_create_group(&dev->kobj, &mhi_tsync_group);
	if (ret) {
		MHI_ERR(dev, "Failed to create time synchronization sysfs group\n");
		return ret;
	}

	return ret;
}

void mhi_misc_sysfs_destroy(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	sysfs_remove_group(&dev->kobj, &mhi_tsync_group);
	sysfs_remove_group(&dev->kobj, &mhi_misc_group);
}

int mhi_misc_register_controller(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = kzalloc(sizeof(*mhi_priv), GFP_KERNEL);
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct pci_dev *parent = to_pci_dev(mhi_cntrl->cntrl_dev);
	int ret = 0;

	if (!mhi_priv)
		return -ENOMEM;

	if (parent) {
		dev_set_name(&mhi_dev->dev, "mhi_%04x_%02u.%02u.%02u",
			     parent->device, pci_domain_nr(parent->bus),
			     parent->bus->number, PCI_SLOT(parent->devfn));
		mhi_dev->name = dev_name(&mhi_dev->dev);
		mhi_priv->numeric_id = MHI_NUMERIC_DEVICE_ID(parent->device,
						pci_domain_nr(parent->bus),
						parent->bus->number,
						PCI_SLOT(parent->devfn));
	}

	mhi_priv->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES,
						   mhi_dev->name, 0);
	mhi_priv->log_buf_extra = ipc_log_context_create(MHI_IPC_LOG_PAGES,
							 "mhi_extra_logs", 0);
	mhi_priv->mhi_cntrl = mhi_cntrl;

	/* adding it to this list only for debug purpose */
	mutex_lock(&mhi_bus.lock);
	list_add_tail(&mhi_priv->node, &mhi_bus.controller_list);
	mutex_unlock(&mhi_bus.lock);

	dev_set_drvdata(dev, mhi_priv);

	mhi_priv->offload_wq = alloc_ordered_workqueue("mhi_offload_wq",
						       WQ_HIGHPRI);
	if (!mhi_priv->offload_wq) {
		dev_err(mhi_cntrl->cntrl_dev,
			"Failed to allocate offload workqueue\n");
		ret = -ENOMEM;
		goto ipc_ctx_cleanup;
	}

	INIT_WORK(&mhi_priv->reg_write_work, mhi_reg_write_work);

	mhi_priv->reg_write_q = kcalloc(REG_WRITE_QUEUE_LEN,
					sizeof(*mhi_priv->reg_write_q),
					GFP_KERNEL);
	if (!mhi_priv->reg_write_q) {
		ret = -ENOMEM;
		goto wq_cleanup;
	}

	atomic_set(&mhi_priv->write_idx, -1);

	return 0;

wq_cleanup:
	destroy_workqueue(mhi_priv->offload_wq);
ipc_ctx_cleanup:
	ipc_log_context_destroy(mhi_priv->log_buf);

	return ret;
}

void mhi_misc_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_private *mhi_priv = dev_get_drvdata(&mhi_cntrl->mhi_dev->dev);

	if (!mhi_priv)
		return;

	mutex_lock(&mhi_bus.lock);
	list_del(&mhi_priv->node);
	mutex_unlock(&mhi_bus.lock);

	kfree(mhi_priv->reg_write_q);

	if (mhi_priv->sfr_info)
		kfree(mhi_priv->sfr_info->str);
	kfree(mhi_priv->sfr_info);
	kfree(mhi_priv->timesync);
	kfree(mhi_priv);
}

void *mhi_controller_get_privdata(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct mhi_private *mhi_priv;

	if (!mhi_cntrl)
		return NULL;

	mhi_dev = mhi_cntrl->mhi_dev;
	if (!mhi_dev)
		return NULL;

	mhi_priv = dev_get_drvdata(&mhi_dev->dev);
	if (!mhi_priv)
		return NULL;

	return mhi_priv->priv_data;
}
EXPORT_SYMBOL(mhi_controller_get_privdata);

void mhi_controller_set_privdata(struct mhi_controller *mhi_cntrl, void *priv)
{
	struct mhi_device *mhi_dev;
	struct mhi_private *mhi_priv;

	if (!mhi_cntrl)
		return;

	mhi_dev = mhi_cntrl->mhi_dev;
	if (!mhi_dev)
		return;

	mhi_priv = dev_get_drvdata(&mhi_dev->dev);
	if (!mhi_priv)
		return;

	mhi_priv->priv_data = priv;
}
EXPORT_SYMBOL(mhi_controller_set_privdata);

static struct mhi_controller *find_mhi_controller_by_name(const char *name)
{
	struct mhi_private *mhi_priv, *tmp_priv;
	struct mhi_controller *mhi_cntrl;

	list_for_each_entry_safe(mhi_priv, tmp_priv, &mhi_bus.controller_list,
				 node) {
		mhi_cntrl = mhi_priv->mhi_cntrl;
		if (mhi_cntrl->mhi_dev->name && (!strcmp(name, mhi_cntrl->mhi_dev->name)))
			return mhi_cntrl;
	}

	return NULL;
}

struct mhi_controller *mhi_bdf_to_controller(u32 domain,
					     u32 bus,
					     u32 slot,
					     u32 dev_id)
{
	char name[32];

	snprintf(name, sizeof(name), "mhi_%04x_%02u.%02u.%02u", dev_id, domain,
		 bus, slot);

	return find_mhi_controller_by_name(name);
}
EXPORT_SYMBOL(mhi_bdf_to_controller);

static int mhi_notify_fatal_cb(struct device *dev, void *data)
{
	mhi_notify(to_mhi_device(dev), MHI_CB_FATAL_ERROR);

	return 0;
}

int mhi_report_error(struct mhi_controller *mhi_cntrl)
{
	struct device *dev;
	struct mhi_private *mhi_priv;
	struct mhi_sfr_info *sfr_info;
	enum mhi_pm_state cur_state;
	unsigned long flags;

	if (!mhi_cntrl)
		return -EINVAL;

	dev = &mhi_cntrl->mhi_dev->dev;
	mhi_priv = dev_get_drvdata(dev);
	sfr_info = mhi_priv->sfr_info;

	write_lock_irqsave(&mhi_cntrl->pm_lock, flags);

	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT);
	if (cur_state != MHI_PM_SYS_ERR_DETECT) {
		MHI_ERR(dev,
			"Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_DETECT),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		write_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
		return -EPERM;
	}

	/* force inactive/error state */
	mhi_cntrl->dev_state = MHI_STATE_SYS_ERR;
	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	/* copy subsystem failure reason string if supported */
	if (sfr_info && sfr_info->buf_addr) {
		memcpy(sfr_info->str, sfr_info->buf_addr, sfr_info->len);
		MHI_ERR(dev, "mhi: %s sfr: %s\n", dev_name(dev), sfr_info->buf_addr);
	}

	/* Notify fatal error to all client drivers to halt processing */
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL,
			      mhi_notify_fatal_cb);

	return 0;
}
EXPORT_SYMBOL(mhi_report_error);
int mhi_device_configure(struct mhi_device *mhi_dev,
			 enum dma_data_direction dir,
			 struct mhi_buf *cfg_tbl,
			 int elements)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_chan *mhi_chan;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_chan_ctxt *ch_ctxt;
	int er_index, chan;

	switch (dir) {
	case DMA_TO_DEVICE:
		mhi_chan = mhi_dev->ul_chan;
		break;
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
	case DMA_NONE:
		mhi_chan = mhi_dev->dl_chan;
		break;
	default:
		return -EINVAL;
	}

	er_index = mhi_chan->er_index;
	chan = mhi_chan->chan;

	for (; elements > 0; elements--, cfg_tbl++) {
		/* update event context array */
		if (!strcmp(cfg_tbl->name, "ECA")) {
			er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[er_index];
			if (sizeof(*er_ctxt) != cfg_tbl->len) {
				MHI_ERR(dev,
					"Invalid ECA size, expected:%zu actual:%zu\n",
					sizeof(*er_ctxt), cfg_tbl->len);
				return -EINVAL;
			}
			memcpy((void *)er_ctxt, cfg_tbl->buf, sizeof(*er_ctxt));
			continue;
		}

		/* update channel context array */
		if (!strcmp(cfg_tbl->name, "CCA")) {
			ch_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[chan];
			if (cfg_tbl->len != sizeof(*ch_ctxt)) {
				MHI_ERR(dev,
					"Invalid CCA size, expected:%zu actual:%zu\n",
					sizeof(*ch_ctxt), cfg_tbl->len);
				return -EINVAL;
			}
			memcpy((void *)ch_ctxt, cfg_tbl->buf, sizeof(*ch_ctxt));
			continue;
		}

		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(mhi_device_configure);
void mhi_set_m2_timeout_ms(struct mhi_controller *mhi_cntrl, u32 timeout)
{
	struct mhi_device *mhi_dev;
	struct mhi_private *mhi_priv;

	if (!mhi_cntrl)
		return;

	mhi_dev = mhi_cntrl->mhi_dev;
	if (!mhi_dev)
		return;

	mhi_priv = dev_get_drvdata(&mhi_dev->dev);
	if (!mhi_priv)
		return;

	mhi_priv->m2_timeout_ms = timeout;
}
EXPORT_SYMBOL(mhi_set_m2_timeout_ms);

int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_clients)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);

	MHI_VERB(dev, "Entered with PM state: %s, MHI state: %s notify: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 mhi_state_str(mhi_cntrl->dev_state),
		 notify_clients ? "true" : "false");

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	read_lock_bh(&mhi_cntrl->pm_lock);
	WARN_ON(mhi_cntrl->pm_state != MHI_PM_M3);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM
	    && mhi_is_active(mhi_cntrl)) {
		mhi_cntrl->ee = MHI_EE_RDDM;
		MHI_ERR(dev, "RDDM event occurred!\n");

		/* notify critical clients with early notifications */
		mhi_report_error(mhi_cntrl);

		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
		wake_up_all(&mhi_cntrl->state_event);

		return 0;
	}

	/* Notify clients about exiting LPM */
	if (notify_clients) {
		list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans,
					 node) {
			mutex_lock(&itr->mutex);
			if (itr->mhi_dev)
				mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
			mutex_unlock(&itr->mutex);
		}
	}

	/* disable primary event ring processing to prevent interference */
	tasklet_disable(&mhi_cntrl->mhi_event->task);

	write_lock_irq(&mhi_cntrl->pm_lock);

	/* re-check to make sure no error has occurred before proceeding */
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		tasklet_enable(&mhi_cntrl->mhi_event->task);
		return -EIO;
	}

	/* restore the states */
	mhi_cntrl->pm_state = mhi_priv->saved_pm_state;
	mhi_cntrl->dev_state = mhi_priv->saved_dev_state;

	write_unlock_irq(&mhi_cntrl->pm_lock);

	switch (mhi_cntrl->pm_state) {
	case MHI_PM_M0:
		mhi_pm_m0_transition(mhi_cntrl);
		break;
	case MHI_PM_M2:
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_get(mhi_cntrl, true);
		mhi_cntrl->wake_put(mhi_cntrl, true);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		break;
	default:
		MHI_ERR(dev, "Unexpected PM state:%s after restore\n",
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
	}

	/* enable primary event ring processing and check for events */
	tasklet_enable(&mhi_cntrl->mhi_event->task);
	mhi_irq_handler(0, mhi_cntrl->mhi_event);

	return 0;
}
EXPORT_SYMBOL(mhi_pm_fast_resume);

int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_clients)
{
	struct mhi_chan *itr, *tmp;
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct device *dev = &mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* check if host/clients have any bus votes or packets to be sent */
	if (atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* wait for the device to attempt a low power mode (M2 entry) */
	wait_event_timeout(mhi_cntrl->state_event,
			   mhi_cntrl->dev_state == MHI_STATE_M2,
			   msecs_to_jiffies(mhi_priv->m2_timeout_ms));

	/* disable primary event ring processing to prevent interference */
	tasklet_disable(&mhi_cntrl->mhi_event->task);

	write_lock_irq(&mhi_cntrl->pm_lock);

	/* re-check if host/clients have any bus votes or packets to be sent */
	if (atomic_read(&mhi_cntrl->pending_pkts)) {
		ret = -EBUSY;
		goto error_suspend;
	}

	/* re-check to make sure no error has occurred before proceeding */
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_suspend;
	}

	MHI_VERB(dev, "Allowing Fast M3 transition with notify: %s\n",
		 notify_clients ? "true" : "false");

	/* save the current states */
	mhi_priv->saved_pm_state = mhi_cntrl->pm_state;
	mhi_priv->saved_dev_state = mhi_cntrl->dev_state;

	/* move from M2 to M0 as device can allow the transition but not host */
	if (mhi_cntrl->pm_state == MHI_PM_M2) {
		new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
		if (new_state != MHI_PM_M0) {
			MHI_ERR(dev, "Error setting to PM state: %s from: %s\n",
				to_mhi_pm_state_str(MHI_PM_M0),
				to_mhi_pm_state_str(mhi_cntrl->pm_state));
			ret = -EIO;
			goto error_suspend;
		}
	}

	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		MHI_ERR(dev, "Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		ret = -EIO;
		goto error_suspend;
	}

	/* set dev_state to M3_FAST and host pm_state to M3 */
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	if (new_state != MHI_PM_M3) {
		MHI_ERR(dev, "Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		ret = -EIO;
		goto error_suspend;
	}

	mhi_cntrl->dev_state = MHI_STATE_M3_FAST;
	mhi_cntrl->M3_fast++;

	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* finish reg writes before DRV hand-off to avoid noc err */
	mhi_force_reg_write(mhi_cntrl);

	/* enable primary event ring processing and check for events */
	tasklet_enable(&mhi_cntrl->mhi_event->task);
	mhi_irq_handler(0, mhi_cntrl->mhi_event);

	/* Notify clients about entering LPM */
	if (notify_clients) {
		list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans,
					 node) {
			mutex_lock(&itr->mutex);
			if (itr->mhi_dev)
				mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
			mutex_unlock(&itr->mutex);
		}
	}

	return 0;

error_suspend:
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* enable primary event ring processing and check for events */
	tasklet_enable(&mhi_cntrl->mhi_event->task);
	mhi_irq_handler(0, mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL(mhi_pm_fast_suspend);
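
/*
 * Locate and print the subsystem failure reason (SFR) file from the RDDM
 * dump image; the file contents may span multiple non-contiguous segments.
 */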
static void mhi_process_sfr(struct mhi_controller *mhi_cntrl,
			    struct file_info *info)
{
	struct mhi_buf *mhi_buf = mhi_cntrl->rddm_image->mhi_buf;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u8 *sfr_buf, *buf, *file_offset = info->file_offset;
	u32 file_size = info->file_size;
	u32 rem_seg_len = info->rem_seg_len;
	u32 seg_idx = info->seg_idx;

	sfr_buf = kzalloc(file_size + 1, GFP_KERNEL);
	if (!sfr_buf)
		return;

	/* copy through a cursor so sfr_buf keeps pointing at the allocation */
	buf = sfr_buf;
	while (file_size) {
		/* file offset starting from seg base */
		if (!rem_seg_len) {
			file_offset = mhi_buf[seg_idx].buf;
			if (file_size > mhi_buf[seg_idx].len)
				rem_seg_len = mhi_buf[seg_idx].len;
			else
				rem_seg_len = file_size;
		}

		if (file_size <= rem_seg_len) {
			memcpy(buf, file_offset, file_size);
			break;
		}

		memcpy(buf, file_offset, rem_seg_len);
		buf += rem_seg_len;
		file_size -= rem_seg_len;
		rem_seg_len = 0;
		seg_idx++;
		if (seg_idx == mhi_cntrl->rddm_image->entries) {
			MHI_ERR(dev, "invalid size for SFR file\n");
			goto err;
		}
	}
	sfr_buf[info->file_size] = '\0';

	/* force sfr string to log in kernel msg */
	MHI_ERR(dev, "%s\n", sfr_buf);
err:
	kfree(sfr_buf);
}
static int mhi_find_next_file_offset(struct mhi_controller *mhi_cntrl,
				     struct file_info *info,
				     struct rddm_table_info *table_info)
{
	struct mhi_buf *mhi_buf = mhi_cntrl->rddm_image->mhi_buf;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	if (info->rem_seg_len >= table_info->size) {
		info->file_offset += table_info->size;
		info->rem_seg_len -= table_info->size;
		return 0;
	}

	info->file_size = table_info->size - info->rem_seg_len;
	info->rem_seg_len = 0;
	/* iterate over segments until eof is reached */
	while (info->file_size) {
		info->seg_idx++;
		if (info->seg_idx == mhi_cntrl->rddm_image->entries) {
			MHI_ERR(dev, "invalid size for file %s\n",
				table_info->file_name);
			return -EINVAL;
		}
		if (info->file_size > mhi_buf[info->seg_idx].len) {
			info->file_size -= mhi_buf[info->seg_idx].len;
		} else {
			info->file_offset = mhi_buf[info->seg_idx].buf +
				info->file_size;
			info->rem_seg_len = mhi_buf[info->seg_idx].len -
				info->file_size;
			info->file_size = 0;
		}
	}

	return 0;
}

void mhi_dump_sfr(struct mhi_controller *mhi_cntrl)
{
	struct mhi_buf *mhi_buf = mhi_cntrl->rddm_image->mhi_buf;
	struct rddm_header *rddm_header =
		(struct rddm_header *)mhi_buf->buf;
	struct rddm_table_info *table_info;
	struct file_info info;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 table_size, n;

	memset(&info, 0, sizeof(info));

	if (rddm_header->header_size > sizeof(*rddm_header) ||
	    rddm_header->header_size < 8) {
		MHI_ERR(dev, "invalid reported header size %u\n",
			rddm_header->header_size);
		return;
	}

	table_size = (rddm_header->header_size - 8) / sizeof(*table_info);
	if (!table_size) {
		MHI_ERR(dev, "invalid rddm table size %u\n", table_size);
		return;
	}

	info.file_offset = (u8 *)rddm_header + rddm_header->header_size;
	info.rem_seg_len = mhi_buf[0].len - rddm_header->header_size;
	for (n = 0; n < table_size; n++) {
		table_info = &rddm_header->table_info[n];

		if (!strcmp(table_info->file_name, "Q6-SFR.bin")) {
			info.file_size = table_info->size;
			mhi_process_sfr(mhi_cntrl, &info);
			return;
		}

		if (mhi_find_next_file_offset(mhi_cntrl, &info, table_info))
			return;
	}
}
EXPORT_SYMBOL(mhi_dump_sfr);

bool mhi_scan_rddm_cookie(struct mhi_controller *mhi_cntrl, u32 cookie)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;
	u32 val;

	if (!mhi_cntrl->rddm_image || !cookie)
		return false;

	MHI_VERB(dev, "Checking BHI debug register for 0x%x\n", cookie);

	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		return false;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_ERRDBG2, &val);
	if (ret)
		return false;

	MHI_VERB(dev, "BHI_ERRDBG2 value:0x%x\n", val);

	if (val == cookie)
		return true;

	return false;
}
EXPORT_SYMBOL(mhi_scan_rddm_cookie);

void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_ee_type ee;
	int i, ret;
	u32 val;
	void __iomem *mhi_base = mhi_cntrl->regs;
	void __iomem *bhi_base = mhi_cntrl->bhi;
	void __iomem *bhie_base = mhi_cntrl->bhie;
	void __iomem *wake_db = mhi_cntrl->wake_db;
	struct {
		const char *name;
		int offset;
		void __iomem *base;
	} debug_reg[] = {
		{ "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base },
		{ "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base },
		{ "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base },
		{ "BHI_ERRCODE", BHI_ERRCODE, bhi_base },
		{ "BHI_EXECENV", BHI_EXECENV, bhi_base },
		{ "BHI_STATUS", BHI_STATUS, bhi_base },
		{ "MHI_CNTRL", MHICTRL, mhi_base },
		{ "MHI_STATUS", MHISTATUS, mhi_base },
		{ "MHI_WAKE_DB", 0, wake_db },
		{ "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base },
		{ "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base },
		{ "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base },
		{ "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base },
		{ NULL },
	};

	MHI_ERR(dev, "host pm_state:%s dev_state:%s ee:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state),
		TO_MHI_EXEC_STR(mhi_cntrl->ee));

	state = mhi_get_mhi_state(mhi_cntrl);
	ee = mhi_get_exec_env(mhi_cntrl);

	MHI_ERR(dev, "device ee: %s dev_state: %s\n", TO_MHI_EXEC_STR(ee),
		mhi_state_str(state));

	for (i = 0; debug_reg[i].name; i++) {
		if (!debug_reg[i].base)
			continue;
		ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base,
				   debug_reg[i].offset, &val);
		MHI_ERR(dev, "reg: %s val: 0x%x, ret: %d\n", debug_reg[i].name,
			val, ret);
	}
}
EXPORT_SYMBOL(mhi_debug_reg_dump);
int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us,
			       bool in_panic)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device *dev = &mhi_dev->dev;
	unsigned long pm_lock_flags;

	read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
		return -EIO;
	}

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);

	mhi_dev->dev_wake++;
	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
	mhi_cntrl->runtime_get(mhi_cntrl);

	/* Return if client doesn't want us to wait */
	if (!timeout_us) {
		if (mhi_cntrl->pm_state != MHI_PM_M0)
			MHI_ERR(dev, "Return without waiting for M0\n");

		mhi_cntrl->runtime_put(mhi_cntrl);
		return 0;
	}

	if (in_panic) {
		while (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M0 &&
		       !MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) &&
		       timeout_us > 0) {
			udelay(MHI_FORCE_WAKE_DELAY_US);
			timeout_us -= MHI_FORCE_WAKE_DELAY_US;
		}
	} else {
		while (mhi_cntrl->pm_state != MHI_PM_M0 &&
		       !MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) &&
		       timeout_us > 0) {
			udelay(MHI_FORCE_WAKE_DELAY_US);
			timeout_us -= MHI_FORCE_WAKE_DELAY_US;
		}
	}

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || timeout_us <= 0) {
		MHI_ERR(dev, "Did not enter M0, cur_state: %s pm_state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
		mhi_dev->dev_wake--;
		mhi_cntrl->runtime_put(mhi_cntrl);

		return -ETIMEDOUT;
	}

	mhi_cntrl->runtime_put(mhi_cntrl);

	return 0;
}
EXPORT_SYMBOL(mhi_device_get_sync_atomic);

static int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
				     u32 capability, u32 *offset)
{
	u32 cur_cap, next_offset;
	int ret;

	/* get the 1st supported capability offset */
	ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MISC_OFFSET,
				 MISC_CAP_MASK, offset);
	if (ret)
		return ret;
	do {
		if (*offset >= MHI_REG_SIZE)
			return -ENXIO;

		ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
					 CAP_CAPID_MASK, &cur_cap);
		if (ret)
			return ret;

		if (cur_cap == capability)
			return 0;

		ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
					 CAP_NEXT_CAP_MASK, &next_offset);
		if (ret)
			return ret;

		*offset = next_offset;
	} while (next_offset);

	return -ENXIO;
}

/* to be used only if a single event ring with the type is present */
static int mhi_get_er_index(struct mhi_controller *mhi_cntrl,
			    enum mhi_er_data_type type)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	/* find event ring for requested type */
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->data_type == type)
			return mhi_event->er_index;
	}

	return -ENOENT;
}

static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl,
			     void __iomem *bw_scale_db)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	int ret, er_index;
	u32 bw_cfg_offset;

	/* controller doesn't support dynamic bw switch */
	if (!mhi_priv->bw_scale)
		return -ENODEV;

	ret = mhi_get_capability_offset(mhi_cntrl, BW_SCALE_CAP_ID,
					&bw_cfg_offset);
	if (ret)
		return ret;

	/* No ER configured to support BW scale */
	er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_BW_SCALE);
	if (er_index < 0)
		return er_index;

	bw_cfg_offset += BW_SCALE_CFG_OFFSET;

	mhi_priv->bw_scale_db = bw_scale_db;

	/* advertise host support */
	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
		      MHI_BW_SCALE_SETUP(er_index));

	MHI_VERB(dev, "Bandwidth scaling setup complete. Event ring:%d\n",
		 er_index);

	return 0;
}

int mhi_controller_setup_timesync(struct mhi_controller *mhi_cntrl,
				  u64 (*time_get)(struct mhi_controller *c),
				  int (*lpm_disable)(struct mhi_controller *c),
				  int (*lpm_enable)(struct mhi_controller *c))
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_timesync *mhi_tsync = kzalloc(sizeof(*mhi_tsync),
						 GFP_KERNEL);

	if (!mhi_tsync)
		return -ENOMEM;

	mhi_tsync->time_get = time_get;
	mhi_tsync->lpm_disable = lpm_disable;
	mhi_tsync->lpm_enable = lpm_enable;

	mhi_priv->timesync = mhi_tsync;

	return 0;
}
EXPORT_SYMBOL(mhi_controller_setup_timesync);

static int mhi_init_timesync(struct mhi_controller *mhi_cntrl,
			     void __iomem *time_db)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_timesync *mhi_tsync = mhi_priv->timesync;
	u32 time_offset;
	int ret, er_index;

	if (!mhi_tsync)
		return -EINVAL;

	ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID,
					&time_offset);
	if (ret)
		return ret;

	/* save time_offset for obtaining time via MMIO register reads */
	mhi_tsync->time_reg = mhi_cntrl->regs + time_offset;

	mutex_init(&mhi_tsync->mutex);

	/* get timesync event ring configuration */
	er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_TIMESYNC);
	if (er_index < 0)
		return 0;

	spin_lock_init(&mhi_tsync->lock);
	INIT_LIST_HEAD(&mhi_tsync->head);

	mhi_tsync->time_db = time_db;

	/* advertise host support */
	mhi_write_reg(mhi_cntrl, mhi_tsync->time_reg, TIMESYNC_CFG_OFFSET,
		      MHI_TIMESYNC_DB_SETUP(er_index));

	MHI_VERB(dev, "Time synchronization DB mode setup complete. Event ring:%d\n",
		 er_index);

	return 0;
}

int mhi_init_host_notification(struct mhi_controller *mhi_cntrl,
			       void __iomem *host_notify_db)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;
	u32 host_notify_cfg_offset;

	ret = mhi_get_capability_offset(mhi_cntrl, MHI_HOST_NOTIFY_CAP_ID,
					&host_notify_cfg_offset);
	if (ret)
		return ret;

	host_notify_cfg_offset += MHI_HOST_NOTIFY_CFG_OFFSET;

	mhi_cntrl->host_notify_db = host_notify_db;

	/* advertise host support */
	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, host_notify_cfg_offset,
		      MHI_HOST_NOTIFY_CFG_SETUP);

	MHI_VERB(dev, "Host notification DB setup complete.\n");

	return 0;
}

int mhi_misc_init_mmio(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 chdb_off;
	int ret;

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, CHDBOFF,
			   &chdb_off);
	if (ret) {
		MHI_ERR(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	ret = mhi_init_bw_scale(mhi_cntrl, (mhi_cntrl->regs + chdb_off +
					    (8 * MHI_BW_SCALE_CHAN_DB)));
	if (ret)
		MHI_LOG(dev, "BW scale setup failure\n");

	ret = mhi_init_timesync(mhi_cntrl, (mhi_cntrl->regs + chdb_off +
					    (8 * MHI_TIMESYNC_CHAN_DB)));
	if (ret)
		MHI_LOG(dev, "Time synchronization setup failure\n");

	ret = mhi_init_host_notification(mhi_cntrl, (mhi_cntrl->regs + chdb_off +
						     (8 * MHI_HOST_NOTIFY_DB)));
	if (ret)
		MHI_LOG(dev, "Host notification doorbell setup failure\n");

	return 0;
}
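
/*
 * Ring the host notification doorbell to ask the device to disable tracing;
 * only permitted while the device is in M0 and mission mode (AMSS).
 */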
int mhi_host_notify_db_disable_trace(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_ee_type ee;
	unsigned long pm_lock_flags;

	if (mhi_cntrl->host_notify_db) {
		read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
		if (mhi_cntrl->pm_state == MHI_PM_DISABLE) {
			read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
			return -EINVAL;
		}

		if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
			read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
			return -EIO;
		}

		state = mhi_get_mhi_state(mhi_cntrl);
		ee = mhi_get_exec_env(mhi_cntrl);

		MHI_VERB(dev, "Entered with MHI state: %s, EE: %s\n",
			 mhi_state_str(state),
			 TO_MHI_EXEC_STR(ee));

		/* Make sure that we are indeed in M0 state and not in RDDM as well */
		if (state == MHI_STATE_M0 && ee == MHI_EE_AMSS) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->host_notify_db, 1);
			read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
			MHI_LOG(dev, "Host notification DB write Success\n");
			return 0;
		}

		read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
		MHI_LOG(dev, "Cannot invoke DB due to invalid M state and/or EE\n");
		return -EPERM;
	}

	MHI_LOG(dev, "Host notification DB feature NOT supported or enabled\n");
	return -EPERM;
}
EXPORT_SYMBOL(mhi_host_notify_db_disable_trace);
/* Recycle by fast forwarding WP to the last posted event */
static void mhi_recycle_fwd_ev_ring_element
		(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring)
{
	dma_addr_t ctxt_wp;

	/* update the WP */
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;

	/* update the context WP based on the RP to support fast forwarding */
	ctxt_wp = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = ctxt_wp;

	/* update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* visible to other cores */
	smp_wmb();
}
/* dedicated time synchronization event ring processing */
int mhi_process_misc_tsync_ev_ring(struct mhi_controller *mhi_cntrl,
				   struct mhi_event *mhi_event,
				   u32 event_quota)
{
	struct mhi_ring_element *dev_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_timesync *mhi_tsync = mhi_priv->timesync;
	u32 sequence;
	u64 remote_time;
	int ret = 0;

	spin_lock_bh(&mhi_event->lock);
	if (!is_valid_ring_ptr(ev_ring, er_ctxt->rp)) {
		MHI_ERR(dev, "Event ring rp points outside of the event ring or unaligned rp %llx\n",
			er_ctxt->rp);
		spin_unlock_bh(&mhi_event->lock);
		goto exit_tsync_process;
	}

	dev_rp = mhi_misc_to_virtual(ev_ring, er_ctxt->rp);
	if (ev_ring->rp == dev_rp) {
		spin_unlock_bh(&mhi_event->lock);
		goto exit_tsync_process;
	}

	/* if rp points to base, we need to wrap it around */
	if (dev_rp == ev_ring->base)
		dev_rp = ev_ring->base + ev_ring->len;
	dev_rp--;

	/* fast forward to currently processed element and recycle er */
	ev_ring->rp = dev_rp;
	ev_ring->wp = dev_rp - 1;
	if (ev_ring->wp < ev_ring->base)
		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
	mhi_recycle_fwd_ev_ring_element(mhi_cntrl, ev_ring);

	if (WARN_ON(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_TSYNC_EVENT)) {
		MHI_ERR(dev, "!TIMESYNC event\n");
		ret = -EINVAL;
		spin_unlock_bh(&mhi_event->lock);
		goto exit_tsync_process;
	}

	sequence = MHI_TRE_GET_EV_SEQ(dev_rp);
	remote_time = MHI_TRE_GET_EV_TIME(dev_rp);

	MHI_VERB(dev, "Received TSYNC event with seq: 0x%x time: 0x%llx\n",
		 sequence, remote_time);

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_event->lock);

	mutex_lock(&mhi_tsync->mutex);

	if (WARN_ON(mhi_tsync->int_sequence != sequence)) {
		MHI_ERR(dev, "Unexpected response: 0x%x Expected: 0x%x\n",
			sequence, mhi_tsync->int_sequence);

		mhi_cntrl->runtime_put(mhi_cntrl);
		mhi_device_put(mhi_cntrl->mhi_dev);

		mutex_unlock(&mhi_tsync->mutex);
		ret = -EINVAL;
		goto exit_tsync_process;
	}

	do {
		struct tsync_node *tsync_node;

		spin_lock(&mhi_tsync->lock);
		tsync_node = list_first_entry_or_null(&mhi_tsync->head,
						      struct tsync_node, node);
		if (!tsync_node) {
			spin_unlock(&mhi_tsync->lock);
			break;
		}

		list_del(&tsync_node->node);
		spin_unlock(&mhi_tsync->lock);

		tsync_node->cb_func(tsync_node->mhi_dev,
				    tsync_node->sequence,
				    mhi_tsync->local_time, remote_time);
		kfree(tsync_node);
	} while (true);

	mhi_tsync->db_pending = false;
	mhi_tsync->remote_time = remote_time;
	complete(&mhi_tsync->completion);

	mhi_cntrl->runtime_put(mhi_cntrl);
	mhi_device_put(mhi_cntrl->mhi_dev);

	mutex_unlock(&mhi_tsync->mutex);

exit_tsync_process:
	MHI_VERB(dev, "exit er_index: %u, ret: %d\n", mhi_event->er_index, ret);

	return ret;
}
/* dedicated bw scale event ring processing */
int mhi_process_misc_bw_ev_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
{
	struct mhi_ring_element *dev_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	enum mhi_bw_scale_req_status result = MHI_BW_SCALE_NACK;
	int ret = -EINVAL;

	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
		goto exit_bw_scale_process;

	spin_lock_bh(&mhi_event->lock);
	if (!is_valid_ring_ptr(ev_ring, er_ctxt->rp)) {
		MHI_ERR(dev, "Event ring rp points outside of the event ring or unaligned rp %llx\n",
			er_ctxt->rp);
		spin_unlock_bh(&mhi_event->lock);
		return 0;
	}

	dev_rp = mhi_misc_to_virtual(ev_ring, er_ctxt->rp);

	/*
	 * Check whether the event ring local pointer equals the context
	 * pointer; if they are the same, there is nothing to process.
	 */
	if (ev_ring->rp == dev_rp) {
		MHI_VERB(dev, "Ignore BW event:0x%llx ev_ring RP:0x%llx\n",
			 dev_rp->ptr,
			 (u64)mhi_misc_to_physical(ev_ring, ev_ring->rp));
		spin_unlock_bh(&mhi_event->lock);
		return 0;
	}

	/* if rp points to base, we need to wrap it around */
	if (dev_rp == ev_ring->base)
		dev_rp = ev_ring->base + ev_ring->len;
	dev_rp--;

	/* fast forward to currently processed element and recycle er */
	ev_ring->rp = dev_rp;
	ev_ring->wp = dev_rp - 1;
	if (ev_ring->wp < ev_ring->base)
		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
	mhi_recycle_fwd_ev_ring_element(mhi_cntrl, ev_ring);

	if (WARN_ON(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT)) {
		MHI_ERR(dev, "!BW SCALE REQ event\n");
		spin_unlock_bh(&mhi_event->lock);
		goto exit_bw_scale_process;
	}

	link_info.target_link_speed = MHI_TRE_GET_EV_LINKSPEED(dev_rp);
	link_info.target_link_width = MHI_TRE_GET_EV_LINKWIDTH(dev_rp);
	link_info.sequence_num = MHI_TRE_GET_EV_BW_REQ_SEQ(dev_rp);

	MHI_VERB(dev, "Received BW_REQ with seq:%d link speed:0x%x width:0x%x\n",
		 link_info.sequence_num,
		 link_info.target_link_speed,
		 link_info.target_link_width);

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_event->lock);

	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
	if (ret)
		goto exit_bw_scale_process;
	mhi_cntrl->runtime_get(mhi_cntrl);

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_priv->bw_scale(mhi_cntrl, &link_info);
	if (!ret) {
		*cur_info = link_info;
		result = MHI_BW_SCALE_SUCCESS;
	} else if (ret == -EINVAL) {
		result = MHI_BW_SCALE_INVALID;
	}

	write_lock_bh(&mhi_cntrl->pm_lock);
	mhi_priv->bw_response = MHI_BW_SCALE_RESULT(result,
						    link_info.sequence_num);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		mhi_write_reg(mhi_cntrl, mhi_priv->bw_scale_db, 0,
			      mhi_priv->bw_response);
		mhi_priv->bw_response = 0;
	} else {
		MHI_VERB(dev, "Cached BW response for seq: %u, result: %d\n",
			 link_info.sequence_num, mhi_priv->bw_response);
	}
	write_unlock_bh(&mhi_cntrl->pm_lock);

	mhi_cntrl->runtime_put(mhi_cntrl);
	mhi_device_put(mhi_cntrl->mhi_dev);
	mutex_unlock(&mhi_cntrl->pm_mutex);

exit_bw_scale_process:
	MHI_VERB(dev, "exit er_index:%u ret:%d\n", mhi_event->er_index, ret);

	return ret;
}
  1265. void mhi_misc_dbs_pending(struct mhi_controller *mhi_cntrl)
  1266. {
  1267. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  1268. struct mhi_private *mhi_priv = dev_get_drvdata(dev);
  1269. if (mhi_priv->bw_scale && mhi_priv->bw_response) {
  1270. mhi_write_reg(mhi_cntrl, mhi_priv->bw_scale_db, 0,
  1271. mhi_priv->bw_response);
  1272. MHI_VERB(dev, "Completed BW response: %d\n", mhi_priv->bw_response);
  1273. mhi_priv->bw_response = 0;
  1274. }
  1275. }
  1276. void mhi_controller_set_bw_scale_cb(struct mhi_controller *mhi_cntrl,
  1277. int (*cb_func)(struct mhi_controller *mhi_cntrl,
  1278. struct mhi_link_info *link_info))
  1279. {
  1280. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  1281. struct mhi_private *mhi_priv = dev_get_drvdata(dev);
  1282. mhi_priv->bw_scale = cb_func;
  1283. }
  1284. EXPORT_SYMBOL(mhi_controller_set_bw_scale_cb);
void mhi_controller_set_base(struct mhi_controller *mhi_cntrl, phys_addr_t base)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);

	mhi_priv->base_addr = base;
}
EXPORT_SYMBOL(mhi_controller_set_base);

int mhi_controller_get_base(struct mhi_controller *mhi_cntrl, phys_addr_t *base)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);

	if (mhi_priv->base_addr) {
		*base = mhi_priv->base_addr;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(mhi_controller_get_base);

u32 mhi_controller_get_numeric_id(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);

	return mhi_priv->numeric_id;
}
EXPORT_SYMBOL(mhi_controller_get_numeric_id);
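
/*
 * Return the physical address of the channel doorbell region, computed as
 * the controller base address plus the CHDBOFF register value. Requires
 * register access to be valid in the current power management state.
 */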
int mhi_get_channel_db_base(struct mhi_device *mhi_dev, phys_addr_t *value)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	u32 offset;
	int ret;

	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		return -EIO;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, CHDBOFF, &offset);
	if (ret)
		return -EIO;

	*value = mhi_priv->base_addr + offset;

	return ret;
}
EXPORT_SYMBOL(mhi_get_channel_db_base);

int mhi_get_event_ring_db_base(struct mhi_device *mhi_dev, phys_addr_t *value)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	u32 offset;
	int ret;

	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		return -EIO;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, ERDBOFF, &offset);
	if (ret)
		return -EIO;

	*value = mhi_priv->base_addr + offset;

	return ret;
}
EXPORT_SYMBOL(mhi_get_event_ring_db_base);

struct mhi_device *mhi_get_device_for_channel(struct mhi_controller *mhi_cntrl,
					      u32 channel)
{
	if (channel >= mhi_cntrl->max_chan)
		return NULL;

	return mhi_cntrl->mhi_chan[channel].mhi_dev;
}
EXPORT_SYMBOL(mhi_get_device_for_channel);

void mhi_controller_set_loglevel(struct mhi_controller *mhi_cntrl,
				 enum MHI_DEBUG_LEVEL lvl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);

	mhi_priv->log_lvl = lvl;
}
EXPORT_SYMBOL(mhi_controller_set_loglevel);

#if !IS_ENABLED(CONFIG_MHI_DTR)
long mhi_device_ioctl(struct mhi_device *mhi_dev, unsigned int cmd,
		      unsigned long arg)
{
	return -EIO;
}
EXPORT_SYMBOL(mhi_device_ioctl);
#endif
int mhi_controller_set_sfr_support(struct mhi_controller *mhi_cntrl, size_t len)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_sfr_info *sfr_info;

	sfr_info = kzalloc(sizeof(*sfr_info), GFP_KERNEL);
	if (!sfr_info)
		return -ENOMEM;

	sfr_info->len = len;
	sfr_info->str = kzalloc(len, GFP_KERNEL);
	if (!sfr_info->str) {
		/* avoid leaking sfr_info if the string buffer allocation fails */
		kfree(sfr_info);
		return -ENOMEM;
	}

	mhi_priv->sfr_info = sfr_info;

	return 0;
}
EXPORT_SYMBOL(mhi_controller_set_sfr_support);
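
/*
 * Mission mode housekeeping: log the host/device time delta when timesync
 * is available, kick off the DTR control channels, and (re)allocate the
 * subsystem failure reason (SFR) DMA buffer before sending the SFR config
 * command to the device.
 */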
void mhi_misc_mission_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_sfr_info *sfr_info = mhi_priv->sfr_info;
	struct mhi_device *dtr_dev;
	u64 local, remote;
	int ret = -EIO;

	/* Attempt to print local and remote SOC time delta for debug */
	ret = mhi_get_remote_time_sync(mhi_cntrl->mhi_dev, &local, &remote);
	if (!ret)
		MHI_LOG(dev, "Timesync: local: %llx, remote: %llx\n", local, remote);

	/* IP_CTRL DTR channel ID */
	dtr_dev = mhi_get_device_for_channel(mhi_cntrl, MHI_DTR_CHANNEL);
	if (dtr_dev)
		mhi_notify(dtr_dev, MHI_CB_DTR_START_CHANNELS);

	/* initialize SFR */
	if (!sfr_info)
		return;

	/* do a clean-up if we reach here post SSR */
	memset(sfr_info->str, 0, sfr_info->len);

	sfr_info->buf_addr = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sfr_info->len,
						&sfr_info->dma_addr,
						GFP_KERNEL);
	if (!sfr_info->buf_addr) {
		MHI_ERR(dev, "Failed to allocate memory for sfr\n");
		return;
	}

	init_completion(&sfr_info->completion);

	ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_SFR_CFG);
	if (ret) {
		MHI_ERR(dev, "Failed to send sfr cfg cmd\n");
		return;
	}

	ret = wait_for_completion_timeout(&sfr_info->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || sfr_info->ccs != MHI_EV_CC_SUCCESS)
		MHI_ERR(dev, "Failed to get sfr cfg cmd completion\n");
}
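
/* Release the SFR DMA buffer, if one was allocated, when MHI is disabled */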
void mhi_misc_disable(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_sfr_info *sfr_info = mhi_priv->sfr_info;

	if (sfr_info && sfr_info->buf_addr) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, sfr_info->len,
				  sfr_info->buf_addr, sfr_info->dma_addr);
		sfr_info->buf_addr = NULL;
	}
}

void mhi_misc_cmd_configure(struct mhi_controller *mhi_cntrl, unsigned int type,
			    u64 *ptr, u32 *dword0, u32 *dword1)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_sfr_info *sfr_info = mhi_priv->sfr_info;

	if (type == MHI_CMD_SFR_CFG && sfr_info) {
		*ptr = MHI_TRE_CMD_SFR_CFG_PTR(sfr_info->dma_addr);
		*dword0 = MHI_TRE_CMD_SFR_CFG_DWORD0(sfr_info->len - 1);
		*dword1 = MHI_TRE_CMD_SFR_CFG_DWORD1;
	}
}

void mhi_misc_cmd_completion(struct mhi_controller *mhi_cntrl,
			     unsigned int type, unsigned int ccs)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_sfr_info *sfr_info = mhi_priv->sfr_info;

	if (type == MHI_CMD_SFR_CFG && sfr_info) {
		sfr_info->ccs = ccs;
		complete(&sfr_info->completion);
	}
}
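
/*
 * Synchronously sample the host and device clocks. Preemption and local
 * interrupts are disabled around the reads so the host timestamp is taken
 * as close as possible to the device time registers; TIME HIGH is read
 * before and after TIME LOW, and the latter read forms the upper 32 bits.
 * If an asynchronous doorbell request is already pending, wait for its
 * completion and return those times instead.
 */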
int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
			     u64 *t_host,
			     u64 *t_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_timesync *mhi_tsync = mhi_priv->timesync;
	u64 local_time;
	u32 tdev_lo = U32_MAX, tdev_hi = U32_MAX;
	int ret;

	/* not all devices support time features */
	if (!mhi_tsync)
		return -EINVAL;

	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		MHI_ERR(dev, "MHI is not in active state, pm_state:%s\n",
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	mutex_lock(&mhi_tsync->mutex);

	/* return times from last async request completion */
	if (mhi_tsync->db_pending) {
		local_time = mhi_tsync->local_time;
		mutex_unlock(&mhi_tsync->mutex);

		ret = wait_for_completion_timeout(&mhi_tsync->completion,
					msecs_to_jiffies(mhi_cntrl->timeout_ms));
		if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || !ret) {
			MHI_ERR(dev, "Pending DB request did not complete, abort\n");
			return -EAGAIN;
		}

		*t_host = local_time;
		*t_dev = mhi_tsync->remote_time;

		return 0;
	}

	/* bring to M0 state */
	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
	if (ret)
		goto error_unlock;

	mhi_cntrl->runtime_get(mhi_cntrl);

	/* disable link level low power modes */
	ret = mhi_tsync->lpm_disable(mhi_cntrl);
	if (ret)
		goto error_invalid_state;

	/*
	 * Time-critical code to fetch the device times; the delay between
	 * these two steps should be as deterministic as possible.
	 */
	preempt_disable();
	local_irq_disable();

	ret = mhi_read_reg(mhi_cntrl, mhi_tsync->time_reg,
			   TIMESYNC_TIME_HIGH_OFFSET, &tdev_hi);
	if (ret)
		MHI_ERR(dev, "Time HIGH register read error\n");

	ret = mhi_read_reg(mhi_cntrl, mhi_tsync->time_reg,
			   TIMESYNC_TIME_LOW_OFFSET, &tdev_lo);
	if (ret)
		MHI_ERR(dev, "Time LOW register read error\n");

	ret = mhi_read_reg(mhi_cntrl, mhi_tsync->time_reg,
			   TIMESYNC_TIME_HIGH_OFFSET, &tdev_hi);
	if (ret)
		MHI_ERR(dev, "Time HIGH register read error\n");

	*t_dev = (u64)tdev_hi << 32 | tdev_lo;
	*t_host = mhi_tsync->time_get(mhi_cntrl);

	local_irq_enable();
	preempt_enable();

	mhi_tsync->lpm_enable(mhi_cntrl);

error_invalid_state:
	mhi_cntrl->runtime_put(mhi_cntrl);
	mhi_device_put(mhi_cntrl->mhi_dev);
error_unlock:
	mutex_unlock(&mhi_tsync->mutex);

	return ret;
}
EXPORT_SYMBOL(mhi_get_remote_time_sync);
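
/*
 * Illustrative usage sketch (not part of this file): clients request a
 * doorbell-based time sample asynchronously and receive both timestamps in
 * a callback once the device reports the remote time. The my_time_cb()
 * callback and sequence value below are hypothetical.
 *
 *	static void my_time_cb(struct mhi_device *mhi_dev, u32 sequence,
 *			       u64 local_time, u64 remote_time)
 *	{
 *		dev_info(&mhi_dev->dev, "seq %u: host %llu dev %llu\n",
 *			 sequence, local_time, remote_time);
 *	}
 *
 *	ret = mhi_get_remote_time(mhi_dev, 0x1234, my_time_cb);
 */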
int mhi_get_remote_time(struct mhi_device *mhi_dev,
			u32 sequence,
			void (*cb_func)(struct mhi_device *mhi_dev,
					u32 sequence,
					u64 local_time,
					u64 remote_time))
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_private *mhi_priv = dev_get_drvdata(dev);
	struct mhi_timesync *mhi_tsync = mhi_priv->timesync;
	struct tsync_node *tsync_node;
	int ret = 0;

	/* not all devices support all time features */
	if (!mhi_tsync || !mhi_tsync->time_db)
		return -EINVAL;

	mutex_lock(&mhi_tsync->mutex);

	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
	if (ret)
		goto error_unlock;

	mhi_cntrl->runtime_get(mhi_cntrl);

	MHI_LOG(dev, "Enter with pm_state:%s MHI_STATE:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	/*
	 * Technically we could use GFP_KERNEL, but use GFP_ATOMIC to avoid
	 * the extra chances of being scheduled out.
	 */
	tsync_node = kzalloc(sizeof(*tsync_node), GFP_ATOMIC);
	if (!tsync_node) {
		ret = -ENOMEM;
		goto error_no_mem;
	}

	tsync_node->sequence = sequence;
	tsync_node->cb_func = cb_func;
	tsync_node->mhi_dev = mhi_dev;

	if (mhi_tsync->db_pending) {
		mhi_cntrl->runtime_put(mhi_cntrl);
		mhi_device_put(mhi_cntrl->mhi_dev);
		goto skip_tsync_db;
	}

	mhi_tsync->int_sequence++;
	if (mhi_tsync->int_sequence == 0xFFFFFFFF)
		mhi_tsync->int_sequence = 0;

	/* disable link level low power modes */
	ret = mhi_tsync->lpm_disable(mhi_cntrl);
	if (ret) {
		MHI_ERR(dev, "LPM disable request failed for %s!\n", mhi_dev->name);
		goto error_invalid_state;
	}

	/*
	 * Time-critical code; the delay between these two steps should be
	 * as deterministic as possible.
	 */
	preempt_disable();
	local_irq_disable();

	mhi_tsync->local_time = mhi_tsync->time_get(mhi_cntrl);
	mhi_write_reg(mhi_cntrl, mhi_tsync->time_db, 0, mhi_tsync->int_sequence);

	/* write must go through immediately */
	wmb();

	local_irq_enable();
	preempt_enable();

	mhi_tsync->lpm_enable(mhi_cntrl);

	MHI_VERB(dev, "time DB request with seq:0x%llx\n", mhi_tsync->int_sequence);

	mhi_tsync->db_pending = true;
	init_completion(&mhi_tsync->completion);

skip_tsync_db:
	spin_lock(&mhi_tsync->lock);
	list_add_tail(&tsync_node->node, &mhi_tsync->head);
	spin_unlock(&mhi_tsync->lock);

	mutex_unlock(&mhi_tsync->mutex);

	return 0;

error_invalid_state:
	kfree(tsync_node);
error_no_mem:
	mhi_cntrl->runtime_put(mhi_cntrl);
	mhi_device_put(mhi_cntrl->mhi_dev);
error_unlock:
	mutex_unlock(&mhi_tsync->mutex);

	return ret;
}
EXPORT_SYMBOL(mhi_get_remote_time);
/* MHI host reset request */
int mhi_force_reset(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	MHI_VERB(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 mhi_state_str(mhi_cntrl->dev_state),
		 TO_MHI_EXEC_STR(mhi_cntrl->ee));

	/* notify critical clients in absence of RDDM */
	mhi_report_error(mhi_cntrl);

	mhi_soc_reset(mhi_cntrl);

	return mhi_rddm_download_status(mhi_cntrl);
}
EXPORT_SYMBOL(mhi_force_reset);
/* Get SoC info before registering mhi controller */
int mhi_get_soc_info(struct mhi_controller *mhi_cntrl)
{
	u32 soc_info;
	int ret;

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto done;

	mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
	mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
	mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
	mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);

done:
	return ret;
}
EXPORT_SYMBOL(mhi_get_soc_info);