/* drivers/bus/mhi: Qualcomm MHI PCIe controller driver (mhi_qcom.c) */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. // Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  3. #include <linux/debugfs.h>
  4. #include <linux/delay.h>
  5. #include <linux/device.h>
  6. #include <linux/dma-direction.h>
  7. #include <linux/list.h>
  8. #include <linux/of.h>
  9. #include <linux/memblock.h>
  10. #include <linux/module.h>
  11. #include <linux/pci.h>
  12. #include <linux/pm_runtime.h>
  13. #include <linux/slab.h>
  14. #include <linux/uaccess.h>
  15. #include <linux/mhi.h>
  16. #include <linux/mhi_misc.h>
  17. #include "mhi_qcom.h"
/*
 * MHI_CHANNEL_CONFIG_UL - static initializer for an uplink (host-to-device)
 * channel entry. Uplink channels in this driver are never wake-capable or
 * auto-queued and carry no local elements, so those fields are fixed here.
 */
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, elems, ev_ring, ee, \
			      dbmode, lpm, poll, offload, modeswitch, \
			      ch_type) \
	{ \
		.dir = DMA_TO_DEVICE, \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = elems, \
		.event_ring = ev_ring, \
		.ee_mask = BIT(ee), \
		.pollcfg = poll, \
		.doorbell = dbmode, \
		.lpm_notify = lpm, \
		.offload_channel = offload, \
		.doorbell_mode_switch = modeswitch, \
		.wake_capable = false, \
		.auto_queue = false, \
		.local_elements = 0, \
		.type = ch_type, \
	}
/*
 * MHI_CHANNEL_CONFIG_DL - static initializer for a downlink (device-to-host)
 * channel entry. Unlike the UL variant, wake capability, auto-queue and the
 * local element count are per-channel parameters.
 */
#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, elems, ev_ring, ee, \
			      dbmode, lpm, poll, offload, modeswitch, \
			      wake, autoq, local_el, ch_type) \
	{ \
		.dir = DMA_FROM_DEVICE, \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = elems, \
		.event_ring = ev_ring, \
		.ee_mask = BIT(ee), \
		.pollcfg = poll, \
		.doorbell = dbmode, \
		.lpm_notify = lpm, \
		.offload_channel = offload, \
		.doorbell_mode_switch = modeswitch, \
		.wake_capable = wake, \
		.auto_queue = autoq, \
		.local_elements = local_el, \
		.type = ch_type, \
	}
/*
 * MHI_EVENT_CONFIG - static initializer for an event ring entry. ch_num is
 * only meaningful for rings dedicated to a single (usually hardware) channel;
 * control/data rings shared across channels pass 0.
 */
#define MHI_EVENT_CONFIG(ev_ring, ev_irq, type, num_elems, int_mod, \
			 prio, dbmode, hw, cl_manage, offload, ch_num) \
	{ \
		.num_elements = num_elems, \
		.irq_moderation_ms = int_mod, \
		.irq = ev_irq, \
		.priority = prio, \
		.mode = dbmode, \
		.data_type = type, \
		.hardware_event = hw, \
		.client_managed = cl_manage, \
		.offload_channel = offload, \
		.channel = ch_num, \
	}
/*
 * Channel map for SDX65-class modems (also reused for SDX75 and the debug
 * target via modem_qcom_sdx65_mhi_config). Grouped by the execution
 * environment (EE) in which each channel becomes available.
 */
static const struct mhi_channel_config modem_qcom_sdx65_mhi_channels[] = {
	/* SBL channels */
	MHI_CHANNEL_CONFIG_UL(2, "SAHARA", 128, 1, MHI_EE_SBL,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(3, "SAHARA", 128, 1, MHI_EE_SBL,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_DL(25, "BL", 32, 1, MHI_EE_SBL,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	/* AMSS channels */
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 2, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 2, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 3, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(8, "QDSS", 64, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(9, "QDSS", 64, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(10, "EFS", 64, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	/* wake-capable */
	MHI_CHANNEL_CONFIG_DL(11, "EFS", 64, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      true, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI0", 64, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI0", 64, 2, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(16, "QMI1", 64, 3, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(17, "QMI1", 64, 3, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(18, "IP_CTRL", 64, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	/* auto-queue */
	MHI_CHANNEL_CONFIG_DL(19, "IP_CTRL", 64, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, true, 0, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 32, 2, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	/* auto-queue */
	MHI_CHANNEL_CONFIG_DL(21, "IPCR", 32, 2, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, true, 0, 0),
	MHI_CHANNEL_CONFIG_UL(26, "DCI", 64, 3, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(27, "DCI", 64, 3, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 64, 3, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 64, 3, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(80, "AUDIO_VOICE_0", 32, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(81, "AUDIO_VOICE_0", 32, 1, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	/* hardware-accelerated data path; burst doorbell mode enabled */
	MHI_CHANNEL_CONFIG_UL(100, "IP_HW0", 512, 6, MHI_EE_AMSS,
			      MHI_DB_BRST_ENABLE, false, 0, false, true, 0),
	MHI_CHANNEL_CONFIG_DL(101, "IP_HW0", 512, 7, MHI_EE_AMSS,
			      MHI_DB_BRST_ENABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_DL(102, "IP_HW_ADPL", 1, 8, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, true, 0, true, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_DL(103, "IP_HW_QDSS", 1, 9, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
	/* coalesced inbound channel with 3078 local elements */
	MHI_CHANNEL_CONFIG_DL(104, "IP_HW0_RSC", 512, 7, MHI_EE_AMSS,
			      MHI_DB_BRST_ENABLE, false, 0, false, false,
			      false, false, 3078,
			      MHI_CH_TYPE_INBOUND_COALESCED),
	MHI_CHANNEL_CONFIG_UL(105, "RMNET_DATA_LL", 512, 10, MHI_EE_AMSS,
			      MHI_DB_BRST_ENABLE, false, 0, false, true, 0),
	MHI_CHANNEL_CONFIG_DL(106, "RMNET_DATA_LL", 512, 10, MHI_EE_AMSS,
			      MHI_DB_BRST_ENABLE, false, 0, false, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(107, "IP_HW_MHIP_1", 1, 11, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, true, true, 0),
	MHI_CHANNEL_CONFIG_DL(108, "IP_HW_MHIP_1", 1, 12, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, true, 0, true, false,
			      false, false, 0, 0),
	MHI_CHANNEL_CONFIG_UL(109, "RMNET_CTL", 128, 13, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false, 0),
	MHI_CHANNEL_CONFIG_DL(110, "RMNET_CTL", 128, 14, MHI_EE_AMSS,
			      MHI_DB_BRST_DISABLE, false, 0, false, false,
			      false, false, 0, 0),
};
/*
 * Event ring map. Ring 0 is the control ring; rings 1-3 are shared software
 * data rings; rings 4/5 carry BW-scale and timesync events; rings 6+ are
 * dedicated to hardware channels. Non-const because the MHI core may adjust
 * entries (e.g. for client-managed rings) at registration time.
 */
static struct mhi_event_config modem_qcom_sdx65_mhi_events[] = {
	MHI_EVENT_CONFIG(0, 1, MHI_ER_CTRL, 64, 0,
			 MHI_ER_PRIORITY_HI_NOSLEEP, MHI_DB_BRST_DISABLE, false,
			 false, false, 0),
	MHI_EVENT_CONFIG(1, 2, MHI_ER_DATA, 256, 0,
			 MHI_ER_PRIORITY_DEFAULT_NOSLEEP, MHI_DB_BRST_DISABLE,
			 false, false, false, 0),
	MHI_EVENT_CONFIG(2, 3, MHI_ER_DATA, 256, 0,
			 MHI_ER_PRIORITY_DEFAULT_NOSLEEP, MHI_DB_BRST_DISABLE,
			 false, false, false, 0),
	MHI_EVENT_CONFIG(3, 4, MHI_ER_DATA, 256, 0,
			 MHI_ER_PRIORITY_DEFAULT_NOSLEEP, MHI_DB_BRST_DISABLE,
			 false, false, false, 0),
	MHI_EVENT_CONFIG(4, 5, MHI_ER_BW_SCALE, 64, 0,
			 MHI_ER_PRIORITY_HI_SLEEP, MHI_DB_BRST_DISABLE, false,
			 false, false, 0),
	MHI_EVENT_CONFIG(5, 6, MHI_ER_TIMESYNC, 64, 0,
			 MHI_ER_PRIORITY_HI_SLEEP, MHI_DB_BRST_DISABLE, false,
			 false, false, 0),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG(6, 7, MHI_ER_DATA, 1024, 5,
			 MHI_ER_PRIORITY_DEFAULT_NOSLEEP, MHI_DB_BRST_ENABLE,
			 true, false, false, 100),
	MHI_EVENT_CONFIG(7, 7, MHI_ER_DATA, 2048, 5,
			 MHI_ER_PRIORITY_DEFAULT_NOSLEEP,
			 MHI_DB_BRST_ENABLE, true, true, false, 101),
	MHI_EVENT_CONFIG(8, 8, MHI_ER_DATA, 0, 0,
			 MHI_ER_PRIORITY_DEFAULT_NOSLEEP, MHI_DB_BRST_ENABLE,
			 true, true, true, 102),
	MHI_EVENT_CONFIG(9, 9, MHI_ER_DATA, 1024, 5,
			 MHI_ER_PRIORITY_DEFAULT_NOSLEEP, MHI_DB_BRST_DISABLE,
			 true, false, false, 103),
	/*
	 * NOTE(review): ring 10 serves the RMNET_DATA_LL hardware channels
	 * (105/106) but lists channel 0 here - confirm this is intentional.
	 */
	MHI_EVENT_CONFIG(10, 10, MHI_ER_DATA, 1024, 1,
			 MHI_ER_PRIORITY_HI_NOSLEEP, MHI_DB_BRST_ENABLE, true,
			 false, false, 0),
	MHI_EVENT_CONFIG(11, 11, MHI_ER_DATA, 0, 0,
			 MHI_ER_PRIORITY_DEFAULT_NOSLEEP, MHI_DB_BRST_ENABLE,
			 true, true, true, 107),
	MHI_EVENT_CONFIG(12, 12, MHI_ER_DATA, 0, 0,
			 MHI_ER_PRIORITY_DEFAULT_NOSLEEP, MHI_DB_BRST_ENABLE,
			 true, true, true, 108),
	MHI_EVENT_CONFIG(13, 13, MHI_ER_DATA, 1024, 1,
			 MHI_ER_PRIORITY_HI_NOSLEEP, MHI_DB_BRST_DISABLE, true,
			 false, false, 109),
	MHI_EVENT_CONFIG(14, 15, MHI_ER_DATA, 1024, 0,
			 MHI_ER_PRIORITY_HI_NOSLEEP, MHI_DB_BRST_DISABLE, true,
			 false, false, 110),
};
/* Top-level controller configuration shared by all device tables below. */
static const struct mhi_controller_config modem_qcom_sdx65_mhi_config = {
	.max_channels = 128,
	.timeout_ms = 2000,
	.buf_len = 0x8000,	/* 32 KiB default transfer buffer length */
	.num_channels = ARRAY_SIZE(modem_qcom_sdx65_mhi_channels),
	.ch_cfg = modem_qcom_sdx65_mhi_channels,
	.num_events = ARRAY_SIZE(modem_qcom_sdx65_mhi_events),
	.event_cfg = modem_qcom_sdx65_mhi_events,
};
/* Per-device info for SDX65 modems (PCI device ID 0x0308). */
static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
	.device_id = 0x0308,
	.name = "esoc0",
	.fw_image = "sdx65m/xbl.elf",
	.edl_image = "sdx65m/edl.mbn",
	.config = &modem_qcom_sdx65_mhi_config,
	.bar_num = MHI_PCI_BAR_NUM,
	.dma_data_width = 64,
	.allow_m1 = false,
	.skip_forced_suspend = true,
	.sfr_support = true,	/* subsystem failure reason reporting */
	.timesync = true,
	.drv_support = false,
};
/* Per-device info for SDX75 modems (PCI device ID 0x0309); reuses the SDX65
 * channel/event configuration with SDX75 firmware paths. */
static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
	.device_id = 0x0309,
	.name = "esoc0",
	.fw_image = "sdx75m/xbl.elf",
	.edl_image = "sdx75m/edl.mbn",
	.config = &modem_qcom_sdx65_mhi_config,
	.bar_num = MHI_PCI_BAR_NUM,
	.dma_data_width = 64,
	.allow_m1 = false,
	.skip_forced_suspend = true,
	.sfr_support = true,
	.timesync = true,
	.drv_support = false,
};
/* Debug target: M1 allowed, no SFR/timesync; uses debug firmware image. */
static const struct mhi_pci_dev_info mhi_qcom_debug_info = {
	.device_id = MHI_PCIE_DEBUG_ID,
	.name = "qcom-debug",
	.fw_image = "debug.mbn",
	.edl_image = "debug.mbn",
	.config = &modem_qcom_sdx65_mhi_config,
	.bar_num = MHI_PCI_BAR_NUM,
	.dma_data_width = 64,
	.allow_m1 = true,
	.skip_forced_suspend = true,
	.sfr_support = false,
	.timesync = false,
	.drv_support = false,
};
/* PCI IDs this driver binds to; driver_data selects the matching dev info. */
static const struct pci_device_id mhi_pcie_device_id[] = {
	{ PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0308),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
	{ PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0309),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
	{ PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID),
		.driver_data = (kernel_ulong_t) &mhi_qcom_debug_info },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id);
/* Driver-wide debug mode; zero-initialized, i.e. defaults to MHI_DEBUG_OFF. */
static enum mhi_debug_mode debug_mode;

/* Human-readable names for the debug modes, indexed by enum value. */
const char * const mhi_debug_mode_str[MHI_DEBUG_MODE_MAX] = {
	[MHI_DEBUG_OFF] = "Debug mode OFF",
	[MHI_DEBUG_ON] = "Debug mode ON",
	[MHI_DEBUG_NO_LPM] = "Debug mode - no LPM",
};

/* Human-readable names for the suspend modes, indexed by enum value. */
const char * const mhi_suspend_mode_str[MHI_SUSPEND_MODE_MAX] = {
	[MHI_ACTIVE_STATE] = "Active",
	[MHI_DEFAULT_SUSPEND] = "Default",
	[MHI_FAST_LINK_OFF] = "Fast Link Off",
	[MHI_FAST_LINK_ON] = "Fast Link On",
};

/* Forward declaration: needed by the debugfs power-up hook defined earlier. */
static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl);
  294. static int mhi_link_status(struct mhi_controller *mhi_cntrl)
  295. {
  296. struct pci_dev *pci_dev = to_pci_dev(mhi_cntrl->cntrl_dev);
  297. u16 dev_id;
  298. int ret;
  299. /* try reading device IDs, a mismatch could indicate a link down */
  300. ret = pci_read_config_word(pci_dev, PCI_DEVICE_ID, &dev_id);
  301. return (ret || dev_id != pci_dev->device) ? -EIO : 0;
  302. }
  303. static int mhi_qcom_read_reg(struct mhi_controller *mhi_cntrl,
  304. void __iomem *addr, u32 *out)
  305. {
  306. u32 tmp = readl_relaxed(addr);
  307. if (PCI_INVALID_READ(tmp) && mhi_link_status(mhi_cntrl))
  308. return -EIO;
  309. *out = tmp;
  310. return 0;
  311. }
/* MMIO write accessor for the MHI core (relaxed, matching the read side). */
static void mhi_qcom_write_reg(struct mhi_controller *mhi_cntrl,
			       void __iomem *addr, u32 val)
{
	writel_relaxed(val, addr);
}
/* Thin pass-throughs to the arch layer for timesync and link LPM control;
 * presumably registered as controller callbacks with the MHI core. */
static u64 mhi_qcom_time_get(struct mhi_controller *mhi_cntrl)
{
	return mhi_arch_time_get(mhi_cntrl);
}

static int mhi_qcom_lpm_disable(struct mhi_controller *mhi_cntrl)
{
	return mhi_arch_link_lpm_disable(mhi_cntrl);
}

static int mhi_qcom_lpm_enable(struct mhi_controller *mhi_cntrl)
{
	return mhi_arch_link_lpm_enable(mhi_cntrl);
}
/*
 * Debugfs hook: write a non-zero value to power up the controller when it
 * was deliberately left off (e.g. debug mode). Rejects a zero value and an
 * already-powered controller with -EINVAL.
 */
static int mhi_debugfs_power_up(void *data, u64 val)
{
	struct mhi_controller *mhi_cntrl = data;
	struct mhi_qcom_priv *mhi_priv = mhi_controller_get_privdata(mhi_cntrl);
	int ret;

	if (!val || mhi_priv->powered_on)
		return -EINVAL;

	MHI_CNTRL_LOG("Trigger power up from %s\n",
		      TO_MHI_DEBUG_MODE_STR(debug_mode));

	ret = mhi_qcom_power_up(mhi_cntrl);
	if (ret) {
		MHI_CNTRL_ERR("Failed to power up MHI\n");
		return ret;
	}

	/* only marked powered after a successful power-up */
	mhi_priv->powered_on = true;

	return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(debugfs_power_up_fops, NULL,
			 mhi_debugfs_power_up, "%llu\n");
/*
 * Debugfs hook: force an M3 exit (return to M0) by taking and immediately
 * dropping a runtime PM reference, which resumes the link if suspended.
 */
static int mhi_debugfs_trigger_m0(void *data, u64 val)
{
	struct mhi_controller *mhi_cntrl = data;

	MHI_CNTRL_LOG("Trigger M3 Exit\n");
	pm_runtime_get(mhi_cntrl->cntrl_dev);
	pm_runtime_put(mhi_cntrl->cntrl_dev);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(debugfs_trigger_m0_fops, NULL,
			 mhi_debugfs_trigger_m0, "%llu\n");
/*
 * Debugfs hook: request an M3 entry by scheduling a runtime autosuspend
 * (takes effect once the usage count allows it).
 */
static int mhi_debugfs_trigger_m3(void *data, u64 val)
{
	struct mhi_controller *mhi_cntrl = data;

	MHI_CNTRL_LOG("Trigger M3 Entry\n");
	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
	pm_request_autosuspend(mhi_cntrl->cntrl_dev);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(debugfs_trigger_m3_fops, NULL,
			 mhi_debugfs_trigger_m3, "%llu\n");
  368. static int mhi_debugfs_disable_pci_lpm_get(void *data, u64 *val)
  369. {
  370. struct mhi_controller *mhi_cntrl = data;
  371. struct mhi_qcom_priv *mhi_priv = mhi_controller_get_privdata(mhi_cntrl);
  372. *val = mhi_priv->disable_pci_lpm;
  373. MHI_CNTRL_LOG("PCIe low power modes (D3 hot/cold) are %s\n",
  374. mhi_priv->disable_pci_lpm ? "Disabled" : "Enabled");
  375. return 0;
  376. }
  377. static int mhi_debugfs_disable_pci_lpm_set(void *data, u64 val)
  378. {
  379. struct mhi_controller *mhi_cntrl = data;
  380. struct mhi_qcom_priv *mhi_priv = mhi_controller_get_privdata(mhi_cntrl);
  381. mutex_lock(&mhi_cntrl->pm_mutex);
  382. mhi_priv->disable_pci_lpm = val ? true : false;
  383. mutex_unlock(&mhi_cntrl->pm_mutex);
  384. MHI_CNTRL_LOG("%s PCIe low power modes (D3 hot/cold)\n",
  385. val ? "Disabled" : "Enabled");
  386. return 0;
  387. }
  388. DEFINE_DEBUGFS_ATTRIBUTE(debugfs_pci_lpm_fops, mhi_debugfs_disable_pci_lpm_get,
  389. mhi_debugfs_disable_pci_lpm_set, "%llu\n");
/*
 * mhi_deinit_pci_dev - tear down everything mhi_init_pci_dev() set up:
 * runtime PM configuration, MSI vectors, the IRQ array, the register
 * mapping and the PCI BAR/device. A no-op if no controller was attached.
 */
void mhi_deinit_pci_dev(struct pci_dev *pci_dev,
			const struct mhi_pci_dev_info *dev_info)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(&pci_dev->dev);

	if (!mhi_cntrl)
		return;

	/* disable runtime PM before releasing the underlying resources */
	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
	pm_runtime_dont_use_autosuspend(mhi_cntrl->cntrl_dev);
	pm_runtime_disable(mhi_cntrl->cntrl_dev);

	pci_free_irq_vectors(pci_dev);
	kfree(mhi_cntrl->irq);
	mhi_cntrl->irq = NULL;
	iounmap(mhi_cntrl->regs);
	mhi_cntrl->regs = NULL;
	mhi_cntrl->reg_len = 0;
	mhi_cntrl->nr_irqs = 0;

	pci_clear_master(pci_dev);
	pci_release_region(pci_dev, dev_info->bar_num);
	pci_disable_device(pci_dev);
}
  410. static int mhi_init_pci_dev(struct pci_dev *pci_dev,
  411. const struct mhi_pci_dev_info *dev_info)
  412. {
  413. struct mhi_controller *mhi_cntrl = dev_get_drvdata(&pci_dev->dev);
  414. phys_addr_t base;
  415. int ret;
  416. int i;
  417. if (!mhi_cntrl)
  418. return -ENODEV;
  419. ret = pci_assign_resource(pci_dev, dev_info->bar_num);
  420. if (ret) {
  421. MHI_CNTRL_ERR("Error assign pci resources, ret: %d\n", ret);
  422. return ret;
  423. }
  424. ret = pci_enable_device(pci_dev);
  425. if (ret) {
  426. MHI_CNTRL_ERR("Error enabling device, ret: %d\n", ret);
  427. goto error_enable_device;
  428. }
  429. ret = pci_request_region(pci_dev, dev_info->bar_num, "mhi");
  430. if (ret) {
  431. MHI_CNTRL_ERR("Error pci_request_region, ret: %d\n", ret);
  432. goto error_request_region;
  433. }
  434. pci_set_master(pci_dev);
  435. base = pci_resource_start(pci_dev, dev_info->bar_num);
  436. mhi_cntrl->reg_len = pci_resource_len(pci_dev, dev_info->bar_num);
  437. mhi_cntrl->regs = ioremap(base, mhi_cntrl->reg_len);
  438. if (!mhi_cntrl->regs) {
  439. MHI_CNTRL_ERR("Error ioremap region\n");
  440. goto error_ioremap;
  441. }
  442. /* reserved MSI for BHI plus one for each event ring */
  443. mhi_cntrl->nr_irqs = dev_info->config->num_events + 1;
  444. ret = pci_alloc_irq_vectors(pci_dev, mhi_cntrl->nr_irqs,
  445. mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
  446. if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->nr_irqs) {
  447. MHI_CNTRL_ERR("Failed to enable MSI, ret: %d\n", ret);
  448. goto error_req_msi;
  449. }
  450. mhi_cntrl->irq = kmalloc_array(mhi_cntrl->nr_irqs,
  451. sizeof(*mhi_cntrl->irq), GFP_KERNEL);
  452. if (!mhi_cntrl->irq) {
  453. ret = -ENOMEM;
  454. goto error_alloc_msi_vec;
  455. }
  456. for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
  457. mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
  458. if (mhi_cntrl->irq[i] < 0) {
  459. ret = mhi_cntrl->irq[i];
  460. goto error_get_irq_vec;
  461. }
  462. }
  463. /* configure runtime pm */
  464. pm_runtime_set_autosuspend_delay(mhi_cntrl->cntrl_dev,
  465. MHI_RPM_SUSPEND_TMR_MS);
  466. pm_runtime_use_autosuspend(mhi_cntrl->cntrl_dev);
  467. pm_suspend_ignore_children(mhi_cntrl->cntrl_dev, true);
  468. /*
  469. * pci framework will increment usage count (twice) before
  470. * calling local device driver probe function.
  471. * 1st pci.c pci_pm_init() calls pm_runtime_forbid
  472. * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
  473. * Framework expect pci device driver to call
  474. * pm_runtime_put_noidle to decrement usage count after
  475. * successful probe and call pm_runtime_allow to enable
  476. * runtime suspend.
  477. */
  478. pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
  479. pm_runtime_put_noidle(mhi_cntrl->cntrl_dev);
  480. return 0;
  481. error_get_irq_vec:
  482. kfree(mhi_cntrl->irq);
  483. mhi_cntrl->irq = NULL;
  484. error_alloc_msi_vec:
  485. pci_free_irq_vectors(pci_dev);
  486. error_req_msi:
  487. iounmap(mhi_cntrl->regs);
  488. error_ioremap:
  489. pci_clear_master(pci_dev);
  490. error_request_region:
  491. pci_disable_device(pci_dev);
  492. error_enable_device:
  493. pci_release_region(pci_dev, dev_info->bar_num);
  494. return ret;
  495. }
/*
 * Runtime suspend: move MHI into its suspended state, then suspend the
 * PCIe link. Succeeds trivially while the controller is not yet powered.
 * Any failure is reported to the PM core as -EBUSY so it retries later.
 */
static int mhi_runtime_suspend(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
	struct mhi_qcom_priv *mhi_priv = mhi_controller_get_privdata(mhi_cntrl);
	int ret = 0;

	MHI_CNTRL_LOG("Entered\n");

	mutex_lock(&mhi_cntrl->pm_mutex);

	if (!mhi_priv->powered_on) {
		MHI_CNTRL_LOG("Not fully powered, return success\n");
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return 0;
	}

	ret = mhi_pm_suspend(mhi_cntrl);
	if (ret) {
		MHI_CNTRL_LOG("Abort due to ret: %d\n", ret);
		mhi_priv->suspend_mode = MHI_ACTIVE_STATE;
		goto exit_runtime_suspend;
	}

	mhi_priv->suspend_mode = MHI_DEFAULT_SUSPEND;

	ret = mhi_arch_link_suspend(mhi_cntrl);

	/* failed suspending link abort mhi suspend */
	if (ret) {
		MHI_CNTRL_LOG("Failed to suspend link, abort suspend\n");
		mhi_pm_resume(mhi_cntrl);
		mhi_priv->suspend_mode = MHI_ACTIVE_STATE;
	}

exit_runtime_suspend:
	mutex_unlock(&mhi_cntrl->pm_mutex);
	MHI_CNTRL_LOG("Exited with ret: %d\n", ret);

	return (ret < 0) ? -EBUSY : 0;
}
  527. static int mhi_runtime_idle(struct device *dev)
  528. {
  529. struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
  530. MHI_CNTRL_LOG("Entered returning -EBUSY\n");
  531. /*
  532. * RPM framework during runtime resume always calls
  533. * rpm_idle to see if device ready to suspend.
  534. * If dev.power usage_count count is 0, rpm fw will call
  535. * rpm_idle cb to see if device is ready to suspend.
  536. * if cb return 0, or cb not defined the framework will
  537. * assume device driver is ready to suspend;
  538. * therefore, fw will schedule runtime suspend.
  539. * In MHI power management, MHI host shall go to
  540. * runtime suspend only after entering MHI State M2, even if
  541. * usage count is 0. Return -EBUSY to disable automatic suspend.
  542. */
  543. return -EBUSY;
  544. }
/*
 * Runtime resume: bring the PCIe link back first, then transition MHI out
 * of whichever suspend mode it entered (regular M3 resume vs. fast resume
 * with the link kept on). Succeeds trivially while not yet powered.
 */
static int mhi_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
	struct mhi_qcom_priv *mhi_priv = mhi_controller_get_privdata(mhi_cntrl);

	MHI_CNTRL_LOG("Entered\n");

	mutex_lock(&mhi_cntrl->pm_mutex);

	if (!mhi_priv->powered_on) {
		MHI_CNTRL_LOG("Not fully powered, return success\n");
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return 0;
	}

	/* turn on link */
	ret = mhi_arch_link_resume(mhi_cntrl);
	if (ret)
		goto rpm_resume_exit;

	/* transition to M0 state */
	if (mhi_priv->suspend_mode == MHI_DEFAULT_SUSPEND)
		ret = mhi_pm_resume(mhi_cntrl);
	else
		ret = mhi_pm_fast_resume(mhi_cntrl, MHI_FAST_LINK_ON);

	mhi_priv->suspend_mode = MHI_ACTIVE_STATE;

rpm_resume_exit:
	mutex_unlock(&mhi_cntrl->pm_mutex);
	MHI_CNTRL_LOG("Exited with ret: %d\n", ret);

	return (ret < 0) ? -EBUSY : 0;
}
/* System resume simply reuses the runtime resume path. */
static int mhi_system_resume(struct device *dev)
{
	return mhi_runtime_resume(dev);
}
/*
 * System suspend entry. Without DRV support this degenerates to the
 * regular runtime suspend. With DRV, the suspend flavor depends on
 * whether clients still hold runtime PM votes: if so, a "fast" suspend
 * keeps the PCIe link up; otherwise a normal suspend is attempted, with
 * fast suspend as the fallback when MHI reports busy. Non-static:
 * referenced from outside this file (dev_pm_ops setup).
 */
int mhi_system_suspend(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
	struct mhi_qcom_priv *mhi_priv = mhi_controller_get_privdata(mhi_cntrl);
	const struct mhi_pci_dev_info *dev_info = mhi_priv->dev_info;
	int ret;

	MHI_CNTRL_LOG("Entered\n");

	/* No DRV support - use regular suspends */
	if (!dev_info->drv_support)
		return mhi_runtime_suspend(dev);

	mutex_lock(&mhi_cntrl->pm_mutex);

	if (!mhi_priv->powered_on) {
		MHI_CNTRL_LOG("Not fully powered, return success\n");
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return 0;
	}

	/*
	 * pci framework always makes a dummy vote to rpm
	 * framework to resume before calling system suspend
	 * hence usage count is minimum one
	 */
	if (atomic_read(&dev->power.usage_count) > 1) {
		/*
		 * clients have requested to keep link on, try
		 * fast suspend. No need to notify clients since
		 * we will not be turning off the pcie link
		 */
		ret = mhi_pm_fast_suspend(mhi_cntrl, false);
		mhi_priv->suspend_mode = MHI_FAST_LINK_ON;
	} else {
		/* try normal suspend */
		mhi_priv->suspend_mode = MHI_DEFAULT_SUSPEND;
		ret = mhi_pm_suspend(mhi_cntrl);

		/*
		 * normal suspend failed because we're busy, try
		 * fast suspend before aborting system suspend.
		 * this could happens if client has disabled
		 * device lpm but no active vote for PCIe from
		 * apps processor
		 */
		if (ret == -EBUSY) {
			ret = mhi_pm_fast_suspend(mhi_cntrl, true);
			mhi_priv->suspend_mode = MHI_FAST_LINK_ON;
		}
	}

	if (ret) {
		MHI_CNTRL_LOG("Abort due to ret: %d\n", ret);
		mhi_priv->suspend_mode = MHI_ACTIVE_STATE;
		goto exit_system_suspend;
	}

	ret = mhi_arch_link_suspend(mhi_cntrl);

	/* failed suspending link abort mhi suspend */
	if (ret) {
		MHI_CNTRL_LOG("Failed to suspend link, abort suspend\n");
		if (mhi_priv->suspend_mode == MHI_DEFAULT_SUSPEND)
			mhi_pm_resume(mhi_cntrl);
		else
			mhi_pm_fast_resume(mhi_cntrl, MHI_FAST_LINK_OFF);
		mhi_priv->suspend_mode = MHI_ACTIVE_STATE;
	}

exit_system_suspend:
	mutex_unlock(&mhi_cntrl->pm_mutex);
	MHI_CNTRL_LOG("Exited with ret: %d\n", ret);

	return ret;
}
/* Nothing to do in the noirq phases; provided as explicit no-ops. */
static int mhi_suspend_noirq(struct device *dev)
{
	return 0;
}

static int mhi_resume_noirq(struct device *dev)
{
	return 0;
}
  649. static int mhi_force_suspend(struct mhi_controller *mhi_cntrl)
  650. {
  651. struct mhi_qcom_priv *mhi_priv = mhi_controller_get_privdata(mhi_cntrl);
  652. int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms, 100);
  653. int ret = -EIO;
  654. MHI_CNTRL_LOG("Entered\n");
  655. mutex_lock(&mhi_cntrl->pm_mutex);
  656. for (; itr; itr--) {
  657. /*
  658. * This function get called soon as device entered mission mode
  659. * so most of the channels are still in disabled state. However,
  660. * sbl channels are active and clients could be trying to close
  661. * channels while we trying to suspend the link. So, we need to
  662. * re-try if MHI is busy
  663. */
  664. ret = mhi_pm_suspend(mhi_cntrl);
  665. if (!ret || ret != -EBUSY)
  666. break;
  667. MHI_CNTRL_LOG("MHI busy, sleeping and retry\n");
  668. msleep(100);
  669. }
  670. if (ret) {
  671. MHI_CNTRL_ERR("Force suspend ret:%d\n", ret);
  672. goto exit_force_suspend;
  673. }
  674. mhi_priv->suspend_mode = MHI_DEFAULT_SUSPEND;
  675. ret = mhi_arch_link_suspend(mhi_cntrl);
  676. exit_force_suspend:
  677. mutex_unlock(&mhi_cntrl->pm_mutex);
  678. return ret;
  679. }
  680. static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl)
  681. {
  682. int ret;
  683. /* when coming out of SSR, initial states are not valid */
  684. mhi_cntrl->ee = MHI_EE_MAX;
  685. mhi_cntrl->dev_state = MHI_STATE_RESET;
  686. ret = mhi_prepare_for_power_up(mhi_cntrl);
  687. if (ret)
  688. return ret;
  689. ret = mhi_async_power_up(mhi_cntrl);
  690. if (ret) {
  691. mhi_unprepare_after_power_down(mhi_cntrl);
  692. return ret;
  693. }
  694. if (mhi_cntrl->debugfs_dentry) {
  695. debugfs_create_file("m0", 0444, mhi_cntrl->debugfs_dentry, mhi_cntrl,
  696. &debugfs_trigger_m0_fops);
  697. debugfs_create_file("m3", 0444, mhi_cntrl->debugfs_dentry, mhi_cntrl,
  698. &debugfs_trigger_m3_fops);
  699. debugfs_create_file("disable_pci_lpm", 0644, mhi_cntrl->debugfs_dentry,
  700. mhi_cntrl, &debugfs_pci_lpm_fops);
  701. }
  702. return ret;
  703. }
  704. static int mhi_runtime_get(struct mhi_controller *mhi_cntrl)
  705. {
  706. return pm_runtime_get(mhi_cntrl->cntrl_dev);
  707. }
  708. static void mhi_runtime_put(struct mhi_controller *mhi_cntrl)
  709. {
  710. pm_runtime_put_noidle(mhi_cntrl->cntrl_dev);
  711. }
  712. static void mhi_runtime_last_busy(struct mhi_controller *mhi_cntrl)
  713. {
  714. pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
  715. }
/*
 * Status callback invoked by the MHI core.
 *
 * MHI_CB_IDLE: device is idle; request a runtime autosuspend.
 * MHI_CB_EE_MISSION_MODE: device entered mission mode; optionally force a
 * suspend/resume cycle (so the device can apply mission-mode PCIe PHY
 * settings), notify the arch layer, and allow runtime PM.
 * All other callbacks are logged and ignored.
 */
static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
			  enum mhi_callback reason)
{
	struct mhi_qcom_priv *mhi_priv = mhi_controller_get_privdata(mhi_cntrl);
	const struct mhi_pci_dev_info *dev_info = mhi_priv->dev_info;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int ret;

	switch (reason) {
	case MHI_CB_IDLE:
		MHI_CNTRL_LOG("Schedule runtime suspend\n");
		pm_runtime_mark_last_busy(dev);
		pm_request_autosuspend(dev);
		break;
	case MHI_CB_EE_MISSION_MODE:
		MHI_CNTRL_LOG("Mission mode entry\n");

		/* in no-LPM debug mode, skip the forced suspend/resume cycle */
		if (debug_mode == MHI_DEBUG_NO_LPM) {
			mhi_arch_mission_mode_enter(mhi_cntrl);
			MHI_CNTRL_LOG("Exit due to: %s\n",
				      TO_MHI_DEBUG_MODE_STR(debug_mode));
			break;
		}

		/*
		 * we need to force a suspend so device can switch to
		 * mission mode pcie phy settings.
		 */
		if (!dev_info->skip_forced_suspend) {
			/* hold a runtime-PM ref across the suspend/resume pair */
			pm_runtime_get(dev);
			ret = mhi_force_suspend(mhi_cntrl);
			if (!ret) {
				MHI_CNTRL_LOG("Resume after forced suspend\n");
				mhi_runtime_resume(dev);
			}
			pm_runtime_put(dev);
		}

		mhi_arch_mission_mode_enter(mhi_cntrl);

		/* from mission mode on, runtime PM may manage the device */
		pm_runtime_allow(dev);
		break;
	default:
		MHI_CNTRL_LOG("Unhandled cb: 0x%x\n", reason);
	}
}
/* Setting to use this mhi_qcom_pm_domain ops will let PM framework override the
 * ops from dev->bus->pm which is pci_dev_pm_ops from pci-driver.c. This ops
 * has to take care everything device driver needed which is currently done
 * from pci_dev_pm_ops.
 *
 * Installed on the PCI device only when dev_info->drv_support is set
 * (see mhi_qcom_register_controller).
 */
static struct dev_pm_domain mhi_qcom_pm_domain = {
	.ops = {
		SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume)
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mhi_suspend_noirq,
					      mhi_resume_noirq)
		SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
				   mhi_runtime_resume,
				   mhi_runtime_idle)
	}
};
  772. #ifdef CONFIG_MHI_BUS_DEBUG
  773. #define MHI_QCOM_DEBUG_LEVEL MHI_MSG_LVL_VERBOSE
  774. static struct dentry *mhi_qcom_debugfs;
  775. static int mhi_qcom_debugfs_debug_mode_show(struct seq_file *m, void *d)
  776. {
  777. seq_printf(m, "%s\n", TO_MHI_DEBUG_MODE_STR(debug_mode));
  778. return 0;
  779. }
  780. static ssize_t mhi_qcom_debugfs_debug_mode_write(struct file *file,
  781. const char __user *ubuf,
  782. size_t count, loff_t *ppos)
  783. {
  784. struct seq_file *m = file->private_data;
  785. u32 input;
  786. if (kstrtou32_from_user(ubuf, count, 0, &input))
  787. return -EINVAL;
  788. if (input >= MHI_DEBUG_MODE_MAX)
  789. return -EINVAL;
  790. debug_mode = input;
  791. seq_printf(m, "Changed debug mode to: %s\n",
  792. TO_MHI_DEBUG_MODE_STR(debug_mode));
  793. return count;
  794. }
  795. static int mhi_qcom_debugfs_debug_mode_open(struct inode *inode, struct file *p)
  796. {
  797. return single_open(p, mhi_qcom_debugfs_debug_mode_show,
  798. inode->i_private);
  799. }
/* File operations for the "debug_mode" debugfs node (read/write). */
static const struct file_operations debugfs_debug_mode_fops = {
	.open = mhi_qcom_debugfs_debug_mode_open,
	.write = mhi_qcom_debugfs_debug_mode_write,
	.release = single_release,
	.read = seq_read,
};
  806. void mhi_qcom_debugfs_init(void)
  807. {
  808. mhi_qcom_debugfs = debugfs_create_dir("mhi_qcom", NULL);
  809. debugfs_create_file("debug_mode", 0644, mhi_qcom_debugfs, NULL,
  810. &debugfs_debug_mode_fops);
  811. }
  812. void mhi_qcom_debugfs_exit(void)
  813. {
  814. debugfs_remove_recursive(mhi_qcom_debugfs);
  815. mhi_qcom_debugfs = NULL;
  816. }
  817. #else
  818. #define MHI_QCOM_DEBUG_LEVEL MHI_MSG_LVL_ERROR
/* No-op stubs when CONFIG_MHI_BUS_DEBUG is disabled: no debugfs is created. */
static inline void mhi_qcom_debugfs_init(void)
{
}
static inline void mhi_qcom_debugfs_exit(void)
{
}
  825. #endif
  826. static int mhi_qcom_register_controller(struct mhi_controller *mhi_cntrl,
  827. struct mhi_qcom_priv *mhi_priv)
  828. {
  829. const struct mhi_pci_dev_info *dev_info = mhi_priv->dev_info;
  830. const struct mhi_controller_config *mhi_cntrl_config = dev_info->config;
  831. struct pci_dev *pci_dev = to_pci_dev(mhi_cntrl->cntrl_dev);
  832. struct device_node *of_node = pci_dev->dev.of_node;
  833. struct mhi_device *mhi_dev;
  834. bool use_s1;
  835. u32 addr_win[2];
  836. const char *iommu_dma_type;
  837. int ret;
  838. mhi_cntrl->iova_start = 0;
  839. mhi_cntrl->iova_stop = DMA_BIT_MASK(dev_info->dma_data_width);
  840. of_node = of_parse_phandle(of_node, "qcom,iommu-group", 0);
  841. if (of_node) {
  842. use_s1 = true;
  843. /*
  844. * s1 translation can be in bypass or fastmap mode
  845. * if "qcom,iommu-dma" property is missing, we assume s1 is
  846. * enabled and in default (no fastmap/atomic) mode
  847. */
  848. ret = of_property_read_string(of_node, "qcom,iommu-dma",
  849. &iommu_dma_type);
  850. if (!ret && !strcmp("bypass", iommu_dma_type))
  851. use_s1 = false;
  852. /*
  853. * if s1 translation enabled pull iova addr from dt using
  854. * iommu-dma-addr-pool property specified addresses
  855. */
  856. if (use_s1) {
  857. ret = of_property_read_u32_array(of_node,
  858. "qcom,iommu-dma-addr-pool",
  859. addr_win, 2);
  860. if (ret) {
  861. of_node_put(of_node);
  862. return -EINVAL;
  863. }
  864. /*
  865. * If S1 is enabled, set MHI_CTRL start address to 0
  866. * so we can use low level mapping api to map buffers
  867. * outside of smmu domain
  868. */
  869. mhi_cntrl->iova_start = 0;
  870. mhi_cntrl->iova_stop = addr_win[0] + addr_win[1];
  871. }
  872. of_node_put(of_node);
  873. }
  874. /* setup power management apis */
  875. mhi_cntrl->status_cb = mhi_status_cb;
  876. mhi_cntrl->runtime_get = mhi_runtime_get;
  877. mhi_cntrl->runtime_put = mhi_runtime_put;
  878. mhi_cntrl->runtime_last_busy = mhi_runtime_last_busy;
  879. mhi_cntrl->read_reg = mhi_qcom_read_reg;
  880. mhi_cntrl->write_reg = mhi_qcom_write_reg;
  881. ret = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
  882. if (ret)
  883. return ret;
  884. mhi_cntrl->fw_image = dev_info->fw_image;
  885. mhi_cntrl->edl_image = dev_info->edl_image;
  886. mhi_controller_set_privdata(mhi_cntrl, mhi_priv);
  887. mhi_controller_set_loglevel(mhi_cntrl, MHI_QCOM_DEBUG_LEVEL);
  888. mhi_controller_set_base(mhi_cntrl,
  889. pci_resource_start(pci_dev, dev_info->bar_num));
  890. if (dev_info->sfr_support) {
  891. ret = mhi_controller_set_sfr_support(mhi_cntrl,
  892. MHI_MAX_SFR_LEN);
  893. if (ret)
  894. goto error_register;
  895. }
  896. if (dev_info->timesync) {
  897. ret = mhi_controller_setup_timesync(mhi_cntrl,
  898. &mhi_qcom_time_get,
  899. &mhi_qcom_lpm_disable,
  900. &mhi_qcom_lpm_enable);
  901. if (ret)
  902. goto error_register;
  903. }
  904. if (dev_info->drv_support)
  905. pci_dev->dev.pm_domain = &mhi_qcom_pm_domain;
  906. /* set name based on PCIe BDF format */
  907. mhi_dev = mhi_cntrl->mhi_dev;
  908. dev_set_name(&mhi_dev->dev, "mhi_%04x_%02u.%02u.%02u", pci_dev->device,
  909. pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
  910. PCI_SLOT(pci_dev->devfn));
  911. mhi_dev->name = dev_name(&mhi_dev->dev);
  912. mhi_priv->cntrl_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
  913. dev_info->name, 0);
  914. return 0;
  915. error_register:
  916. mhi_unregister_controller(mhi_cntrl);
  917. return -EINVAL;
  918. }
/*
 * Common probe body: bring up the PCI device, register the controller on
 * first probe, initialize the arch/PCIe layer, configure DMA masks, and
 * start the MHI power-up sequence (unless debug_mode defers it to a
 * debugfs "power_up" trigger).
 *
 * On failure the PCI device is torn back down and drvdata is cleared.
 * Returns 0 on success or a negative error code.
 */
int mhi_qcom_pci_probe(struct pci_dev *pci_dev,
		       struct mhi_controller *mhi_cntrl,
		       struct mhi_qcom_priv *mhi_priv)
{
	const struct mhi_pci_dev_info *dev_info = mhi_priv->dev_info;
	int ret;

	dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
	mhi_cntrl->cntrl_dev = &pci_dev->dev;

	ret = mhi_init_pci_dev(pci_dev, dev_info);
	if (ret)
		return ret;

	/* driver removal boolean set to true indicates initial probe */
	if (mhi_priv->driver_remove) {
		ret = mhi_qcom_register_controller(mhi_cntrl, mhi_priv);
		if (ret)
			goto error_init_pci;
	}

	mhi_priv->powered_on = true;

	ret = mhi_arch_pcie_init(mhi_cntrl);
	if (ret)
		goto error_init_pci;

	ret = dma_set_mask_and_coherent(mhi_cntrl->cntrl_dev,
					DMA_BIT_MASK(dev_info->dma_data_width));
	if (ret)
		goto error_init_pci;

	/* in debug mode, defer power up to a manual debugfs trigger */
	if (debug_mode) {
		if (mhi_cntrl->debugfs_dentry)
			debugfs_create_file("power_up", 0644,
					    mhi_cntrl->debugfs_dentry,
					    mhi_cntrl, &debugfs_power_up_fops);
		mhi_priv->powered_on = false;
		return 0;
	}

	/* start power up sequence */
	ret = mhi_qcom_power_up(mhi_cntrl);
	if (ret) {
		MHI_CNTRL_ERR("Failed to power up MHI\n");
		mhi_priv->powered_on = false;
		goto error_power_up;
	}

	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);

	return 0;

error_power_up:
	mhi_arch_pcie_deinit(mhi_cntrl);

error_init_pci:
	mhi_deinit_pci_dev(pci_dev, dev_info);
	dev_set_drvdata(&pci_dev->dev, NULL);
	mhi_cntrl->cntrl_dev = NULL;

	return ret;
}
  969. int mhi_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
  970. {
  971. const struct mhi_pci_dev_info *dev_info =
  972. (struct mhi_pci_dev_info *) id->driver_data;
  973. struct mhi_controller *mhi_cntrl;
  974. struct mhi_qcom_priv *mhi_priv;
  975. u32 domain = pci_domain_nr(pci_dev->bus);
  976. u32 bus = pci_dev->bus->number;
  977. u32 dev_id = pci_dev->device;
  978. u32 slot = PCI_SLOT(pci_dev->devfn);
  979. int ret;
  980. /* see if we already registered */
  981. mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id);
  982. if (!mhi_cntrl) {
  983. mhi_cntrl = mhi_alloc_controller();
  984. if (!mhi_cntrl)
  985. return -ENOMEM;
  986. }
  987. mhi_priv = mhi_controller_get_privdata(mhi_cntrl);
  988. if (!mhi_priv) {
  989. mhi_priv = kzalloc(sizeof(*mhi_priv), GFP_KERNEL);
  990. if (!mhi_priv)
  991. return -ENOMEM;
  992. }
  993. /* set as true to initiate clean-up after first probe fails */
  994. mhi_priv->driver_remove = true;
  995. mhi_priv->dev_info = dev_info;
  996. ret = mhi_qcom_pci_probe(pci_dev, mhi_cntrl, mhi_priv);
  997. if (ret) {
  998. kfree(mhi_priv);
  999. mhi_free_controller(mhi_cntrl);
  1000. }
  1001. return ret;
  1002. }
/*
 * PCI remove entry point: wake the link, power the device down if it was
 * powered on, then tear down the arch layer, PCI resources, and finally
 * the controller and its private data. Order matters: the link must be
 * resumed before shutdown, and the controller is freed last.
 */
void mhi_pci_remove(struct pci_dev *pci_dev)
{
	struct mhi_controller *mhi_cntrl;
	struct mhi_qcom_priv *mhi_priv;
	u32 domain = pci_domain_nr(pci_dev->bus);
	u32 bus = pci_dev->bus->number;
	u32 dev_id = pci_dev->device;
	u32 slot = PCI_SLOT(pci_dev->devfn);

	/* see if we already registered */
	mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id);
	if (!mhi_cntrl)
		return;

	mhi_priv = mhi_controller_get_privdata(mhi_cntrl);
	if (!mhi_priv)
		return;

	/* if link is in suspend, wake it up */
	pm_runtime_get_sync(mhi_cntrl->cntrl_dev);

	if (mhi_priv->powered_on) {
		MHI_CNTRL_LOG("Triggering shutdown process\n");
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	mhi_priv->powered_on = false;

	/* balance the get_sync above without scheduling an idle */
	pm_runtime_put_noidle(mhi_cntrl->cntrl_dev);

	/* allow arch driver to free memory and unregister esoc if set */
	mhi_priv->driver_remove = true;
	mhi_arch_pcie_deinit(mhi_cntrl);

	/* turn the link off */
	mhi_deinit_pci_dev(pci_dev, mhi_priv->dev_info);

	mhi_unregister_controller(mhi_cntrl);
	kfree(mhi_priv);
	mhi_free_controller(mhi_cntrl);
}
/*
 * Default PM ops used via dev->driver->pm when the DRV PM domain
 * (mhi_qcom_pm_domain) is not installed for the device.
 */
static const struct dev_pm_ops pm_ops = {
	SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
			   mhi_runtime_resume,
			   mhi_runtime_idle)
	SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume)
};
/* PCI driver definition binding the mhi_pcie_device_id table to probe/remove. */
static struct pci_driver mhi_pcie_driver = {
	.name = "mhi",
	.id_table = mhi_pcie_device_id,
	.probe = mhi_pci_probe,
	.remove = mhi_pci_remove,
	.driver = {
		.pm = &pm_ops
	}
};
  1051. static int __init mhi_qcom_init(void)
  1052. {
  1053. int ret = 0;
  1054. mhi_qcom_debugfs_init();
  1055. ret = pci_register_driver(&mhi_pcie_driver);
  1056. if (ret) {
  1057. mhi_qcom_debugfs_exit();
  1058. return ret;
  1059. }
  1060. return 0;
  1061. }
/* Module exit: unregister the PCI driver first, then remove debugfs. */
static void __exit mhi_qcom_exit(void)
{
	pci_unregister_driver(&mhi_pcie_driver);
	mhi_qcom_debugfs_exit();
}
/* Module registration and metadata. */
module_init(mhi_qcom_init);
module_exit(mhi_qcom_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("MHI_CORE");
MODULE_DESCRIPTION("MHI Host Driver");