  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * MHI PCI driver - MHI over PCI controller driver
  4. *
  5. * This module is a generic driver for registering MHI-over-PCI devices,
  6. * such as PCIe QCOM modems.
  7. *
  8. * Copyright (C) 2020 Linaro Ltd <[email protected]>
  9. */
  10. #include <linux/aer.h>
  11. #include <linux/delay.h>
  12. #include <linux/device.h>
  13. #include <linux/mhi.h>
  14. #include <linux/module.h>
  15. #include <linux/pci.h>
  16. #include <linux/pm_runtime.h>
  17. #include <linux/timer.h>
  18. #include <linux/workqueue.h>
  19. #define MHI_PCI_DEFAULT_BAR_NUM 0
  20. #define MHI_POST_RESET_DELAY_MS 2000
  21. #define HEALTH_CHECK_PERIOD (HZ * 2)
/**
 * struct mhi_pci_dev_info - MHI PCI device specific information
 * @config: MHI controller configuration
 * @name: name of the PCI module
 * @fw: firmware path (if any)
 * @edl: emergency download mode firmware path (if any)
 * @bar_num: PCI base address register to use for MHI MMIO register space
 * @dma_data_width: DMA transfer word size (32 or 64 bits)
 * @mru_default: default MRU size for MBIM network packets
 * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
 *		   of inband wake support (such as sdx24)
 *
 * One instance of this struct is attached (via pci_device_id.driver_data)
 * to every supported modem in mhi_pci_id_table below.
 */
struct mhi_pci_dev_info {
	const struct mhi_controller_config *config;
	const char *name;
	const char *fw;
	const char *edl;
	unsigned int bar_num;
	unsigned int dma_data_width;
	unsigned int mru_default;
	bool sideband_wake;
};
  44. #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
  45. { \
  46. .num = ch_num, \
  47. .name = ch_name, \
  48. .num_elements = el_count, \
  49. .event_ring = ev_ring, \
  50. .dir = DMA_TO_DEVICE, \
  51. .ee_mask = BIT(MHI_EE_AMSS), \
  52. .pollcfg = 0, \
  53. .doorbell = MHI_DB_BRST_DISABLE, \
  54. .lpm_notify = false, \
  55. .offload_channel = false, \
  56. .doorbell_mode_switch = false, \
  57. } \
/* Downlink (device -> host) software channel, usable in AMSS execution env */
#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_AMSS), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
	}
/*
 * Downlink channel with auto_queue set: the MHI core pre-queues receive
 * buffers on behalf of the client (used for IPCR below).
 */
#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_AMSS), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
		.auto_queue = true, \
	}
/*
 * Control event ring (MHI_ER_CTRL), no IRQ moderation.
 * MSI vector 0 is reserved for BHI, hence .irq = ev_ring + 1.
 */
#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
	{ \
		.num_elements = el_count, \
		.irq_moderation_ms = 0, \
		.irq = (ev_ring) + 1, \
		.priority = 1, \
		.mode = MHI_DB_BRST_DISABLE, \
		.data_type = MHI_ER_CTRL, \
		.hardware_event = false, \
		.client_managed = false, \
		.offload_channel = false, \
	}
  99. #define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
  100. { \
  101. .num = ch_num, \
  102. .name = ch_name, \
  103. .num_elements = el_count, \
  104. .event_ring = ev_ring, \
  105. .dir = DMA_TO_DEVICE, \
  106. .ee_mask = BIT(MHI_EE_AMSS), \
  107. .pollcfg = 0, \
  108. .doorbell = MHI_DB_BRST_ENABLE, \
  109. .lpm_notify = false, \
  110. .offload_channel = false, \
  111. .doorbell_mode_switch = true, \
  112. } \
/* Downlink hardware-accelerated channel (burst doorbell + mode switch) */
#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_AMSS), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_ENABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = true, \
	}
  127. #define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
  128. { \
  129. .num = ch_num, \
  130. .name = ch_name, \
  131. .num_elements = el_count, \
  132. .event_ring = ev_ring, \
  133. .dir = DMA_TO_DEVICE, \
  134. .ee_mask = BIT(MHI_EE_SBL), \
  135. .pollcfg = 0, \
  136. .doorbell = MHI_DB_BRST_DISABLE, \
  137. .lpm_notify = false, \
  138. .offload_channel = false, \
  139. .doorbell_mode_switch = false, \
  140. } \
/* Downlink channel only available in SBL (secondary bootloader) exec env */
#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_SBL), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
	}
  155. #define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
  156. { \
  157. .num = ch_num, \
  158. .name = ch_name, \
  159. .num_elements = el_count, \
  160. .event_ring = ev_ring, \
  161. .dir = DMA_TO_DEVICE, \
  162. .ee_mask = BIT(MHI_EE_FP), \
  163. .pollcfg = 0, \
  164. .doorbell = MHI_DB_BRST_DISABLE, \
  165. .lpm_notify = false, \
  166. .offload_channel = false, \
  167. .doorbell_mode_switch = false, \
  168. } \
/* Downlink channel only available in the flash-programmer (FP) exec env */
#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_FP), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
	}
/* Software data event ring, 5 ms IRQ moderation; vector 0 is BHI's */
#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
	{ \
		.num_elements = el_count, \
		.irq_moderation_ms = 5, \
		.irq = (ev_ring) + 1, \
		.priority = 1, \
		.mode = MHI_DB_BRST_DISABLE, \
		.data_type = MHI_ER_DATA, \
		.hardware_event = false, \
		.client_managed = false, \
		.offload_channel = false, \
	}
/*
 * Hardware data event ring dedicated to a single channel (ch_num),
 * tight 1 ms IRQ moderation for throughput.
 */
#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
	{ \
		.num_elements = el_count, \
		.irq_moderation_ms = 1, \
		.irq = (ev_ring) + 1, \
		.priority = 1, \
		.mode = MHI_DB_BRST_DISABLE, \
		.data_type = MHI_ER_DATA, \
		.hardware_event = true, \
		.client_managed = false, \
		.offload_channel = false, \
		.channel = ch_num, \
	}
/* Channel map shared by the generic Qualcomm SDX modems (sdx24/55/65) */
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
};

/*
 * Non-const on purpose: mhi_pci_get_irqs() rewrites .irq when MSI
 * vectors have to be shared.
 */
static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
	/* first ring is control+data ring */
	MHI_EVENT_CONFIG_CTRL(0, 64),
	/* DIAG dedicated event ring */
	MHI_EVENT_CONFIG_DATA(1, 128),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};

static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
	.max_channels = 128,
	.timeout_ms = 8000,
	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
	.ch_cfg = modem_qcom_v1_mhi_channels,
	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
	.event_cfg = modem_qcom_v1_mhi_events,
};
/* Qualcomm SDX65-based modems */
static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
	.name = "qcom-sdx65m",
	.fw = "qcom/sdx65m/xbl.elf",
	.edl = "qcom/sdx65m/edl.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};

/* Qualcomm SDX55-based modems */
static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
	.name = "qcom-sdx55m",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

/* Qualcomm SDX24: needs the sideband wake GPIO (no inband wake support) */
static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
	.name = "qcom-sdx24",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = true,
};
/* Quectel EM1xx series (sdx24-based) */
static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	/* The EDL firmware is a flash-programmer exposing firehose protocol */
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

/* Non-const: .irq may be patched by mhi_pci_get_irqs() for shared MSI */
static struct mhi_event_config mhi_quectel_em1xx_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

static const struct mhi_controller_config modem_quectel_em1xx_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
	.ch_cfg = mhi_quectel_em1xx_channels,
	.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
	.event_cfg = mhi_quectel_em1xx_events,
};

static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
	.name = "quectel-em1xx",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_quectel_em1xx_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = true,
};
/* Foxconn T99Wxxx / DW5930e family (sdx55/sdx65 based) */
static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

/* Non-const: .irq may be patched by mhi_pci_get_irqs() for shared MSI */
static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

static const struct mhi_controller_config modem_foxconn_sdx55_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
	.ch_cfg = mhi_foxconn_sdx55_channels,
	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
	.event_cfg = mhi_foxconn_sdx55_events,
};

static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
	.name = "foxconn-sdx55",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

/* sdx65 variant reuses the sdx55 controller config, but boots without fw/edl */
static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
	.name = "foxconn-sdx65",
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};
/* Cinterion MV31/MV32 family */
static const struct mhi_channel_config mhi_mv3x_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
	/* MBIM Control Channel */
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
	/* MBIM Data Channel */
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
};

/* Non-const: .irq may be patched by mhi_pci_get_irqs() for shared MSI */
static struct mhi_event_config mhi_mv3x_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 256),
	MHI_EVENT_CONFIG_DATA(1, 256),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
};

static const struct mhi_controller_config modem_mv3x_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
	.ch_cfg = mhi_mv3x_channels,
	.num_events = ARRAY_SIZE(mhi_mv3x_events),
	.event_cfg = mhi_mv3x_events,
};

static const struct mhi_pci_dev_info mhi_mv31_info = {
	.name = "cinterion-mv31",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};

static const struct mhi_pci_dev_info mhi_mv32_info = {
	.name = "cinterion-mv32",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};
/* Sierra Wireless EM919x (sdx55-based, custom subsystem IDs) */
static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
};

/* Non-const: .irq may be patched by mhi_pci_get_irqs() for shared MSI */
static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
	/* first ring is control+data and DIAG ring */
	MHI_EVENT_CONFIG_CTRL(0, 2048),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};

static const struct mhi_controller_config modem_sierra_em919x_config = {
	.max_channels = 128,
	.timeout_ms = 24000,
	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
	.ch_cfg = mhi_sierra_em919x_channels,
	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
	.event_cfg = modem_sierra_em919x_mhi_events,
};

static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
	.name = "sierra-em919x",
	.config = &modem_sierra_em919x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};
/* Telit FN980 hardware revision v1 */
static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
};

/* Non-const: .irq may be patched by mhi_pci_get_irqs() for shared MSI */
static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};
  437. static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
  438. .max_channels = 128,
  439. .timeout_ms = 20000,
  440. .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
  441. .ch_cfg = mhi_telit_fn980_hw_v1_channels,
  442. .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
  443. .event_cfg = mhi_telit_fn980_hw_v1_events,
  444. };
static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
	.name = "telit-fn980-hwv1",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_telit_fn980_hw_v1_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};
/* Telit FN990 (sdx65-based) */
static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

/* Non-const: .irq may be patched by mhi_pci_get_irqs() for shared MSI */
static struct mhi_event_config mhi_telit_fn990_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};

static const struct mhi_controller_config modem_telit_fn990_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
	.ch_cfg = mhi_telit_fn990_channels,
	.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
	.event_cfg = mhi_telit_fn990_events,
};

static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
	.name = "telit-fn990",
	.config = &modem_telit_fn990_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
	.mru_default = 32768,
};
/* Keep the list sorted based on the PID. New VID should be added as the last entry.
 * Note: more specific PCI_DEVICE_SUB() matches must precede the generic
 * PCI_DEVICE() entry for the same vid:pid, since the table is scanned in order.
 */
static const struct pci_device_id mhi_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
	/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
	/* Telit FN980 hardware revision v1 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
	/* Telit FN990 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
	{ PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(0x1eac, 0x2001), /* EM120R-GL for FCCL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* DW5930e (sdx55), With eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W175 (sdx55), Based on Qualcomm new baseline */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W175 (sdx55) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* T99W368 (sdx65) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
	/* T99W373 (sdx62) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
	/* MV31-W (Cinterion) */
	{ PCI_DEVICE(0x1269, 0x00b3),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV31-W (Cinterion), based on new baseline */
	{ PCI_DEVICE(0x1269, 0x00b4),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV32-WA (Cinterion) */
	{ PCI_DEVICE(0x1269, 0x00ba),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	/* MV32-WB (Cinterion) */
	{ PCI_DEVICE(0x1269, 0x00bb),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
/* Bit positions for mhi_pci_device.status (used with {test,set,clear}_bit) */
enum mhi_pci_device_status {
	MHI_PCI_DEV_STARTED,
	MHI_PCI_DEV_SUSPENDED,
};

/*
 * Per-device driver state.
 * @mhi_cntrl: embedded MHI controller (must stay first-class, zero-initialized)
 * @pci_state: PCI config snapshot restored during recovery
 * @recovery_work: restarts a dead device (see mhi_pci_recovery_work)
 * @health_check_timer: periodic aliveness poll (see health_check)
 * @status: MHI_PCI_DEV_* bit flags
 */
struct mhi_pci_device {
	struct mhi_controller mhi_cntrl;
	struct pci_saved_state *pci_state;
	struct work_struct recovery_work;
	struct timer_list health_check_timer;
	unsigned long status;
};
/* MMIO register read callback for the MHI core; cannot fail, always returns 0 */
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
			    void __iomem *addr, u32 *out)
{
	*out = readl(addr);
	return 0;
}
/* MMIO register write callback for the MHI core */
static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *addr, u32 val)
{
	writel(val, addr);
}
/*
 * MHI core status callback: pin the device at full power while the firmware
 * is crashed, re-enable runtime PM once it reaches mission mode.
 */
static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
			      enum mhi_callback cb)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);

	switch (cb) {
	case MHI_CB_FATAL_ERROR:
	case MHI_CB_SYS_ERROR:
		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
		/* Keep the device awake so recovery/diagnostics can run */
		pm_runtime_forbid(&pdev->dev);
		break;
	case MHI_CB_EE_MISSION_MODE:
		pm_runtime_allow(&pdev->dev);
		break;
	default:
		break;
	}
}
/*
 * No-op wake hooks. NOTE(review): presumably installed when the device
 * handles wake inband (sideband_wake == false in mhi_pci_dev_info) — the
 * hook registration happens outside this chunk, confirm against probe.
 */
static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
{
	/* no-op */
}

static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
{
	/* no-op */
}

static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
{
	/* no-op */
}
  602. static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
  603. {
  604. struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
  605. u16 vendor = 0;
  606. if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
  607. return false;
  608. if (vendor == (u16) ~0 || vendor == 0)
  609. return false;
  610. return true;
  611. }
/*
 * Claim and map the PCI resources needed by the MHI controller:
 * assign/enable the device, iomap the MMIO BAR into mhi_cntrl->regs,
 * set the DMA mask and enable bus mastering.
 *
 * All pcim_* resources are device-managed, so no explicit cleanup is
 * needed on the error paths or at remove time.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
			 unsigned int bar_num, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	err = pci_assign_resource(pdev, bar_num);
	if (err)
		return err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
		return err;
	}

	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
	if (err) {
		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
		return err;
	}
	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);

	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
		return err;
	}

	pci_set_master(pdev);

	return 0;
}
/*
 * Allocate MSI vectors and build the mhi_cntrl->irq[] table.
 *
 * Ideally one vector for BHI plus one per event ring is allocated; if the
 * device grants fewer, everything falls back to a single shared vector and
 * the (intentionally non-const) event configs are patched to irq 0.
 *
 * Returns 0 on success, negative errno on allocation failure.
 */
static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *mhi_cntrl_config)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int nr_vectors, i;
	int *irq;

	/*
	 * Alloc one MSI vector for BHI + one vector per event ring, ideally...
	 * No explicit pci_free_irq_vectors required, done by pcim_release.
	 */
	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;

	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
	if (nr_vectors < 0) {
		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
			nr_vectors);
		return nr_vectors;
	}

	if (nr_vectors < mhi_cntrl->nr_irqs) {
		dev_warn(&pdev->dev, "using shared MSI\n");

		/* Patch msi vectors, use only one (shared) */
		for (i = 0; i < mhi_cntrl_config->num_events; i++)
			mhi_cntrl_config->event_cfg[i].irq = 0;
		mhi_cntrl->nr_irqs = 1;
	}

	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
		/* Extra rings beyond the granted vectors share the last one */
		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;

		irq[i] = pci_irq_vector(pdev, vector);
	}

	mhi_cntrl->irq = irq;

	return 0;
}
/* MHI core hook: asynchronously resume the device so it can leave M3 */
static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
{
	/* The runtime_get() MHI callback means:
	 * Do whatever is requested to leave M3.
	 */
	return pm_runtime_get(mhi_cntrl->cntrl_dev);
}

/* MHI core hook: the device is idle and may be moved back into M3 */
static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
{
	/* The runtime_put() MHI callback means:
	 * Device can be moved in M3 state.
	 */
	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
	pm_runtime_put(mhi_cntrl->cntrl_dev);
}
/*
 * Workqueue handler that tries to bring a dead device back: tear down MHI,
 * restore the saved PCI state, and power MHI back up. If that fails, fall
 * back to a full PCI function reset. Queued by health_check() and (outside
 * this chunk) by other failure paths.
 */
static void mhi_pci_recovery_work(struct work_struct *work)
{
	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
						       recovery_work);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	dev_warn(&pdev->dev, "device recovery started\n");

	/* Stop health polling while we manipulate the device */
	del_timer(&mhi_pdev->health_check_timer);
	pm_runtime_forbid(&pdev->dev);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* Bring the link back to D0 and reapply the config saved at probe */
	pci_set_power_state(pdev, PCI_D0);
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	if (!mhi_pci_is_alive(mhi_cntrl))
		goto err_try_reset;

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err)
		goto err_try_reset;

	err = mhi_sync_power_up(mhi_cntrl);
	if (err)
		goto err_unprepare;

	dev_dbg(&pdev->dev, "Recovery completed\n");

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
	return;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
	if (pci_reset_function(pdev))
		dev_err(&pdev->dev, "Recovery failed\n");
}
  725. static void health_check(struct timer_list *t)
  726. {
  727. struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
  728. struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
  729. if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
  730. test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
  731. return;
  732. if (!mhi_pci_is_alive(mhi_cntrl)) {
  733. dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
  734. queue_work(system_long_wq, &mhi_pdev->recovery_work);
  735. return;
  736. }
  737. /* reschedule in two seconds */
  738. mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
  739. }
/*
 * Probe: set up an MHI controller on top of the PCI transport for a
 * matched modem device. Claims the BAR and IRQs, caches the PCI config
 * space for later recovery, registers and powers up the MHI controller,
 * then starts health monitoring. Runtime suspend is only enabled for
 * PME-capable devices (needed for wakeup).
 */
static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
	const struct mhi_controller_config *mhi_cntrl_config;
	struct mhi_pci_device *mhi_pdev;
	struct mhi_controller *mhi_cntrl;
	int err;

	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);

	/* mhi_pdev.mhi_cntrl must be zero-initialized */
	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
	if (!mhi_pdev)
		return -ENOMEM;

	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);

	mhi_cntrl_config = info->config;
	mhi_cntrl = &mhi_pdev->mhi_cntrl;

	/* DMA window and firmware image names come from the per-device info table */
	mhi_cntrl->cntrl_dev = &pdev->dev;
	mhi_cntrl->iova_start = 0;
	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
	mhi_cntrl->fw_image = info->fw;
	mhi_cntrl->edl_image = info->edl;

	mhi_cntrl->read_reg = mhi_pci_read_reg;
	mhi_cntrl->write_reg = mhi_pci_write_reg;
	mhi_cntrl->status_cb = mhi_pci_status_cb;
	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
	mhi_cntrl->mru = info->mru_default;

	/* Devices with sideband wake signaling use no-op wake callbacks */
	if (info->sideband_wake) {
		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
	}

	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
	if (err)
		return err;

	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
	if (err)
		return err;

	pci_set_drvdata(pdev, mhi_pdev);

	/* Have stored pci confspace at hand for restore in sudden PCI error.
	 * cache the state locally and discard the PCI core one.
	 */
	pci_save_state(pdev);
	mhi_pdev->pci_state = pci_store_saved_state(pdev);
	pci_load_saved_state(pdev, NULL);

	pci_enable_pcie_error_reporting(pdev);

	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
	if (err)
		goto err_disable_reporting;

	/* MHI bus does not power up the controller by default */
	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		goto err_unregister;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		goto err_unprepare;
	}

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);

	/* start health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* Only allow runtime-suspend if PME capable (for wakeup) */
	if (pci_pme_capable(pdev, PCI_D3hot)) {
		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_mark_last_busy(&pdev->dev);
		/* Balanced by pm_runtime_get_noresume() in remove */
		pm_runtime_put_noidle(&pdev->dev);
	}

	return 0;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
	mhi_unregister_controller(mhi_cntrl);
err_disable_reporting:
	pci_disable_pcie_error_reporting(pdev);

	return err;
}
/*
 * Remove: stop the health timer and any pending recovery, power down
 * the MHI stack gracefully and unregister the controller. Memory was
 * devm-allocated in probe, so no explicit frees are needed here.
 */
static void mhi_pci_remove(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	del_timer_sync(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		/* graceful power-down (unlike the recovery/error paths) */
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* balancing probe put_noidle */
	if (pci_pme_capable(pdev, PCI_D3hot))
		pm_runtime_get_noresume(&pdev->dev);

	mhi_unregister_controller(mhi_cntrl);
	pci_disable_pcie_error_reporting(pdev);
}
/* Shutdown: same teardown as remove, then leave the device in D3hot. */
static void mhi_pci_shutdown(struct pci_dev *pdev)
{
	mhi_pci_remove(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
/*
 * reset_prepare: called before a PCI function-level reset. Shut down
 * the MHI stack and trigger an internal SoC reset so the device comes
 * back in a known state.
 */
static void mhi_pci_reset_prepare(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	dev_info(&pdev->dev, "reset\n");

	/* No health checks while the device is being reset */
	del_timer(&mhi_pdev->health_check_timer);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* cause internal device reset */
	mhi_soc_reset(mhi_cntrl);

	/* Be sure device reset has been executed */
	msleep(MHI_POST_RESET_DELAY_MS);
}
/*
 * reset_done: called after the PCI function-level reset. Restore the
 * config space cached at probe time and power the MHI stack back up,
 * provided the device is responsive again.
 */
static void mhi_pci_reset_done(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Restore initial known working PCI state */
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	/* Is device status available ? */
	if (!mhi_pci_is_alive(mhi_cntrl)) {
		dev_err(&pdev->dev, "reset failed\n");
		return;
	}

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		return;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		mhi_unprepare_after_power_down(mhi_cntrl);
		return;
	}

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	/* Restart periodic health monitoring */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
  883. static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
  884. pci_channel_state_t state)
  885. {
  886. struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
  887. struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
  888. dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
  889. if (state == pci_channel_io_perm_failure)
  890. return PCI_ERS_RESULT_DISCONNECT;
  891. /* Clean up MHI state */
  892. if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
  893. mhi_power_down(mhi_cntrl, false);
  894. mhi_unprepare_after_power_down(mhi_cntrl);
  895. } else {
  896. /* Nothing to do */
  897. return PCI_ERS_RESULT_RECOVERED;
  898. }
  899. pci_disable_device(pdev);
  900. return PCI_ERS_RESULT_NEED_RESET;
  901. }
  902. static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
  903. {
  904. if (pci_enable_device(pdev)) {
  905. dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
  906. return PCI_ERS_RESULT_DISCONNECT;
  907. }
  908. return PCI_ERS_RESULT_RECOVERED;
  909. }
/*
 * AER resume handler: the PCI channel is functional again, schedule
 * the recovery worker to reinitialize the MHI stack asynchronously.
 * NOTE(review): the "slot reset done" message is informational yet
 * logged at error level — confirm this is intentional.
 */
static void mhi_pci_io_resume(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);

	dev_err(&pdev->dev, "PCI slot reset done\n");

	queue_work(system_long_wq, &mhi_pdev->recovery_work);
}
/* PCI error handlers: AER recovery and function-level reset hooks */
static const struct pci_error_handlers mhi_pci_err_handler = {
	.error_detected = mhi_pci_error_detected,
	.slot_reset = mhi_pci_slot_reset,
	.resume = mhi_pci_io_resume,
	.reset_prepare = mhi_pci_reset_prepare,
	.reset_done = mhi_pci_reset_done,
};
/*
 * Runtime suspend: move the device into the MHI M3 state (only when it
 * is started and running mission-mode AMSS firmware), then disable the
 * PCI device with wake-from-D3 armed.
 *
 * Returns 0 on success (or if already suspended), -EBUSY if the M3
 * transition fails.
 */
static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Already suspended: nothing to do */
	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	/* Quiesce monitoring/recovery before touching device state */
	del_timer(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		goto pci_suspend; /* Nothing to do at MHI level */

	/* Transition to M3 state */
	err = mhi_pm_suspend(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
		/* Undo the SUSPENDED flag set above before failing */
		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
		return -EBUSY;
	}

pci_suspend:
	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);

	return 0;
}
/*
 * Runtime resume: re-enable the PCI device and bring MHI out of the M3
 * state. Failures are not propagated: the device is assumed to have
 * lost power (d3cold) and recovery is scheduled asynchronously instead
 * (see the comment at err_recovery).
 */
static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Not suspended: nothing to do */
	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	err = pci_enable_device(pdev);
	if (err)
		goto err_recovery;

	pci_set_master(pdev);
	pci_wake_from_d3(pdev, false);

	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		return 0; /* Nothing to do at MHI level */

	/* Exit M3, transition to M0 state */
	err = mhi_pm_resume(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
		goto err_recovery;
	}

	/* Resume health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* It can be a remote wakeup (no mhi runtime_get), update access time */
	pm_runtime_mark_last_busy(dev);

	return 0;

err_recovery:
	/* Do not fail to not mess up our PCI device state, the device likely
	 * lost power (d3cold) and we simply need to reset it from the recovery
	 * procedure, trigger the recovery asynchronously to prevent system
	 * suspend exit delaying.
	 */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);
	pm_runtime_mark_last_busy(dev);

	return 0;
}
  985. static int __maybe_unused mhi_pci_suspend(struct device *dev)
  986. {
  987. pm_runtime_disable(dev);
  988. return mhi_pci_runtime_suspend(dev);
  989. }
  990. static int __maybe_unused mhi_pci_resume(struct device *dev)
  991. {
  992. int ret;
  993. /* Depending the platform, device may have lost power (d3cold), we need
  994. * to resume it now to check its state and recover when necessary.
  995. */
  996. ret = mhi_pci_runtime_resume(dev);
  997. pm_runtime_enable(dev);
  998. return ret;
  999. }
/*
 * Hibernation freeze/poweroff: gracefully power the MHI stack down so
 * no device activity survives into the hibernation image.
 */
static int __maybe_unused mhi_pci_freeze(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	/* We want to stop all operations, hibernation does not guarantee that
	 * device will be in the same state as before freezing, especially if
	 * the intermediate restore kernel reinitializes MHI device with new
	 * context.
	 */
	flush_work(&mhi_pdev->recovery_work);
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	return 0;
}
/*
 * Hibernation thaw/restore: the device state after the restore kernel
 * is unknown, so schedule the recovery worker to reinitialize it from
 * scratch.
 */
static int __maybe_unused mhi_pci_restore(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);

	/* Reinitialize the device */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);

	return 0;
}
/* Power management operations: runtime PM plus system sleep/hibernation */
static const struct dev_pm_ops mhi_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend = mhi_pci_suspend,
	.resume = mhi_pci_resume,
	/* Hibernation: power down for freeze/poweroff, reinitialize on
	 * thaw/restore via the recovery worker.
	 */
	.freeze = mhi_pci_freeze,
	.thaw = mhi_pci_restore,
	.poweroff = mhi_pci_freeze,
	.restore = mhi_pci_restore,
#endif
};
/* PCI driver definition for the generic MHI controller driver */
static struct pci_driver mhi_pci_driver = {
	.name = "mhi-pci-generic",
	.id_table = mhi_pci_id_table,
	.probe = mhi_pci_probe,
	.remove = mhi_pci_remove,
	.shutdown = mhi_pci_shutdown,
	.err_handler = &mhi_pci_err_handler,
	.driver.pm = &mhi_pci_pm_ops
};
/* Register the driver with the PCI core (module init/exit boilerplate) */
module_pci_driver(mhi_pci_driver);

MODULE_AUTHOR("Loic Poulain <[email protected]>");
MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
MODULE_LICENSE("GPL");