/* hw-me.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
  4. * Intel Management Engine Interface (Intel MEI) Linux driver
  5. */
  6. #include <linux/pci.h>
  7. #include <linux/kthread.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/pm_runtime.h>
  10. #include <linux/sizes.h>
  11. #include <linux/delay.h>
  12. #include "mei_dev.h"
  13. #include "hbm.h"
  14. #include "hw-me.h"
  15. #include "hw-me-regs.h"
  16. #include "mei-trace.h"
  17. /**
  18. * mei_me_reg_read - Reads 32bit data from the mei device
  19. *
  20. * @hw: the me hardware structure
  21. * @offset: offset from which to read the data
  22. *
  23. * Return: register value (u32)
  24. */
  25. static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
  26. unsigned long offset)
  27. {
  28. return ioread32(hw->mem_addr + offset);
  29. }
  30. /**
  31. * mei_me_reg_write - Writes 32bit data to the mei device
  32. *
  33. * @hw: the me hardware structure
  34. * @offset: offset from which to write the data
  35. * @value: register value to write (u32)
  36. */
  37. static inline void mei_me_reg_write(const struct mei_me_hw *hw,
  38. unsigned long offset, u32 value)
  39. {
  40. iowrite32(value, hw->mem_addr + offset);
  41. }
  42. /**
  43. * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
  44. * read window register
  45. *
  46. * @dev: the device structure
  47. *
  48. * Return: ME_CB_RW register value (u32)
  49. */
  50. static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
  51. {
  52. return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
  53. }
  54. /**
  55. * mei_me_hcbww_write - write 32bit data to the host circular buffer
  56. *
  57. * @dev: the device structure
  58. * @data: 32bit data to be written to the host circular buffer
  59. */
  60. static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
  61. {
  62. mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
  63. }
  64. /**
  65. * mei_me_mecsr_read - Reads 32bit data from the ME CSR
  66. *
  67. * @dev: the device structure
  68. *
  69. * Return: ME_CSR_HA register value (u32)
  70. */
  71. static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
  72. {
  73. u32 reg;
  74. reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
  75. trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
  76. return reg;
  77. }
  78. /**
  79. * mei_hcsr_read - Reads 32bit data from the host CSR
  80. *
  81. * @dev: the device structure
  82. *
  83. * Return: H_CSR register value (u32)
  84. */
  85. static inline u32 mei_hcsr_read(const struct mei_device *dev)
  86. {
  87. u32 reg;
  88. reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
  89. trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
  90. return reg;
  91. }
  92. /**
  93. * mei_hcsr_write - writes H_CSR register to the mei device
  94. *
  95. * @dev: the device structure
  96. * @reg: new register value
  97. */
  98. static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
  99. {
  100. trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
  101. mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
  102. }
  103. /**
  104. * mei_hcsr_set - writes H_CSR register to the mei device,
  105. * and ignores the H_IS bit for it is write-one-to-zero.
  106. *
  107. * @dev: the device structure
  108. * @reg: new register value
  109. */
  110. static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
  111. {
  112. reg &= ~H_CSR_IS_MASK;
  113. mei_hcsr_write(dev, reg);
  114. }
  115. /**
  116. * mei_hcsr_set_hig - set host interrupt (set H_IG)
  117. *
  118. * @dev: the device structure
  119. */
  120. static inline void mei_hcsr_set_hig(struct mei_device *dev)
  121. {
  122. u32 hcsr;
  123. hcsr = mei_hcsr_read(dev) | H_IG;
  124. mei_hcsr_set(dev, hcsr);
  125. }
  126. /**
  127. * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
  128. *
  129. * @dev: the device structure
  130. *
  131. * Return: H_D0I3C register value (u32)
  132. */
  133. static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
  134. {
  135. u32 reg;
  136. reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
  137. trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
  138. return reg;
  139. }
  140. /**
  141. * mei_me_d0i3c_write - writes H_D0I3C register to device
  142. *
  143. * @dev: the device structure
  144. * @reg: new register value
  145. */
  146. static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
  147. {
  148. trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
  149. mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
  150. }
  151. /**
  152. * mei_me_trc_status - read trc status register
  153. *
  154. * @dev: mei device
  155. * @trc: trc status register value
  156. *
  157. * Return: 0 on success, error otherwise
  158. */
  159. static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
  160. {
  161. struct mei_me_hw *hw = to_me_hw(dev);
  162. if (!hw->cfg->hw_trc_supported)
  163. return -EOPNOTSUPP;
  164. *trc = mei_me_reg_read(hw, ME_TRC);
  165. trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
  166. return 0;
  167. }
  168. /**
  169. * mei_me_fw_status - read fw status register from pci config space
  170. *
  171. * @dev: mei device
  172. * @fw_status: fw status register values
  173. *
  174. * Return: 0 on success, error otherwise
  175. */
  176. static int mei_me_fw_status(struct mei_device *dev,
  177. struct mei_fw_status *fw_status)
  178. {
  179. struct mei_me_hw *hw = to_me_hw(dev);
  180. const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
  181. int ret;
  182. int i;
  183. if (!fw_status || !hw->read_fws)
  184. return -EINVAL;
  185. fw_status->count = fw_src->count;
  186. for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
  187. ret = hw->read_fws(dev, fw_src->status[i],
  188. &fw_status->status[i]);
  189. trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
  190. fw_src->status[i],
  191. fw_status->status[i]);
  192. if (ret)
  193. return ret;
  194. }
  195. return 0;
  196. }
  197. /**
  198. * mei_me_hw_config - configure hw dependent settings
  199. *
  200. * @dev: mei device
  201. *
  202. * Return:
  203. * * -EINVAL when read_fws is not set
  204. * * 0 on success
  205. *
  206. */
  207. static int mei_me_hw_config(struct mei_device *dev)
  208. {
  209. struct mei_me_hw *hw = to_me_hw(dev);
  210. u32 hcsr, reg;
  211. if (WARN_ON(!hw->read_fws))
  212. return -EINVAL;
  213. /* Doesn't change in runtime */
  214. hcsr = mei_hcsr_read(dev);
  215. hw->hbuf_depth = (hcsr & H_CBD) >> 24;
  216. reg = 0;
  217. hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
  218. trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
  219. hw->d0i3_supported =
  220. ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
  221. hw->pg_state = MEI_PG_OFF;
  222. if (hw->d0i3_supported) {
  223. reg = mei_me_d0i3c_read(dev);
  224. if (reg & H_D0I3C_I3)
  225. hw->pg_state = MEI_PG_ON;
  226. }
  227. return 0;
  228. }
  229. /**
  230. * mei_me_pg_state - translate internal pg state
  231. * to the mei power gating state
  232. *
  233. * @dev: mei device
  234. *
  235. * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
  236. */
  237. static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
  238. {
  239. struct mei_me_hw *hw = to_me_hw(dev);
  240. return hw->pg_state;
  241. }
  242. static inline u32 me_intr_src(u32 hcsr)
  243. {
  244. return hcsr & H_CSR_IS_MASK;
  245. }
  246. /**
  247. * me_intr_disable - disables mei device interrupts
  248. * using supplied hcsr register value.
  249. *
  250. * @dev: the device structure
  251. * @hcsr: supplied hcsr register value
  252. */
  253. static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
  254. {
  255. hcsr &= ~H_CSR_IE_MASK;
  256. mei_hcsr_set(dev, hcsr);
  257. }
  258. /**
  259. * me_intr_clear - clear and stop interrupts
  260. *
  261. * @dev: the device structure
  262. * @hcsr: supplied hcsr register value
  263. */
  264. static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
  265. {
  266. if (me_intr_src(hcsr))
  267. mei_hcsr_write(dev, hcsr);
  268. }
  269. /**
  270. * mei_me_intr_clear - clear and stop interrupts
  271. *
  272. * @dev: the device structure
  273. */
  274. static void mei_me_intr_clear(struct mei_device *dev)
  275. {
  276. u32 hcsr = mei_hcsr_read(dev);
  277. me_intr_clear(dev, hcsr);
  278. }
  279. /**
  280. * mei_me_intr_enable - enables mei device interrupts
  281. *
  282. * @dev: the device structure
  283. */
  284. static void mei_me_intr_enable(struct mei_device *dev)
  285. {
  286. u32 hcsr;
  287. if (mei_me_hw_use_polling(to_me_hw(dev)))
  288. return;
  289. hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
  290. mei_hcsr_set(dev, hcsr);
  291. }
  292. /**
  293. * mei_me_intr_disable - disables mei device interrupts
  294. *
  295. * @dev: the device structure
  296. */
  297. static void mei_me_intr_disable(struct mei_device *dev)
  298. {
  299. u32 hcsr = mei_hcsr_read(dev);
  300. me_intr_disable(dev, hcsr);
  301. }
  302. /**
  303. * mei_me_synchronize_irq - wait for pending IRQ handlers
  304. *
  305. * @dev: the device structure
  306. */
  307. static void mei_me_synchronize_irq(struct mei_device *dev)
  308. {
  309. struct mei_me_hw *hw = to_me_hw(dev);
  310. if (mei_me_hw_use_polling(hw))
  311. return;
  312. synchronize_irq(hw->irq);
  313. }
  314. /**
  315. * mei_me_hw_reset_release - release device from the reset
  316. *
  317. * @dev: the device structure
  318. */
  319. static void mei_me_hw_reset_release(struct mei_device *dev)
  320. {
  321. u32 hcsr = mei_hcsr_read(dev);
  322. hcsr |= H_IG;
  323. hcsr &= ~H_RST;
  324. mei_hcsr_set(dev, hcsr);
  325. }
  326. /**
  327. * mei_me_host_set_ready - enable device
  328. *
  329. * @dev: mei device
  330. */
  331. static void mei_me_host_set_ready(struct mei_device *dev)
  332. {
  333. u32 hcsr = mei_hcsr_read(dev);
  334. if (!mei_me_hw_use_polling(to_me_hw(dev)))
  335. hcsr |= H_CSR_IE_MASK;
  336. hcsr |= H_IG | H_RDY;
  337. mei_hcsr_set(dev, hcsr);
  338. }
  339. /**
  340. * mei_me_host_is_ready - check whether the host has turned ready
  341. *
  342. * @dev: mei device
  343. * Return: bool
  344. */
  345. static bool mei_me_host_is_ready(struct mei_device *dev)
  346. {
  347. u32 hcsr = mei_hcsr_read(dev);
  348. return (hcsr & H_RDY) == H_RDY;
  349. }
  350. /**
  351. * mei_me_hw_is_ready - check whether the me(hw) has turned ready
  352. *
  353. * @dev: mei device
  354. * Return: bool
  355. */
  356. static bool mei_me_hw_is_ready(struct mei_device *dev)
  357. {
  358. u32 mecsr = mei_me_mecsr_read(dev);
  359. return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
  360. }
  361. /**
  362. * mei_me_hw_is_resetting - check whether the me(hw) is in reset
  363. *
  364. * @dev: mei device
  365. * Return: bool
  366. */
  367. static bool mei_me_hw_is_resetting(struct mei_device *dev)
  368. {
  369. u32 mecsr = mei_me_mecsr_read(dev);
  370. return (mecsr & ME_RST_HRA) == ME_RST_HRA;
  371. }
  372. /**
  373. * mei_gsc_pxp_check - check for gsc firmware entering pxp mode
  374. *
  375. * @dev: the device structure
  376. */
  377. static void mei_gsc_pxp_check(struct mei_device *dev)
  378. {
  379. struct mei_me_hw *hw = to_me_hw(dev);
  380. u32 fwsts5 = 0;
  381. if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
  382. return;
  383. hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
  384. trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
  385. if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
  386. dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
  387. dev->pxp_mode = MEI_DEV_PXP_READY;
  388. } else {
  389. dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
  390. }
  391. }
  392. /**
  393. * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
  394. * or timeout is reached
  395. *
  396. * @dev: mei device
  397. * Return: 0 on success, error otherwise
  398. */
  399. static int mei_me_hw_ready_wait(struct mei_device *dev)
  400. {
  401. mutex_unlock(&dev->device_lock);
  402. wait_event_timeout(dev->wait_hw_ready,
  403. dev->recvd_hw_ready,
  404. dev->timeouts.hw_ready);
  405. mutex_lock(&dev->device_lock);
  406. if (!dev->recvd_hw_ready) {
  407. dev_err(dev->dev, "wait hw ready failed\n");
  408. return -ETIME;
  409. }
  410. mei_gsc_pxp_check(dev);
  411. mei_me_hw_reset_release(dev);
  412. dev->recvd_hw_ready = false;
  413. return 0;
  414. }
  415. /**
  416. * mei_me_hw_start - hw start routine
  417. *
  418. * @dev: mei device
  419. * Return: 0 on success, error otherwise
  420. */
  421. static int mei_me_hw_start(struct mei_device *dev)
  422. {
  423. int ret = mei_me_hw_ready_wait(dev);
  424. if (ret)
  425. return ret;
  426. dev_dbg(dev->dev, "hw is ready\n");
  427. mei_me_host_set_ready(dev);
  428. return ret;
  429. }
  430. /**
  431. * mei_hbuf_filled_slots - gets number of device filled buffer slots
  432. *
  433. * @dev: the device structure
  434. *
  435. * Return: number of filled slots
  436. */
  437. static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
  438. {
  439. u32 hcsr;
  440. char read_ptr, write_ptr;
  441. hcsr = mei_hcsr_read(dev);
  442. read_ptr = (char) ((hcsr & H_CBRP) >> 8);
  443. write_ptr = (char) ((hcsr & H_CBWP) >> 16);
  444. return (unsigned char) (write_ptr - read_ptr);
  445. }
  446. /**
  447. * mei_me_hbuf_is_empty - checks if host buffer is empty.
  448. *
  449. * @dev: the device structure
  450. *
  451. * Return: true if empty, false - otherwise.
  452. */
  453. static bool mei_me_hbuf_is_empty(struct mei_device *dev)
  454. {
  455. return mei_hbuf_filled_slots(dev) == 0;
  456. }
  457. /**
  458. * mei_me_hbuf_empty_slots - counts write empty slots.
  459. *
  460. * @dev: the device structure
  461. *
  462. * Return: -EOVERFLOW if overflow, otherwise empty slots count
  463. */
  464. static int mei_me_hbuf_empty_slots(struct mei_device *dev)
  465. {
  466. struct mei_me_hw *hw = to_me_hw(dev);
  467. unsigned char filled_slots, empty_slots;
  468. filled_slots = mei_hbuf_filled_slots(dev);
  469. empty_slots = hw->hbuf_depth - filled_slots;
  470. /* check for overflow */
  471. if (filled_slots > hw->hbuf_depth)
  472. return -EOVERFLOW;
  473. return empty_slots;
  474. }
  475. /**
  476. * mei_me_hbuf_depth - returns depth of the hw buffer.
  477. *
  478. * @dev: the device structure
  479. *
  480. * Return: size of hw buffer in slots
  481. */
  482. static u32 mei_me_hbuf_depth(const struct mei_device *dev)
  483. {
  484. struct mei_me_hw *hw = to_me_hw(dev);
  485. return hw->hbuf_depth;
  486. }
  487. /**
  488. * mei_me_hbuf_write - writes a message to host hw buffer.
  489. *
  490. * @dev: the device structure
  491. * @hdr: header of message
  492. * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes)
  493. * @data: payload
  494. * @data_len: payload length in bytes
  495. *
  496. * Return: 0 if success, < 0 - otherwise.
  497. */
  498. static int mei_me_hbuf_write(struct mei_device *dev,
  499. const void *hdr, size_t hdr_len,
  500. const void *data, size_t data_len)
  501. {
  502. unsigned long rem;
  503. unsigned long i;
  504. const u32 *reg_buf;
  505. u32 dw_cnt;
  506. int empty_slots;
  507. if (WARN_ON(!hdr || !data || hdr_len & 0x3))
  508. return -EINVAL;
  509. dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
  510. empty_slots = mei_hbuf_empty_slots(dev);
  511. dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);
  512. if (empty_slots < 0)
  513. return -EOVERFLOW;
  514. dw_cnt = mei_data2slots(hdr_len + data_len);
  515. if (dw_cnt > (u32)empty_slots)
  516. return -EMSGSIZE;
  517. reg_buf = hdr;
  518. for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
  519. mei_me_hcbww_write(dev, reg_buf[i]);
  520. reg_buf = data;
  521. for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
  522. mei_me_hcbww_write(dev, reg_buf[i]);
  523. rem = data_len & 0x3;
  524. if (rem > 0) {
  525. u32 reg = 0;
  526. memcpy(&reg, (const u8 *)data + data_len - rem, rem);
  527. mei_me_hcbww_write(dev, reg);
  528. }
  529. mei_hcsr_set_hig(dev);
  530. if (!mei_me_hw_is_ready(dev))
  531. return -EIO;
  532. return 0;
  533. }
  534. /**
  535. * mei_me_count_full_read_slots - counts read full slots.
  536. *
  537. * @dev: the device structure
  538. *
  539. * Return: -EOVERFLOW if overflow, otherwise filled slots count
  540. */
  541. static int mei_me_count_full_read_slots(struct mei_device *dev)
  542. {
  543. u32 me_csr;
  544. char read_ptr, write_ptr;
  545. unsigned char buffer_depth, filled_slots;
  546. me_csr = mei_me_mecsr_read(dev);
  547. buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
  548. read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
  549. write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
  550. filled_slots = (unsigned char) (write_ptr - read_ptr);
  551. /* check for overflow */
  552. if (filled_slots > buffer_depth)
  553. return -EOVERFLOW;
  554. dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
  555. return (int)filled_slots;
  556. }
  557. /**
  558. * mei_me_read_slots - reads a message from mei device.
  559. *
  560. * @dev: the device structure
  561. * @buffer: message buffer will be written
  562. * @buffer_length: message size will be read
  563. *
  564. * Return: always 0
  565. */
  566. static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
  567. unsigned long buffer_length)
  568. {
  569. u32 *reg_buf = (u32 *)buffer;
  570. for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
  571. *reg_buf++ = mei_me_mecbrw_read(dev);
  572. if (buffer_length > 0) {
  573. u32 reg = mei_me_mecbrw_read(dev);
  574. memcpy(reg_buf, &reg, buffer_length);
  575. }
  576. mei_hcsr_set_hig(dev);
  577. return 0;
  578. }
  579. /**
  580. * mei_me_pg_set - write pg enter register
  581. *
  582. * @dev: the device structure
  583. */
  584. static void mei_me_pg_set(struct mei_device *dev)
  585. {
  586. struct mei_me_hw *hw = to_me_hw(dev);
  587. u32 reg;
  588. reg = mei_me_reg_read(hw, H_HPG_CSR);
  589. trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  590. reg |= H_HPG_CSR_PGI;
  591. trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  592. mei_me_reg_write(hw, H_HPG_CSR, reg);
  593. }
  594. /**
  595. * mei_me_pg_unset - write pg exit register
  596. *
  597. * @dev: the device structure
  598. */
  599. static void mei_me_pg_unset(struct mei_device *dev)
  600. {
  601. struct mei_me_hw *hw = to_me_hw(dev);
  602. u32 reg;
  603. reg = mei_me_reg_read(hw, H_HPG_CSR);
  604. trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  605. WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
  606. reg |= H_HPG_CSR_PGIHEXR;
  607. trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  608. mei_me_reg_write(hw, H_HPG_CSR, reg);
  609. }
  610. /**
  611. * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
  612. *
  613. * @dev: the device structure
  614. *
  615. * Return: 0 on success an error code otherwise
  616. */
  617. static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
  618. {
  619. struct mei_me_hw *hw = to_me_hw(dev);
  620. int ret;
  621. dev->pg_event = MEI_PG_EVENT_WAIT;
  622. ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
  623. if (ret)
  624. return ret;
  625. mutex_unlock(&dev->device_lock);
  626. wait_event_timeout(dev->wait_pg,
  627. dev->pg_event == MEI_PG_EVENT_RECEIVED,
  628. dev->timeouts.pgi);
  629. mutex_lock(&dev->device_lock);
  630. if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
  631. mei_me_pg_set(dev);
  632. ret = 0;
  633. } else {
  634. ret = -ETIME;
  635. }
  636. dev->pg_event = MEI_PG_EVENT_IDLE;
  637. hw->pg_state = MEI_PG_ON;
  638. return ret;
  639. }
  640. /**
  641. * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
  642. *
  643. * @dev: the device structure
  644. *
  645. * Return: 0 on success an error code otherwise
  646. */
  647. static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
  648. {
  649. struct mei_me_hw *hw = to_me_hw(dev);
  650. int ret;
  651. if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
  652. goto reply;
  653. dev->pg_event = MEI_PG_EVENT_WAIT;
  654. mei_me_pg_unset(dev);
  655. mutex_unlock(&dev->device_lock);
  656. wait_event_timeout(dev->wait_pg,
  657. dev->pg_event == MEI_PG_EVENT_RECEIVED,
  658. dev->timeouts.pgi);
  659. mutex_lock(&dev->device_lock);
  660. reply:
  661. if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
  662. ret = -ETIME;
  663. goto out;
  664. }
  665. dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
  666. ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
  667. if (ret)
  668. return ret;
  669. mutex_unlock(&dev->device_lock);
  670. wait_event_timeout(dev->wait_pg,
  671. dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
  672. dev->timeouts.pgi);
  673. mutex_lock(&dev->device_lock);
  674. if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
  675. ret = 0;
  676. else
  677. ret = -ETIME;
  678. out:
  679. dev->pg_event = MEI_PG_EVENT_IDLE;
  680. hw->pg_state = MEI_PG_OFF;
  681. return ret;
  682. }
  683. /**
  684. * mei_me_pg_in_transition - is device now in pg transition
  685. *
  686. * @dev: the device structure
  687. *
  688. * Return: true if in pg transition, false otherwise
  689. */
  690. static bool mei_me_pg_in_transition(struct mei_device *dev)
  691. {
  692. return dev->pg_event >= MEI_PG_EVENT_WAIT &&
  693. dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
  694. }
  695. /**
  696. * mei_me_pg_is_enabled - detect if PG is supported by HW
  697. *
  698. * @dev: the device structure
  699. *
  700. * Return: true is pg supported, false otherwise
  701. */
  702. static bool mei_me_pg_is_enabled(struct mei_device *dev)
  703. {
  704. struct mei_me_hw *hw = to_me_hw(dev);
  705. u32 reg = mei_me_mecsr_read(dev);
  706. if (hw->d0i3_supported)
  707. return true;
  708. if ((reg & ME_PGIC_HRA) == 0)
  709. goto notsupported;
  710. if (!dev->hbm_f_pg_supported)
  711. goto notsupported;
  712. return true;
  713. notsupported:
  714. dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
  715. hw->d0i3_supported,
  716. !!(reg & ME_PGIC_HRA),
  717. dev->version.major_version,
  718. dev->version.minor_version,
  719. HBM_MAJOR_VERSION_PGI,
  720. HBM_MINOR_VERSION_PGI);
  721. return false;
  722. }
  723. /**
  724. * mei_me_d0i3_set - write d0i3 register bit on mei device.
  725. *
  726. * @dev: the device structure
  727. * @intr: ask for interrupt
  728. *
  729. * Return: D0I3C register value
  730. */
  731. static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
  732. {
  733. u32 reg = mei_me_d0i3c_read(dev);
  734. reg |= H_D0I3C_I3;
  735. if (intr)
  736. reg |= H_D0I3C_IR;
  737. else
  738. reg &= ~H_D0I3C_IR;
  739. mei_me_d0i3c_write(dev, reg);
  740. /* read it to ensure HW consistency */
  741. reg = mei_me_d0i3c_read(dev);
  742. return reg;
  743. }
  744. /**
  745. * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
  746. *
  747. * @dev: the device structure
  748. *
  749. * Return: D0I3C register value
  750. */
  751. static u32 mei_me_d0i3_unset(struct mei_device *dev)
  752. {
  753. u32 reg = mei_me_d0i3c_read(dev);
  754. reg &= ~H_D0I3C_I3;
  755. reg |= H_D0I3C_IR;
  756. mei_me_d0i3c_write(dev, reg);
  757. /* read it to ensure HW consistency */
  758. reg = mei_me_d0i3c_read(dev);
  759. return reg;
  760. }
  761. /**
  762. * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
  763. *
  764. * @dev: the device structure
  765. *
  766. * Return: 0 on success an error code otherwise
  767. */
  768. static int mei_me_d0i3_enter_sync(struct mei_device *dev)
  769. {
  770. struct mei_me_hw *hw = to_me_hw(dev);
  771. int ret;
  772. u32 reg;
  773. reg = mei_me_d0i3c_read(dev);
  774. if (reg & H_D0I3C_I3) {
  775. /* we are in d0i3, nothing to do */
  776. dev_dbg(dev->dev, "d0i3 set not needed\n");
  777. ret = 0;
  778. goto on;
  779. }
  780. /* PGI entry procedure */
  781. dev->pg_event = MEI_PG_EVENT_WAIT;
  782. ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
  783. if (ret)
  784. /* FIXME: should we reset here? */
  785. goto out;
  786. mutex_unlock(&dev->device_lock);
  787. wait_event_timeout(dev->wait_pg,
  788. dev->pg_event == MEI_PG_EVENT_RECEIVED,
  789. dev->timeouts.pgi);
  790. mutex_lock(&dev->device_lock);
  791. if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
  792. ret = -ETIME;
  793. goto out;
  794. }
  795. /* end PGI entry procedure */
  796. dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
  797. reg = mei_me_d0i3_set(dev, true);
  798. if (!(reg & H_D0I3C_CIP)) {
  799. dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
  800. ret = 0;
  801. goto on;
  802. }
  803. mutex_unlock(&dev->device_lock);
  804. wait_event_timeout(dev->wait_pg,
  805. dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
  806. dev->timeouts.d0i3);
  807. mutex_lock(&dev->device_lock);
  808. if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
  809. reg = mei_me_d0i3c_read(dev);
  810. if (!(reg & H_D0I3C_I3)) {
  811. ret = -ETIME;
  812. goto out;
  813. }
  814. }
  815. ret = 0;
  816. on:
  817. hw->pg_state = MEI_PG_ON;
  818. out:
  819. dev->pg_event = MEI_PG_EVENT_IDLE;
  820. dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
  821. return ret;
  822. }
  823. /**
  824. * mei_me_d0i3_enter - perform d0i3 entry procedure
  825. * no hbm PG handshake
  826. * no waiting for confirmation; runs with interrupts
  827. * disabled
  828. *
  829. * @dev: the device structure
  830. *
  831. * Return: 0 on success an error code otherwise
  832. */
  833. static int mei_me_d0i3_enter(struct mei_device *dev)
  834. {
  835. struct mei_me_hw *hw = to_me_hw(dev);
  836. u32 reg;
  837. reg = mei_me_d0i3c_read(dev);
  838. if (reg & H_D0I3C_I3) {
  839. /* we are in d0i3, nothing to do */
  840. dev_dbg(dev->dev, "already d0i3 : set not needed\n");
  841. goto on;
  842. }
  843. mei_me_d0i3_set(dev, false);
  844. on:
  845. hw->pg_state = MEI_PG_ON;
  846. dev->pg_event = MEI_PG_EVENT_IDLE;
  847. dev_dbg(dev->dev, "d0i3 enter\n");
  848. return 0;
  849. }
/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * Called with dev->device_lock held; the lock is dropped around the
 * sleep for the exit-complete interrupt and re-acquired afterwards.
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 reg;

	/* arm the event state before touching hardware so the interrupt
	 * thread can record the completion */
	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	/* request d0i3 exit; if the command-in-progress bit is clear the
	 * exit completed synchronously and no wait is needed */
	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	/* drop the device lock while sleeping for the exit interrupt */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
			   dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
			   dev->timeouts.d0i3);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* timed out: re-read the register and fail only if the
		 * device is still stuck in d0i3 */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
  896. /**
  897. * mei_me_pg_legacy_intr - perform legacy pg processing
  898. * in interrupt thread handler
  899. *
  900. * @dev: the device structure
  901. */
  902. static void mei_me_pg_legacy_intr(struct mei_device *dev)
  903. {
  904. struct mei_me_hw *hw = to_me_hw(dev);
  905. if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
  906. return;
  907. dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
  908. hw->pg_state = MEI_PG_OFF;
  909. if (waitqueue_active(&dev->wait_pg))
  910. wake_up(&dev->wait_pg);
  911. }
/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* a d0i3 transition we were waiting for has completed:
	 * toggle pg_state accordingly and wake the sleeping waiter */
	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			/* the completed transition was an exit from d0i3 */
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			/* the completed transition was an entry into d0i3 */
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
  949. /**
  950. * mei_me_pg_intr - perform pg processing in interrupt thread handler
  951. *
  952. * @dev: the device structure
  953. * @intr_source: interrupt source
  954. */
  955. static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
  956. {
  957. struct mei_me_hw *hw = to_me_hw(dev);
  958. if (hw->d0i3_supported)
  959. mei_me_d0i3_intr(dev, intr_source);
  960. else
  961. mei_me_pg_legacy_intr(dev);
  962. }
  963. /**
  964. * mei_me_pg_enter_sync - perform runtime pm entry procedure
  965. *
  966. * @dev: the device structure
  967. *
  968. * Return: 0 on success an error code otherwise
  969. */
  970. int mei_me_pg_enter_sync(struct mei_device *dev)
  971. {
  972. struct mei_me_hw *hw = to_me_hw(dev);
  973. if (hw->d0i3_supported)
  974. return mei_me_d0i3_enter_sync(dev);
  975. else
  976. return mei_me_pg_legacy_enter_sync(dev);
  977. }
  978. /**
  979. * mei_me_pg_exit_sync - perform runtime pm exit procedure
  980. *
  981. * @dev: the device structure
  982. *
  983. * Return: 0 on success an error code otherwise
  984. */
  985. int mei_me_pg_exit_sync(struct mei_device *dev)
  986. {
  987. struct mei_me_hw *hw = to_me_hw(dev);
  988. if (hw->d0i3_supported)
  989. return mei_me_d0i3_exit_sync(dev);
  990. else
  991. return mei_me_pg_legacy_exit_sync(dev);
  992. }
  993. /**
  994. * mei_me_hw_reset - resets fw via mei csr register.
  995. *
  996. * @dev: the device structure
  997. * @intr_enable: if interrupt should be enabled after reset.
  998. *
  999. * Return: 0 on success an error code otherwise
  1000. */
  1001. static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
  1002. {
  1003. struct mei_me_hw *hw = to_me_hw(dev);
  1004. int ret;
  1005. u32 hcsr;
  1006. if (intr_enable) {
  1007. mei_me_intr_enable(dev);
  1008. if (hw->d0i3_supported) {
  1009. ret = mei_me_d0i3_exit_sync(dev);
  1010. if (ret)
  1011. return ret;
  1012. } else {
  1013. hw->pg_state = MEI_PG_OFF;
  1014. }
  1015. }
  1016. pm_runtime_set_active(dev->dev);
  1017. hcsr = mei_hcsr_read(dev);
  1018. /* H_RST may be found lit before reset is started,
  1019. * for example if preceding reset flow hasn't completed.
  1020. * In that case asserting H_RST will be ignored, therefore
  1021. * we need to clean H_RST bit to start a successful reset sequence.
  1022. */
  1023. if ((hcsr & H_RST) == H_RST) {
  1024. dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
  1025. hcsr &= ~H_RST;
  1026. mei_hcsr_set(dev, hcsr);
  1027. hcsr = mei_hcsr_read(dev);
  1028. }
  1029. hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
  1030. if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
  1031. hcsr &= ~H_CSR_IE_MASK;
  1032. dev->recvd_hw_ready = false;
  1033. mei_hcsr_write(dev, hcsr);
  1034. /*
  1035. * Host reads the H_CSR once to ensure that the
  1036. * posted write to H_CSR completes.
  1037. */
  1038. hcsr = mei_hcsr_read(dev);
  1039. if ((hcsr & H_RST) == 0)
  1040. dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
  1041. if ((hcsr & H_RDY) == H_RDY)
  1042. dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
  1043. if (!intr_enable) {
  1044. mei_me_hw_reset_release(dev);
  1045. if (hw->d0i3_supported) {
  1046. ret = mei_me_d0i3_enter(dev);
  1047. if (ret)
  1048. return ret;
  1049. }
  1050. }
  1051. return 0;
  1052. }
  1053. /**
  1054. * mei_me_irq_quick_handler - The ISR of the MEI device
  1055. *
  1056. * @irq: The irq number
  1057. * @dev_id: pointer to the device structure
  1058. *
  1059. * Return: irqreturn_t
  1060. */
  1061. irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
  1062. {
  1063. struct mei_device *dev = (struct mei_device *)dev_id;
  1064. u32 hcsr;
  1065. hcsr = mei_hcsr_read(dev);
  1066. if (!me_intr_src(hcsr))
  1067. return IRQ_NONE;
  1068. dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
  1069. /* disable interrupts on device */
  1070. me_intr_disable(dev, hcsr);
  1071. return IRQ_WAKE_THREAD;
  1072. }
  1073. EXPORT_SYMBOL_GPL(mei_me_irq_quick_handler);
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct list_head cmpl_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	/* acknowledge the interrupt sources recorded by the quick handler */
	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	INIT_LIST_HEAD(&cmpl_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n",
			 dev->dev_state, dev->pxp_mode);
		if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
		    dev->dev_state == MEI_DEV_POWER_DOWN)
			mei_cl_all_disconnect(dev);
		else if (dev->dev_state != MEI_DEV_DISABLED)
			schedule_work(&dev->reset_work);
		goto end;
	}

	if (mei_me_hw_is_resetting(dev))
		mei_hcsr_set_hig(dev);

	/* let the power-gating state machine see this interrupt */
	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}

	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
				rets, dev->dev_state);
			/* schedule a reset unless we are already going down */
			if (dev->dev_state != MEI_DEV_RESETTING &&
			    dev->dev_state != MEI_DEV_DISABLED &&
			    dev->dev_state != MEI_DEV_POWERING_DOWN &&
			    dev->dev_state != MEI_DEV_POWER_DOWN)
				schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &cmpl_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	/* re-arm the device interrupts disabled by the quick handler */
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
/* polling period (msec) right after an event, and the cap it decays to */
#define MEI_POLLING_TIMEOUT_ACTIVE 100
#define MEI_POLLING_TIMEOUT_IDLE 500

/**
 * mei_me_polling_thread - interrupt register polling thread
 *
 * The thread monitors the interrupt source register and calls
 * mei_me_irq_thread_handler() to handle the firmware
 * input.
 *
 * The function polls in MEI_POLLING_TIMEOUT_ACTIVE timeout
 * in case there was an event, in idle case the polling
 * time increases yet again by MEI_POLLING_TIMEOUT_ACTIVE
 * up to MEI_POLLING_TIMEOUT_IDLE.
 *
 * @_dev: mei device
 *
 * Return: always 0
 */
int mei_me_polling_thread(void *_dev)
{
	struct mei_device *dev = _dev;
	irqreturn_t irq_ret;
	long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;

	dev_dbg(dev->dev, "kernel thread is running\n");
	while (!kthread_should_stop()) {
		struct mei_me_hw *hw = to_me_hw(dev);
		u32 hcsr;

		/* park until the device becomes active or we are told to stop */
		wait_event_timeout(hw->wait_active,
				   hw->is_active || kthread_should_stop(),
				   msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));

		if (kthread_should_stop())
			break;

		hcsr = mei_hcsr_read(dev);
		if (me_intr_src(hcsr)) {
			/* event seen: reset the poll period to the fast rate */
			polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
			irq_ret = mei_me_irq_thread_handler(1, dev);
			if (irq_ret != IRQ_HANDLED)
				dev_err(dev->dev, "irq_ret %d\n", irq_ret);
		} else {
			/*
			 * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
			 * up to MEI_POLLING_TIMEOUT_IDLE
			 */
			polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
						    MEI_POLLING_TIMEOUT_ACTIVE,
						    MEI_POLLING_TIMEOUT_IDLE);
		}

		schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_me_polling_thread);
/* ME hardware callbacks wired into the generic mei layer */
static const struct mei_hw_ops mei_me_hw_ops = {

	.trc_status = mei_me_trc_status,
	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_depth = mei_me_hbuf_depth,

	.write = mei_me_hbuf_write,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
  1238. /**
  1239. * mei_me_fw_type_nm() - check for nm sku
  1240. *
  1241. * Read ME FW Status register to check for the Node Manager (NM) Firmware.
  1242. * The NM FW is only signaled in PCI function 0.
  1243. * __Note__: Deprecated by PCH8 and newer.
  1244. *
  1245. * @pdev: pci device
  1246. *
  1247. * Return: true in case of NM firmware
  1248. */
  1249. static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
  1250. {
  1251. u32 reg;
  1252. unsigned int devfn;
  1253. devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
  1254. pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
  1255. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
  1256. /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
  1257. return (reg & 0x600) == 0x200;
  1258. }
  1259. #define MEI_CFG_FW_NM \
  1260. .quirk_probe = mei_me_fw_type_nm
  1261. /**
  1262. * mei_me_fw_type_sps_4() - check for sps 4.0 sku
  1263. *
  1264. * Read ME FW Status register to check for SPS Firmware.
  1265. * The SPS FW is only signaled in the PCI function 0.
  1266. * __Note__: Deprecated by SPS 5.0 and newer.
  1267. *
  1268. * @pdev: pci device
  1269. *
  1270. * Return: true in case of SPS firmware
  1271. */
  1272. static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
  1273. {
  1274. u32 reg;
  1275. unsigned int devfn;
  1276. devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
  1277. pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
  1278. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
  1279. return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
  1280. }
  1281. #define MEI_CFG_FW_SPS_4 \
  1282. .quirk_probe = mei_me_fw_type_sps_4
  1283. /**
  1284. * mei_me_fw_type_sps_ign() - check for sps or ign sku
  1285. *
  1286. * Read ME FW Status register to check for SPS or IGN Firmware.
  1287. * The SPS/IGN FW is only signaled in pci function 0
  1288. *
  1289. * @pdev: pci device
  1290. *
  1291. * Return: true in case of SPS/IGN firmware
  1292. */
  1293. static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev)
  1294. {
  1295. u32 reg;
  1296. u32 fw_type;
  1297. unsigned int devfn;
  1298. devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
  1299. pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
  1300. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
  1301. fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
  1302. dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
  1303. return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN ||
  1304. fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
  1305. }
/* device kind identifiers assigned by the per-generation configs */
#define MEI_CFG_KIND_ITOUCH                     \
	.kind = "itouch"

#define MEI_CFG_TYPE_GSC                        \
	.kind = "gsc"

#define MEI_CFG_TYPE_GSCFI                      \
	.kind = "gscfi"

/* probe quirk: exclude SPS/IGN firmware skus (mei_me_fw_type_sps_ign) */
#define MEI_CFG_FW_SPS_IGN                      \
	.quirk_probe = mei_me_fw_type_sps_ign

/* firmware version reporting is supported on this generation */
#define MEI_CFG_FW_VER_SUPP                     \
	.fw_ver_supported = 1

/* ICH: no firmware status registers */
#define MEI_CFG_ICH_HFS                         \
	.fw_status.count = 0

/* ICH10: one firmware status register */
#define MEI_CFG_ICH10_HFS                       \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* PCH: two firmware status registers */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* PCH8 and newer: six firmware status registers */
#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6

/* DMA ring sizes for host/device/control descriptors */
#define MEI_CFG_DMA_128                         \
	.dma_size[DMA_DSCR_HOST] = SZ_128K,     \
	.dma_size[DMA_DSCR_DEVICE] = SZ_128K,   \
	.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE

/* trace register set is available */
#define MEI_CFG_TRC                             \
	.hw_trc_supported = 1
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
	MEI_CFG_ICH10_HFS,
};

/* PCH6 devices */
static const struct mei_cfg mei_me_pch6_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH7 devices */
static const struct mei_cfg mei_me_pch7_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point and newer devices - iTouch */
static const struct mei_cfg mei_me_pch8_itouch_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* LBG with quirk for SPS (4.0) Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_4,
};

/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
};

/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_FW_SPS_IGN,
};

/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
 * w/o DMA support.
 */
static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
	MEI_CFG_KIND_ITOUCH,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_FW_SPS_IGN,
};

/* Tiger Lake and newer devices */
static const struct mei_cfg mei_me_pch15_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
};

/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch15_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
	MEI_CFG_DMA_128,
	MEI_CFG_TRC,
	MEI_CFG_FW_SPS_IGN,
};

/* Graphics System Controller */
static const struct mei_cfg mei_me_gsc_cfg = {
	MEI_CFG_TYPE_GSC,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};

/* Graphics System Controller Firmware Interface */
static const struct mei_cfg mei_me_gscfi_cfg = {
	MEI_CFG_TYPE_GSCFI,
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_VER_SUPP,
};
/*
 * mei_cfg_list - A list of platform specific configurations.
 * Note: has to be synchronized with enum mei_cfg_idx.
 */
static const struct mei_cfg *const mei_cfg_list[] = {
	[MEI_ME_UNDEF_CFG] = NULL,
	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
	[MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
	[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
	[MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
	[MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
	[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
	[MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
	[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
	[MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
	[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
	[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
	[MEI_ME_GSC_CFG] = &mei_me_gsc_cfg,
	[MEI_ME_GSCFI_CFG] = &mei_me_gscfi_cfg,
};
  1457. const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
  1458. {
  1459. BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
  1460. if (idx >= MEI_ME_NUM_CFG)
  1461. return NULL;
  1462. return mei_cfg_list[idx];
  1463. }
  1464. EXPORT_SYMBOL_GPL(mei_me_get_cfg);
  1465. /**
  1466. * mei_me_dev_init - allocates and initializes the mei device structure
  1467. *
  1468. * @parent: device associated with physical device (pci/platform)
  1469. * @cfg: per device generation config
  1470. * @slow_fw: configure longer timeouts as FW is slow
  1471. *
  1472. * Return: The mei_device pointer on success, NULL on failure.
  1473. */
  1474. struct mei_device *mei_me_dev_init(struct device *parent,
  1475. const struct mei_cfg *cfg, bool slow_fw)
  1476. {
  1477. struct mei_device *dev;
  1478. struct mei_me_hw *hw;
  1479. int i;
  1480. dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
  1481. if (!dev)
  1482. return NULL;
  1483. hw = to_me_hw(dev);
  1484. for (i = 0; i < DMA_DSCR_NUM; i++)
  1485. dev->dr_dscr[i].size = cfg->dma_size[i];
  1486. mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
  1487. hw->cfg = cfg;
  1488. dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
  1489. dev->kind = cfg->kind;
  1490. return dev;
  1491. }
  1492. EXPORT_SYMBOL_GPL(mei_me_dev_init);