// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"
#include "client.h"
#include "hbm.h"

#include "mei-trace.h"

#define TXE_HBUF_DEPTH (PAYLOAD_SIZE / MEI_SLOT_SIZE)
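
/*
 * Illustration (not from the original source): TXE_HBUF_DEPTH is the host
 * buffer depth expressed in 4-byte slots.  Assuming PAYLOAD_SIZE is 128
 * bytes (the actual value is defined in hw-txe.h) and MEI_SLOT_SIZE is 4,
 * the depth would be 128 / 4 = 32 slots, i.e. a single IPC write can carry
 * at most 32 dwords of header plus payload.
 */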

/**
 * mei_txe_reg_read - Reads 32bit data from the txe device
 *
 * @base_addr: registers base address
 * @offset: register offset
 *
 * Return: register value
 */
static inline u32 mei_txe_reg_read(void __iomem *base_addr,
		unsigned long offset)
{
	return ioread32(base_addr + offset);
}

/**
 * mei_txe_reg_write - Writes 32bit data to the txe device
 *
 * @base_addr: registers base address
 * @offset: register offset
 * @value: the value to write
 */
static inline void mei_txe_reg_write(void __iomem *base_addr,
		unsigned long offset, u32 value)
{
	iowrite32(value, base_addr + offset);
}

/**
 * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR
 *
 * @hw: the txe hardware structure
 * @offset: register offset
 *
 * Reads 32bit data from the SeC BAR without checking aliveness.
 *
 * Return: register value
 */
static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
		unsigned long offset)
{
	return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
}

/**
 * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR
 *
 * @hw: the txe hardware structure
 * @offset: register offset
 *
 * Reads 32bit data from the SeC BAR and warns if aliveness is not set.
 *
 * Return: register value
 */
static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
		unsigned long offset)
{
	WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
	return mei_txe_sec_reg_read_silent(hw, offset);
}

/**
 * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR
 *
 * @hw: the txe hardware structure
 * @offset: register offset
 * @value: value to write
 *
 * Writes 32bit data to the SeC BAR without checking aliveness.
 */
static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
		unsigned long offset, u32 value)
{
	mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
}

/**
 * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR
 *
 * @hw: the txe hardware structure
 * @offset: register offset
 * @value: value to write
 *
 * Writes 32bit data to the SeC BAR and warns if aliveness is not set.
 */
static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
		unsigned long offset, u32 value)
{
	WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
	mei_txe_sec_reg_write_silent(hw, offset, value);
}

/**
 * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR
 *
 * @hw: the txe hardware structure
 * @offset: offset from which to read the data
 *
 * Return: the 32bit value read
 */
static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
		unsigned long offset)
{
	return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
}

/**
 * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR
 *
 * @hw: the txe hardware structure
 * @offset: offset to which to write the data
 * @value: the 32bit value to write
 */
static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
		unsigned long offset, u32 value)
{
	mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
}

/**
 * mei_txe_aliveness_set - request for aliveness change
 *
 * @dev: the device structure
 * @req: requested aliveness value
 *
 * Requests an aliveness change and returns true if the change is
 * really needed, or false if aliveness is already in the requested state.
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: true if the request was sent
 */
static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	bool do_req = hw->aliveness != req;

	dev_dbg(dev->dev, "Aliveness current=%d request=%d\n",
		hw->aliveness, req);
	if (do_req) {
		dev->pg_event = MEI_PG_EVENT_WAIT;
		mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
	}
	return do_req;
}

/**
 * mei_txe_aliveness_req_get - get aliveness requested register value
 *
 * @dev: the device structure
 *
 * Extract the SICR_HOST_ALIVENESS_REQ_REQUESTED bit from
 * the SICR_HOST_ALIVENESS_REQ register value
 *
 * Return: SICR_HOST_ALIVENESS_REQ_REQUESTED bit value
 */
static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 reg;

	reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
	return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
}

/**
 * mei_txe_aliveness_get - get aliveness response register value
 *
 * @dev: the device structure
 *
 * Return: HICR_HOST_ALIVENESS_RESP_ACK bit from HICR_HOST_ALIVENESS_RESP
 *         register
 */
static u32 mei_txe_aliveness_get(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 reg;

	reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
	return reg & HICR_HOST_ALIVENESS_RESP_ACK;
}

/**
 * mei_txe_aliveness_poll - waits for aliveness to settle
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
 *
 * Return: 0 if the expected value was received, -ETIME otherwise
 */
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	ktime_t stop, start;

	start = ktime_get();
	stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
	do {
		hw->aliveness = mei_txe_aliveness_get(dev);
		if (hw->aliveness == expected) {
			dev->pg_event = MEI_PG_EVENT_IDLE;
			dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
				ktime_to_us(ktime_sub(ktime_get(), start)));
			return 0;
		}
		usleep_range(20, 50);
	} while (ktime_before(ktime_get(), stop));

	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_err(dev->dev, "aliveness timed out\n");
	return -ETIME;
}

/**
 * mei_txe_aliveness_wait - waits for aliveness to settle
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
 *
 * Return: 0 on success and < 0 otherwise
 */
static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	const unsigned long timeout =
			msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
	long err;
	int ret;

	hw->aliveness = mei_txe_aliveness_get(dev);
	if (hw->aliveness == expected)
		return 0;

	mutex_unlock(&dev->device_lock);
	err = wait_event_timeout(hw->wait_aliveness_resp,
			dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	hw->aliveness = mei_txe_aliveness_get(dev);
	ret = hw->aliveness == expected ? 0 : -ETIME;

	if (ret)
		dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
			 err, hw->aliveness, dev->pg_event);
	else
		dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
			jiffies_to_msecs(timeout - err),
			hw->aliveness, dev->pg_event);

	dev->pg_event = MEI_PG_EVENT_IDLE;
	return ret;
}

/**
 * mei_txe_aliveness_set_sync - sets and waits for aliveness to complete
 *
 * @dev: the device structure
 * @req: requested aliveness value
 *
 * Return: 0 on success and < 0 otherwise
 */
int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
{
	if (mei_txe_aliveness_set(dev, req))
		return mei_txe_aliveness_wait(dev, req);
	return 0;
}
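
/*
 * Usage sketch (illustrative, not part of the original driver): a power
 * management path would typically assert aliveness before touching SeC
 * registers and drop it again when the device is allowed to power gate:
 *
 *	ret = mei_txe_aliveness_set_sync(dev, 1);	// resume: wake SeC
 *	...
 *	ret = mei_txe_aliveness_set_sync(dev, 0);	// suspend: allow power gating
 *
 * As with mei_txe_aliveness_set(), the caller is expected to hold
 * dev->device_lock.
 */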

/**
 * mei_txe_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_txe_pg_in_transition(struct mei_device *dev)
{
	return dev->pg_event == MEI_PG_EVENT_WAIT;
}

/**
 * mei_txe_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true if pg is supported, false otherwise
 */
static bool mei_txe_pg_is_enabled(struct mei_device *dev)
{
	return true;
}

/**
 * mei_txe_pg_state - translate aliveness register value
 *	to the mei power gating state
 *
 * @dev: the device structure
 *
 * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
 */
static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
}

/**
 * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt
 *
 * @dev: the device structure
 */
static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 hintmsk;

	/* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */
	hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
	hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
	mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
}

/**
 * mei_txe_input_doorbell_set - sets bit 0 in
 *	SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL
 *
 * @hw: the txe hardware structure
 */
static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
{
	/* Clear the interrupt cause */
	clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
}

/**
 * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1
 *
 * @hw: the txe hardware structure
 */
static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
{
	mei_txe_br_reg_write(hw,
			SICR_SEC_IPC_OUTPUT_STATUS_REG,
			SEC_IPC_OUTPUT_STATUS_RDY);
}

/**
 * mei_txe_is_input_ready - check if TXE is ready for receiving data
 *
 * @dev: the device structure
 *
 * Return: true if INPUT STATUS READY bit is set
 */
static bool mei_txe_is_input_ready(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 status;

	status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
	return !!(SEC_IPC_INPUT_STATUS_RDY & status);
}

/**
 * mei_txe_intr_clear - clear all interrupts
 *
 * @dev: the device structure
 */
static inline void mei_txe_intr_clear(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
			SEC_IPC_HOST_INT_STATUS_PENDING);
	mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
	mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
}

/**
 * mei_txe_intr_disable - disable all interrupts
 *
 * @dev: the device structure
 */
static void mei_txe_intr_disable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, HHIER_REG, 0);
	mei_txe_br_reg_write(hw, HIER_REG, 0);
}

/**
 * mei_txe_intr_enable - enable all interrupts
 *
 * @dev: the device structure
 */
static void mei_txe_intr_enable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
	mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
}

/**
 * mei_txe_synchronize_irq - wait for pending IRQ handlers
 *
 * @dev: the device structure
 */
static void mei_txe_synchronize_irq(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	synchronize_irq(pdev->irq);
}

/**
 * mei_txe_pending_interrupts - check if there are pending interrupts
 *
 * @dev: the device structure
 *
 * Checks if there are pending interrupts; only Aliveness, Readiness,
 * Input ready, and Output doorbell are of relevance.
 *
 * Return: true if there are pending interrupts
 */
static bool mei_txe_pending_interrupts(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
				      TXE_INTR_ALIVENESS |
				      TXE_INTR_IN_READY  |
				      TXE_INTR_OUT_DB));

	if (ret) {
		dev_dbg(dev->dev,
			"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
			!!(hw->intr_cause & TXE_INTR_IN_READY),
			!!(hw->intr_cause & TXE_INTR_READINESS),
			!!(hw->intr_cause & TXE_INTR_ALIVENESS),
			!!(hw->intr_cause & TXE_INTR_OUT_DB));
	}
	return ret;
}

/**
 * mei_txe_input_payload_write - write a dword to the host buffer
 *	at offset idx
 *
 * @dev: the device structure
 * @idx: index in the host buffer
 * @value: value
 */
static void mei_txe_input_payload_write(struct mei_device *dev,
		unsigned long idx, u32 value)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
			(idx * sizeof(u32)), value);
}

/**
 * mei_txe_out_data_read - read dword from the device buffer
 *	at offset idx
 *
 * @dev: the device structure
 * @idx: index in the device buffer
 *
 * Return: register value at index
 */
static u32 mei_txe_out_data_read(const struct mei_device *dev,
		unsigned long idx)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	return mei_txe_br_reg_read(hw,
			BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
}

/* Readiness */

/**
 * mei_txe_readiness_set_host_rdy - set host readiness bit
 *
 * @dev: the device structure
 */
static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw,
			SICR_HOST_IPC_READINESS_REQ_REG,
			SICR_HOST_IPC_READINESS_HOST_RDY);
}

/**
 * mei_txe_readiness_clear - clear host readiness bit
 *
 * @dev: the device structure
 */
static void mei_txe_readiness_clear(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
			SICR_HOST_IPC_READINESS_RDY_CLR);
}

/**
 * mei_txe_readiness_get - Reads and returns
 *	the HICR_SEC_IPC_READINESS register value
 *
 * @dev: the device structure
 *
 * Return: the HICR_SEC_IPC_READINESS register value
 */
static u32 mei_txe_readiness_get(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
}

/**
 * mei_txe_readiness_is_sec_rdy - check readiness
 *	for HICR_SEC_IPC_READINESS_SEC_RDY
 *
 * @readiness: cached readiness state
 *
 * Return: true if readiness bit is set
 */
static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
{
	return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
}

/**
 * mei_txe_hw_is_ready - check if the hw is ready
 *
 * @dev: the device structure
 *
 * Return: true if sec is ready
 */
static bool mei_txe_hw_is_ready(struct mei_device *dev)
{
	u32 readiness = mei_txe_readiness_get(dev);

	return mei_txe_readiness_is_sec_rdy(readiness);
}

/**
 * mei_txe_host_is_ready - check if the host is ready
 *
 * @dev: the device structure
 *
 * Return: true if host is ready
 */
static inline bool mei_txe_host_is_ready(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);

	return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
}

/**
 * mei_txe_readiness_wait - wait till readiness settles
 *
 * @dev: the device structure
 *
 * Return: 0 on success and -ETIME on timeout
 */
static int mei_txe_readiness_wait(struct mei_device *dev)
{
	if (mei_txe_hw_is_ready(dev))
		return 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
			msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait for readiness failed\n");
		return -ETIME;
	}

	dev->recvd_hw_ready = false;
	return 0;
}

static const struct mei_fw_status mei_txe_fw_sts = {
	.count = 2,
	.status[0] = PCI_CFG_TXE_FW_STS0,
	.status[1] = PCI_CFG_TXE_FW_STS1
};

/**
 * mei_txe_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */
static int mei_txe_fw_status(struct mei_device *dev,
		struct mei_fw_status *fw_status)
{
	const struct mei_fw_status *fw_src = &mei_txe_fw_sts;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int ret;
	int i;

	if (!fw_status)
		return -EINVAL;

	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = pci_read_config_dword(pdev, fw_src->status[i],
				&fw_status->status[i]);
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
				fw_src->status[i],
				fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * mei_txe_hw_config - configure hardware at the start of the device
 *
 * @dev: the device structure
 *
 * Configuring hardware at the start of the device should be done only
 * once, at device probe time.
 *
 * Return: always 0
 */
static int mei_txe_hw_config(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
		hw->aliveness, hw->readiness);

	return 0;
}

/**
 * mei_txe_write - writes a message to device.
 *
 * @dev: the device structure
 * @hdr: header of message
 * @hdr_len: header length in bytes - must be a multiple of a slot (4 bytes)
 * @data: payload
 * @data_len: payload length in bytes
 *
 * Return: 0 if success, < 0 - otherwise.
 */
static int mei_txe_write(struct mei_device *dev,
		const void *hdr, size_t hdr_len,
		const void *data, size_t data_len)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	unsigned long rem;
	const u32 *reg_buf;
	u32 slots = TXE_HBUF_DEPTH;
	u32 dw_cnt;
	unsigned long i, j;

	if (WARN_ON(!hdr || !data || hdr_len & 0x3))
		return -EINVAL;

	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));

	dw_cnt = mei_data2slots(hdr_len + data_len);
	if (dw_cnt > slots)
		return -EMSGSIZE;

	if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
		return -EAGAIN;

	/* Enable Input Ready Interrupt. */
	mei_txe_input_ready_interrupt_enable(dev);

	if (!mei_txe_is_input_ready(dev)) {
		char fw_sts_str[MEI_FW_STATUS_STR_SZ];

		mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
		dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
		return -EAGAIN;
	}

	reg_buf = hdr;
	for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
		mei_txe_input_payload_write(dev, i, reg_buf[i]);

	reg_buf = data;
	for (j = 0; j < data_len / MEI_SLOT_SIZE; j++)
		mei_txe_input_payload_write(dev, i + j, reg_buf[j]);

	rem = data_len & 0x3;
	if (rem > 0) {
		u32 reg = 0;

		memcpy(&reg, (const u8 *)data + data_len - rem, rem);
		mei_txe_input_payload_write(dev, i + j, reg);
	}

	/* after each write the whole buffer is consumed */
	hw->slots = 0;

	/* Set Input-Doorbell */
	mei_txe_input_doorbell_set(hw);

	return 0;
}
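
/*
 * Worked example (illustrative, not from the original source): for a 4-byte
 * mei_msg_hdr and a 10-byte payload, and assuming mei_data2slots() rounds
 * the byte count up to whole slots, dw_cnt = (4 + 10 + 3) / 4 = 4 dwords,
 * which fits in the TXE_HBUF_DEPTH-slot buffer.  The header then lands in
 * payload slot 0, the full payload dwords in slots 1 and 2, and the trailing
 * 2 bytes are zero-padded into slot 3 by the memcpy() above.
 */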

/**
 * mei_txe_hbuf_depth - mimics the me hbuf circular buffer
 *
 * @dev: the device structure
 *
 * Return: the TXE_HBUF_DEPTH
 */
static u32 mei_txe_hbuf_depth(const struct mei_device *dev)
{
	return TXE_HBUF_DEPTH;
}

/**
 * mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer
 *
 * @dev: the device structure
 *
 * Return: the number of currently free host buffer slots
 */
static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	return hw->slots;
}

/**
 * mei_txe_count_full_read_slots - mimics the me device circular buffer
 *
 * @dev: the device structure
 *
 * Return: always the buffer size in dwords
 */
static int mei_txe_count_full_read_slots(struct mei_device *dev)
{
	/* the read buffer has a static size */
	return TXE_HBUF_DEPTH;
}

/**
 * mei_txe_read_hdr - read message header, which is always in the first 4 bytes
 *
 * @dev: the device structure
 *
 * Return: mei message header
 */
static u32 mei_txe_read_hdr(const struct mei_device *dev)
{
	return mei_txe_out_data_read(dev, 0);
}

/**
 * mei_txe_read - reads a message from the txe device.
 *
 * @dev: the device structure
 * @buf: buffer the message will be written to
 * @len: number of bytes to read
 *
 * Return: 0 on success, -EINVAL on wrong argument
 */
static int mei_txe_read(struct mei_device *dev,
		unsigned char *buf, unsigned long len)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 *reg_buf, reg;
	u32 rem;
	u32 i;

	if (WARN_ON(!buf || !len))
		return -EINVAL;

	reg_buf = (u32 *)buf;
	rem = len & 0x3;

	dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
		len, mei_txe_out_data_read(dev, 0));

	for (i = 0; i < len / MEI_SLOT_SIZE; i++) {
		/* skip header: index starts from 1 */
		reg = mei_txe_out_data_read(dev, i + 1);
		dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
		*reg_buf++ = reg;
	}

	if (rem) {
		reg = mei_txe_out_data_read(dev, i + 1);
		memcpy(reg_buf, &reg, rem);
	}

	mei_txe_output_ready_set(hw);
	return 0;
}
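
/*
 * Illustration (not from the original source; hdr/body/tail are hypothetical
 * local variables): the output payload area is read dword by dword, and
 * index 0 always holds the mei message header, so the message body starts at
 * index 1.  For a 6-byte body the loop above copies the dword at index 1 and
 * the memcpy() takes the remaining 2 bytes from the dword at index 2:
 *
 *	hdr  = mei_txe_read_hdr(dev);		// out_data_read(dev, 0)
 *	body = mei_txe_out_data_read(dev, 1);	// first 4 body bytes
 *	tail = mei_txe_out_data_read(dev, 2);	// last 2 body bytes (partial)
 */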

/**
 * mei_txe_hw_reset - resets host and fw.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success and < 0 in case of error
 */
static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 aliveness_req;

	/*
	 * Read the input doorbell to ensure consistency between the Bridge
	 * and SeC; the return value might be garbage.
	 */
	(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);

	aliveness_req = mei_txe_aliveness_req_get(dev);
	hw->aliveness = mei_txe_aliveness_get(dev);

	/* Disable interrupts; at this stage we will poll */
	mei_txe_intr_disable(dev);

	/*
	 * If Aliveness Request and Aliveness Response are not equal then
	 * wait for them to become equal.
	 * Since interrupts might be disabled - poll for it.
	 */
	if (aliveness_req != hw->aliveness)
		if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
			dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n");
			return -EIO;
		}

	/*
	 * If Aliveness Request and Aliveness Response are set then clear them
	 */
	if (aliveness_req) {
		mei_txe_aliveness_set(dev, 0);
		if (mei_txe_aliveness_poll(dev, 0) < 0) {
			dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
			return -EIO;
		}
	}

	/*
	 * Set readiness RDY_CLR bit
	 */
	mei_txe_readiness_clear(dev);

	return 0;
}

/**
 * mei_txe_hw_start - start the hardware after reset
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_txe_hw_start(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	int ret;
	u32 hisr;

	/* bring back interrupts */
	mei_txe_intr_enable(dev);

	ret = mei_txe_readiness_wait(dev);
	if (ret < 0) {
		dev_err(dev->dev, "waiting for readiness failed\n");
		return ret;
	}

	/*
	 * If the HISR.INT2_STS interrupt status bit is set then clear it.
	 */
	hisr = mei_txe_br_reg_read(hw, HISR_REG);
	if (hisr & HISR_INT_2_STS)
		mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);

	/* Clear the interrupt cause of OutputDoorbell */
	clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);

	ret = mei_txe_aliveness_set_sync(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
		return ret;
	}

	pm_runtime_set_active(dev->dev);

	/*
	 * Enable input ready interrupts:
	 * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
	 */
	mei_txe_input_ready_interrupt_enable(dev);

	/* Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
	mei_txe_output_ready_set(hw);

	/* Set the SICR_HOST_IPC_READINESS.HOST_RDY bit */
	mei_txe_readiness_set_host_rdy(dev);

	return 0;
}

/**
 * mei_txe_check_and_ack_intrs - translate multi BAR interrupt into
 *	single bit mask and acknowledge the interrupts
 *
 * @dev: the device structure
 * @do_ack: acknowledge interrupts
 *
 * Return: true if found interrupts to process.
 */
static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 hisr;
	u32 hhisr;
	u32 ipc_isr;
	u32 aliveness;
	bool generated;

	/* read interrupt registers */
	hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
	generated = (hhisr & IPC_HHIER_MSK);
	if (!generated)
		goto out;

	hisr = mei_txe_br_reg_read(hw, HISR_REG);

	aliveness = mei_txe_aliveness_get(dev);
	if (hhisr & IPC_HHIER_SEC && aliveness) {
		ipc_isr = mei_txe_sec_reg_read_silent(hw,
				SEC_IPC_HOST_INT_STATUS_REG);
	} else {
		ipc_isr = 0;
		hhisr &= ~IPC_HHIER_SEC;
	}

	if (do_ack) {
		/* Save the interrupt causes */
		hw->intr_cause |= hisr & HISR_INT_STS_MSK;
		if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
			hw->intr_cause |= TXE_INTR_IN_READY;

		mei_txe_intr_disable(dev);
		/*
		 * Clear the interrupts in hierarchy:
		 * IPC and Bridge, then the High Level
		 */
		mei_txe_sec_reg_write_silent(hw,
				SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
		mei_txe_br_reg_write(hw, HISR_REG, hisr);
		mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
	}
out:
	return generated;
}
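
/*
 * Note (illustrative summary of the function above, not from the original
 * source): the TXE interrupt status is layered, and both detection and
 * acknowledgment walk the same chain, top level first on read and last on ack:
 *
 *	HHISR (Bridge, top level)
 *	  -> HISR (Bridge, per-cause bits: readiness, aliveness, out doorbell)
 *	  -> SEC_IPC_HOST_INT_STATUS (SeC BAR, input-ready; only touched
 *	     while aliveness is asserted)
 */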

/**
 * mei_txe_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_WAKE_THREAD if the interrupt is intended for the device,
 *         IRQ_NONE otherwise
 */
irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = dev_id;

	if (mei_txe_check_and_ack_intrs(dev, true))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

/**
 * mei_txe_irq_thread_handler - txe interrupt thread
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: IRQ_HANDLED
 */
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_txe_hw *hw = to_txe_hw(dev);
	struct list_head cmpl_list;
	s32 slots;
	int rets = 0;

	dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
		mei_txe_br_reg_read(hw, HHISR_REG),
		mei_txe_br_reg_read(hw, HISR_REG),
		mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));

	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	INIT_LIST_HEAD(&cmpl_list);

	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
		mei_txe_check_and_ack_intrs(dev, true);

	/* show irq events */
	mei_txe_pending_interrupts(dev);

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	/*
	 * Readiness:
	 * Detection of TXE driver going through reset
	 * or TXE driver resetting the HECI interface.
	 */
	if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
		dev_dbg(dev->dev, "Readiness Interrupt was received...\n");

		/* Check if SeC is going through reset */
		if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
		} else {
			dev->recvd_hw_ready = false;
			if (dev->dev_state != MEI_DEV_RESETTING) {
				dev_warn(dev->dev, "FW not ready: resetting.\n");
				schedule_work(&dev->reset_work);
				goto end;
			}
		}
		wake_up(&dev->wait_hw_ready);
	}

	/************************************************************/
	/*
	 * Check interrupt cause:
	 * Aliveness: Detection of SeC acknowledge of host request that
	 * it remain alive or host cancellation of that request.
	 */
	if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
		/* Clear the interrupt cause */
		dev_dbg(dev->dev,
			"Aliveness Interrupt: Status: %d\n", hw->aliveness);
		dev->pg_event = MEI_PG_EVENT_RECEIVED;
		if (waitqueue_active(&hw->wait_aliveness_resp))
			wake_up(&hw->wait_aliveness_resp);
	}

	/*
	 * Output Doorbell:
	 * Detection of SeC having sent output to host
	 */
	slots = mei_count_full_read_slots(dev);
	if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
		/* Read from TXE */
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev,
				"mei_irq_read_handler ret = %d.\n", rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	/* Input Ready: Detection if host can write to SeC */
	if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
		dev->hbuf_is_ready = true;
		hw->slots = TXE_HBUF_DEPTH;
	}

	if (hw->aliveness && dev->hbuf_is_ready) {
		/* get the real register value */
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
		rets = mei_irq_write_handler(dev, &cmpl_list);
		if (rets && rets != -EMSGSIZE)
			dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
				rets);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);

	mei_enable_interrupts(dev);
	return IRQ_HANDLED;
}

static const struct mei_hw_ops mei_txe_hw_ops = {
	.host_is_ready = mei_txe_host_is_ready,

	.fw_status = mei_txe_fw_status,
	.pg_state = mei_txe_pg_state,

	.hw_is_ready = mei_txe_hw_is_ready,
	.hw_reset = mei_txe_hw_reset,
	.hw_config = mei_txe_hw_config,
	.hw_start = mei_txe_hw_start,

	.pg_in_transition = mei_txe_pg_in_transition,
	.pg_is_enabled = mei_txe_pg_is_enabled,

	.intr_clear = mei_txe_intr_clear,
	.intr_enable = mei_txe_intr_enable,
	.intr_disable = mei_txe_intr_disable,
	.synchronize_irq = mei_txe_synchronize_irq,

	.hbuf_free_slots = mei_txe_hbuf_empty_slots,
	.hbuf_is_ready = mei_txe_is_input_ready,
	.hbuf_depth = mei_txe_hbuf_depth,

	.write = mei_txe_write,

	.rdbuf_full_slots = mei_txe_count_full_read_slots,
	.read_hdr = mei_txe_read_hdr,
	.read = mei_txe_read,
};

/**
 * mei_txe_dev_init - allocates and initializes txe hardware specific structure
 *
 * @pdev: pci device
 *
 * Return: struct mei_device * on success or NULL
 */
struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops);

	hw = to_txe_hw(dev);

	init_waitqueue_head(&hw->wait_aliveness_resp);

	return dev;
}
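
/*
 * Usage sketch (illustrative, not part of this file): a PCI probe path would
 * typically allocate the device with mei_txe_dev_init() and then fill in the
 * two BAR mappings used throughout this file before starting the device,
 * roughly:
 *
 *	dev = mei_txe_dev_init(pdev);
 *	if (!dev)
 *		return -ENOMEM;
 *	hw = to_txe_hw(dev);
 *	hw->mem_addr[SEC_BAR] = ...;	// ioremapped SeC BAR
 *	hw->mem_addr[BRIDGE_BAR] = ...;	// ioremapped Bridge BAR
 *
 * The assignments above are a sketch of the idea, not the probe code itself.
 */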

/**
 * mei_txe_setup_satt2 - SATT2 configuration for DMA support.
 *
 * @dev: the device structure
 * @addr: physical address start of the range
 * @range: physical range size
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);

	u32 lo32 = lower_32_bits(addr);
	u32 hi32 = upper_32_bits(addr);
	u32 ctrl;

	/* SATT is limited to 36 Bits */
	if (hi32 & ~0xF)
		return -EINVAL;

	/* SATT has to be 16Byte aligned */
	if (lo32 & 0xF)
		return -EINVAL;

	/* SATT range has to be 4Bytes aligned */
	if (range & 0x4)
		return -EINVAL;

	/* SATT is limited to 32 MB range */
	if (range > SATT_RANGE_MAX)
		return -EINVAL;

	ctrl = SATT2_CTRL_VALID_MSK;
	ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;

	mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
	mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
	mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
	dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
		range, lo32, ctrl);

	return 0;
}
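
/*
 * Worked example (illustrative, not from the original source): for a DMA
 * window at physical address 0x1_2345_6780 with a 1 MiB range, the checks
 * above pass because the address fits in 36 bits (hi32 = 0x1), its low
 * 32 bits (0x23456780) are 16-byte aligned, and 1 MiB is below the 32 MB
 * SATT_RANGE_MAX noted above.  The bridge is then programmed with
 * lo32 = 0x23456780 and ctrl carrying hi32 shifted by
 * SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT together with SATT2_CTRL_VALID_MSK.
 */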