boot.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/* Setup RDDM vector table for RDDM transfer and program RXVEC */
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		     struct image_info *img_info)
{
	struct mhi_buf *mhi_buf = img_info->mhi_buf;
	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 sequence_id;
	unsigned int i;
	int ret;

	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
		bhi_vec->dma_addr = mhi_buf->dma_addr;
		bhi_vec->size = mhi_buf->len;
	}
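
	/* mhi_buf now points at the last entry, which holds the vector table */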
	MHI_VERB(dev, "BHIe programming for RDDM\n");

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
		      lower_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
	sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);

	ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
				  BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
	if (ret) {
		MHI_ERR(dev, "Failed to write sequence ID for BHIE_RXVECDB\n");
		return ret;
	}

	MHI_VERB(dev, "Address: %p and len: 0x%zx sequence: %u\n",
		 &mhi_buf->dma_addr, mhi_buf->len, sequence_id);

	return 0;
}

/* Check if the RDDM image download has completed */
int mhi_rddm_download_status(struct mhi_controller *mhi_cntrl)
{
	u32 rx_status;
	enum mhi_ee_type ee;
	const u32 delayus = 5000;
	void __iomem *base = mhi_cntrl->bhie;
	u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret = 0;

	while (retry--) {
		ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
					 BHIE_RXVECSTATUS_STATUS_BMSK,
					 &rx_status);
		if (ret)
			return -EIO;

		if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
			MHI_LOG(dev, "RDDM dumps collected successfully");
			return 0;
		}

		udelay(delayus);
	}

	ee = mhi_get_exec_env(mhi_cntrl);
	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
	MHI_ERR(dev, "ret: %d, RXVEC_STATUS: 0x%x, EE:%s\n", ret, rx_status,
		TO_MHI_EXEC_STR(ee));

	return -EIO;
}

/* Collect RDDM buffer during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
	int ret;
	enum mhi_ee_type ee;
	const u32 delayus = 2000;
	const u32 rddm_timeout_us = 200000;
	int rddm_retry = rddm_timeout_us / delayus;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	MHI_VERB(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 mhi_state_str(mhi_cntrl->dev_state),
		 TO_MHI_EXEC_STR(mhi_cntrl->ee));

	/*
	 * This should only be executing during a kernel panic, we expect all
	 * other cores to shut down while we're collecting the RDDM buffer.
	 * After returning from this function, we expect the device to reset.
	 *
	 * Normally, we read/write pm_state only after grabbing the pm_lock;
	 * since we're in a panic, we skip it here. Also, there is no
	 * guarantee that this state change will take effect since we're
	 * setting it without grabbing the pm_lock.
	 */
	mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;

	/* Update should take effect immediately */
	smp_wmb();

	/*
	 * Make sure device is not already in RDDM. In case the device asserts
	 * and a kernel panic follows, device will already be in RDDM.
	 * Do not trigger SYS ERR again and proceed with waiting for
	 * image download completion.
	 */
	ee = mhi_get_exec_env(mhi_cntrl);
	if (ee == MHI_EE_MAX)
		goto error_exit_rddm;

	if (ee != MHI_EE_RDDM) {
		MHI_VERB(dev, "Trigger device into RDDM mode using SYS ERR\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

		MHI_VERB(dev, "Waiting for device to enter RDDM\n");
		while (rddm_retry--) {
			ee = mhi_get_exec_env(mhi_cntrl);
			if (ee == MHI_EE_RDDM)
				break;

			udelay(delayus);
		}

		if (rddm_retry <= 0) {
			/* Hardware reset so force device to enter RDDM */
			MHI_VERB(dev,
				 "Did not enter RDDM, do a host req reset\n");
			mhi_soc_reset(mhi_cntrl);
			udelay(delayus);
		}

		ee = mhi_get_exec_env(mhi_cntrl);
	}

	MHI_VERB(dev,
		 "Waiting for RDDM image download via BHIe, current EE:%s\n",
		 TO_MHI_EXEC_STR(ee));

	ret = mhi_rddm_download_status(mhi_cntrl);
	if (!ret) {
		MHI_LOG(dev, "RDDM dumps collected successfully");
		return 0;
	}

error_exit_rddm:
	MHI_ERR(dev, "RDDM transfer failed. Current EE: %s\n",
		TO_MHI_EXEC_STR(ee));

	return -EIO;
}

/* Download RDDM image from device */
int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
{
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 rx_status;

	if (in_panic)
		return __mhi_download_rddm_in_panic(mhi_cntrl);

	MHI_VERB(dev, "Waiting for RDDM image download via BHIe\n");

	/* Wait for the image download to complete */
	wait_event_timeout(mhi_cntrl->state_event,
			   mhi_read_reg_field(mhi_cntrl, base,
					      BHIE_RXVECSTATUS_OFFS,
					      BHIE_RXVECSTATUS_STATUS_BMSK,
					      &rx_status) || rx_status,
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	MHI_VERB(dev, "RXVEC_STATUS: 0x%x\n", rx_status);

	return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
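
/* Program the BHIe TX vector registers and wait for the image transfer to complete */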
static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
			    const struct mhi_buf *mhi_buf)
{
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	u32 tx_status;
	int ret;

	read_lock_bh(pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		read_unlock_bh(pm_lock);
		return -EIO;
	}

	mhi_cntrl->session_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
	MHI_VERB(dev, "Starting image download via BHIe. Sequence ID: %u\n",
		 mhi_cntrl->session_id);
	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
		      lower_32_bits(mhi_buf->dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
	ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
				  BHIE_TXVECDB_SEQNUM_BMSK, mhi_cntrl->session_id);
	read_unlock_bh(pm_lock);

	if (ret)
		return ret;

	/* Wait for the image download to complete */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
				 mhi_read_reg_field(mhi_cntrl, base,
						    BHIE_TXVECSTATUS_OFFS,
						    BHIE_TXVECSTATUS_STATUS_BMSK,
						    &tx_status) || tx_status,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
	    tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
		return -EIO;

	return (!ret) ? -ETIMEDOUT : 0;
}
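
/* Download a single image to the device over BHI and wait for completion */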
static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
			   dma_addr_t dma_addr,
			   size_t size)
{
	u32 tx_status, val, session_id;
	int i, ret;
	void __iomem *base = mhi_cntrl->bhi;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		char *name;
		u32 offset;
	} error_reg[] = {
		{ "ERROR_CODE", BHI_ERRCODE },
		{ "ERROR_DBG1", BHI_ERRDBG1 },
		{ "ERROR_DBG2", BHI_ERRDBG2 },
		{ "ERROR_DBG3", BHI_ERRDBG3 },
		{ NULL },
	};

	read_lock_bh(pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		read_unlock_bh(pm_lock);
		goto invalid_pm_state;
	}

	session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
	MHI_VERB(dev, "Starting image download via BHI. Session ID: %u\n",
		 session_id);
	mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
		      upper_32_bits(dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
		      lower_32_bits(dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
	mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
	read_unlock_bh(pm_lock);

	/* Wait for the image download to complete */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
				 mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
						    BHI_STATUS_MASK, &tx_status) || tx_status,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		goto invalid_pm_state;

	if (tx_status == BHI_STATUS_ERROR) {
		MHI_ERR(dev, "Image transfer failed\n");
		read_lock_bh(pm_lock);
		if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
			for (i = 0; error_reg[i].name; i++) {
				ret = mhi_read_reg(mhi_cntrl, base,
						   error_reg[i].offset, &val);
				if (ret)
					break;
				MHI_ERR(dev, "Reg: %s value: 0x%x\n",
					error_reg[i].name, val);
			}
		}
		read_unlock_bh(pm_lock);
		goto invalid_pm_state;
	}

	return (!ret) ? -ETIMEDOUT : 0;

invalid_pm_state:
	return -EIO;
}
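
/* Free the BHIe image segments and vector table */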
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info)
{
	int i;
	struct mhi_buf *mhi_buf = (*image_info)->mhi_buf;

	if (mhi_cntrl->img_pre_alloc)
		return;

	for (i = 0; i < (*image_info)->entries; i++, mhi_buf++)
		dma_free_attrs(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf,
			       mhi_buf->dma_addr, DMA_ATTR_FORCE_CONTIGUOUS);

	kfree((*image_info)->mhi_buf);
	kfree(*image_info);
	*image_info = NULL;
}
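
/* Allocate BHIe image segments plus one extra entry for the vector table */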
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info,
			 size_t alloc_size)
{
	size_t seg_size = mhi_cntrl->seg_len;
	int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
	int i;
	struct image_info *img_info;
	struct mhi_buf *mhi_buf;

	if (mhi_cntrl->img_pre_alloc)
		return 0;

	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
	if (!img_info)
		return -ENOMEM;

	/* Allocate memory for entries */
	img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
				    GFP_KERNEL);
	if (!img_info->mhi_buf)
		goto error_alloc_mhi_buf;

	/* Allocate and populate vector table */
	mhi_buf = img_info->mhi_buf;
	for (i = 0; i < segments; i++, mhi_buf++) {
		size_t vec_size = seg_size;

		/* Vector table is the last entry */
		if (i == segments - 1)
			vec_size = sizeof(struct bhi_vec_entry) * i;

		mhi_buf->len = vec_size;
		mhi_buf->buf = dma_alloc_attrs(mhi_cntrl->cntrl_dev, vec_size,
					       &mhi_buf->dma_addr, GFP_KERNEL,
					       DMA_ATTR_FORCE_CONTIGUOUS);
		if (!mhi_buf->buf)
			goto error_alloc_segment;
	}

	img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
	img_info->entries = segments;
	*image_info = img_info;

	return 0;

error_alloc_segment:
	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
		dma_free_attrs(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf,
			       mhi_buf->dma_addr, DMA_ATTR_FORCE_CONTIGUOUS);

error_alloc_mhi_buf:
	kfree(img_info);

	return -ENOMEM;
}
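
/* Copy the firmware image into the allocated segments and populate the vector table */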
static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
			      const u8 *img_buf,
			      size_t img_size,
			      struct image_info *img_info)
{
	size_t remainder = img_size;
	size_t to_cpy;
	const u8 *buf = img_buf;
	struct mhi_buf *mhi_buf = img_info->mhi_buf;
	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;

	while (remainder) {
		to_cpy = min(remainder, mhi_buf->len);
		memcpy(mhi_buf->buf, buf, to_cpy);
		bhi_vec->dma_addr = mhi_buf->dma_addr;
		bhi_vec->size = to_cpy;

		buf += to_cpy;
		remainder -= to_cpy;
		bhi_vec++;
		mhi_buf++;
	}
}
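
/* Fetch the firmware, download the SBL/EDL image over BHI, and stage the remaining image for BHIe when FBC download is enabled */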
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
	const struct firmware *firmware = NULL;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	const char *fw_name;
	void *buf;
	dma_addr_t dma_addr;
	size_t size, img_size;
	int i, ret;
	const u8 *img_buf;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		MHI_ERR(dev, "Device MHI is not in valid state\n");
		return;
	}

	/* save hardware info from BHI */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
			   &mhi_cntrl->serial_number);
	if (ret)
		MHI_ERR(dev, "Could not capture serial number via BHI\n");

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
				   &mhi_cntrl->oem_pk_hash[i]);
		if (ret) {
			MHI_ERR(dev, "Could not capture OEM PK HASH via BHI\n");
			break;
		}
	}

	/* wait for ready on pass through or any other execution environment */
	if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
		goto fw_load_ready_state;

	fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
		mhi_cntrl->edl_image : mhi_cntrl->fw_image;

	if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
						     !mhi_cntrl->seg_len))) {
		MHI_ERR(dev,
			"No firmware image defined or !sbl_size || !seg_len\n");
		goto error_fw_load;
	}

	ret = request_firmware(&firmware, fw_name, dev->parent);
	if (ret) {
		if (!mhi_cntrl->fallback_fw_image) {
			MHI_ERR(dev, "Error loading firmware: %d\n", ret);
			goto error_fw_load;
		}

		ret = request_firmware(&firmware,
				       mhi_cntrl->fallback_fw_image,
				       dev->parent);
		if (ret) {
			MHI_ERR(dev, "Error loading fallback firmware: %d\n",
				ret);
			goto error_fw_load;
		}

		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FALLBACK_IMG);
	}

	size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;

	/* SBL size provided is maximum size, not necessarily the image size */
	if (size > firmware->size)
		size = firmware->size;

	buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
				 GFP_KERNEL);
	if (!buf) {
		release_firmware(firmware);
		goto error_fw_load;
	}

	/* Download image using BHI */
	memcpy(buf, firmware->data, size);
	ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
	dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);

	/* Error or in EDL mode, we're done */
	if (ret) {
		MHI_ERR(dev, "MHI did not load image over BHI, ret: %d\n", ret);
		release_firmware(firmware);
		goto error_fw_load;
	}

	/* Wait for ready since EDL image was loaded */
	if (fw_name == mhi_cntrl->edl_image) {
		release_firmware(firmware);
		goto fw_load_ready_state;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_RESET;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/*
	 * If we're doing fbc, populate vector tables while
	 * the device is transitioning into MHI READY state
	 */
	if (mhi_cntrl->fbc_download) {
		img_size = firmware->size;
		img_buf = firmware->data;

		MHI_LOG(dev, "tme_supported_image:%s\n",
			(mhi_cntrl->tme_supported_image ? "True" : "False"));
		if (mhi_cntrl->tme_supported_image) {
			img_buf = firmware->data + mhi_cntrl->sbl_size;
			img_size = img_size - mhi_cntrl->sbl_size;
		}

		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, img_size);
		if (ret) {
			release_firmware(firmware);
			goto error_fw_load;
		}

		/* Load the firmware into BHIE vec table */
		mhi_firmware_copy(mhi_cntrl, img_buf, img_size, mhi_cntrl->fbc_image);
	}

	release_firmware(firmware);

fw_load_ready_state:
	/* Transitioning into MHI RESET->READY state */
	ret = mhi_ready_state_transition(mhi_cntrl);
	if (ret) {
		MHI_ERR(dev, "MHI did not enter READY state\n");
		goto error_ready_state;
	}

	MHI_LOG(dev, "Wait for device to enter SBL or Mission mode\n");
	return;

error_ready_state:
	if (mhi_cntrl->fbc_download)
		mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image);

error_fw_load:
	write_lock_irq(&mhi_cntrl->pm_lock);
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (new_state == MHI_PM_FW_DL_ERR)
		wake_up_all(&mhi_cntrl->state_event);
}
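
/* Download the AMSS (full) image over BHIe using the staged vector table */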
int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
{
	struct image_info *image_info = mhi_cntrl->fbc_image;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (!image_info)
		return -EIO;

	ret = mhi_fw_load_bhie(mhi_cntrl,
			       /* Vector table is the last entry */
			       &image_info->mhi_buf[image_info->entries - 1]);
	if (ret) {
		MHI_ERR(dev, "MHI did not load AMSS, ret:%d\n", ret);
		write_lock_irq(&mhi_cntrl->pm_lock);
		new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (new_state == MHI_PM_FW_DL_ERR)
			wake_up_all(&mhi_cntrl->state_event);
	}

	return ret;
}