ipa_mhi_proxy.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/dma-mapping.h>
  6. #include <linux/list.h>
  7. #include <linux/slab.h>
  8. #include <linux/device.h>
  9. #include <linux/module.h>
  10. #include <linux/mhi.h>
  11. #include "ipa_qmi_service.h"
  12. #include "../ipa_common_i.h"
  13. #include "ipa_i.h"
#define IMP_DRV_NAME "ipa_mhi_proxy"

/* Debug log: kernel log + both normal and low-priority IPC log buffers */
#define IMP_DBG(fmt, args...) \
	do { \
		pr_debug(IMP_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Verbose debug log: kernel log + low-priority IPC log buffer only */
#define IMP_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IMP_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Error log: kernel log + both IPC log buffers */
#define IMP_ERR(fmt, args...) \
	do { \
		pr_err(IMP_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IMP_FUNC_ENTRY() \
	IMP_DBG_LOW("ENTRY\n")
#define IMP_FUNC_EXIT() \
	IMP_DBG_LOW("EXIT\n")

/*
 * IPA uC mailbox indices used to proxy the UL/DL channel and event-ring
 * doorbells (see imp_mhi_probe_cb / imp_mhi_trigger_ready_ind).
 */
#define IMP_IPA_UC_UL_CH_n 0
#define IMP_IPA_UC_UL_EV_n 1
#define IMP_IPA_UC_DL_CH_n 2
#define IMP_IPA_UC_DL_EV_n 3
#define IMP_IPA_UC_m 1

/* each pair of UL/DL channels are defined below */
static const struct mhi_device_id mhi_driver_match_table[] = {
	{ .chan = "IP_HW_OFFLOAD_0" },
	{},
};
/* forward declarations for the MHI bus driver callbacks registered below */
static int imp_mhi_probe_cb(struct mhi_device *, const struct mhi_device_id *);
static void imp_mhi_remove_cb(struct mhi_device *);
static void imp_mhi_status_cb(struct mhi_device *, enum MHI_CB);

/* MHI bus driver; binds on the IP_HW_OFFLOAD_0 channel pair above */
static struct mhi_driver mhi_driver = {
	.id_table = mhi_driver_match_table,
	.probe = imp_mhi_probe_cb,
	.remove = imp_mhi_remove_cb,
	.status_cb = imp_mhi_status_cb,
	.driver = {
		.name = IMP_DRV_NAME,
		.owner = THIS_MODULE,
	},
};
/*
 * Channel context area (CCA) entry passed to the MHI core.
 * NOTE(review): the packed layout appears to mirror the MHI spec channel
 * context structure - confirm against the MHI core before changing.
 */
struct imp_channel_context_type {
	u32 chstate:8;		/* channel state */
	u32 brsmode:2;		/* burst mode (see __imp_configure_mhi_device) */
	u32 pollcfg:6;		/* poll configuration from QMI request */
	u32 reserved:16;
	u32 chtype;		/* channel type / direction */
	u32 erindex;		/* index of the associated event ring */
	u64 rbase;		/* transfer ring base (IOVA) */
	u64 rlen;		/* transfer ring length in bytes */
	u64 rpp;		/* read pointer */
	u64 wpp;		/* write pointer */
} __packed;

/*
 * Event context area (ECA) entry passed to the MHI core.
 * NOTE(review): layout presumed to follow the MHI spec event context.
 */
struct imp_event_context_type {
	u32 reserved:8;
	u32 intmodc:8;		/* interrupt moderation count */
	u32 intmodt:16;		/* interrupt moderation timer */
	u32 ertype;		/* event ring type */
	u32 msivec;		/* MSI vector / address from QMI request */
	u64 rbase;		/* event ring base (IOVA) */
	u64 rlen;		/* event ring length in bytes */
	u64 rpp;		/* read pointer */
	u64 wpp;		/* write pointer */
} __packed;

/* one IOVA partition (base + size) read from DT */
struct imp_iova_addr {
	dma_addr_t base;
	unsigned int size;
};

/* static device information gathered at platform probe time */
struct imp_dev_info {
	struct platform_device *pdev;
	bool smmu_enabled;	/* true when both ctrl/data IOVA ranges exist */
	struct imp_iova_addr ctrl;	/* control IOVA partition */
	struct imp_iova_addr data;	/* data IOVA partition */
	u32 chdb_base;		/* MHI channel doorbell base (from DT) */
	u32 erdb_base;		/* MHI event ring doorbell base (from DT) */
};

/* event ring properties cached for the ready indication */
struct imp_event_props {
	u16 id;			/* event ring id assigned by MHI */
	phys_addr_t doorbell;	/* event ring doorbell address */
	u16 uc_mbox_n;		/* uC mailbox index proxying this doorbell */
	struct imp_event_context_type ev_ctx;
};

struct imp_event {
	struct imp_event_props props;
};

/* channel properties cached for the ready indication */
struct imp_channel_props {
	enum dma_data_direction dir;	/* DMA_TO_DEVICE (UL) / DMA_FROM_DEVICE (DL) */
	u16 id;			/* channel id assigned by MHI */
	phys_addr_t doorbell;	/* channel doorbell address */
	u16 uc_mbox_n;		/* uC mailbox index proxying this doorbell */
	struct imp_channel_context_type ch_ctx;
};

struct imp_channel {
	struct imp_channel_props props;
	struct imp_event event;	/* event ring associated with the channel */
};

/*
 * Proxy state machine:
 * INVALID -> PROBED (platform probe) -> READY (ready IND sent)
 * -> STARTED (channels allocated and started)
 */
enum imp_state {
	IMP_INVALID = 0,
	IMP_PROBED,
	IMP_READY,
	IMP_STARTED
};

/* cached QMI messages, reused for cleanup/unmap on shutdown */
struct imp_qmi_cache {
	struct ipa_mhi_ready_indication_msg_v01 ready_ind;
	struct ipa_mhi_alloc_channel_req_msg_v01 alloc_ch_req;
	struct ipa_mhi_alloc_channel_resp_msg_v01 alloc_ch_resp;
	struct ipa_mhi_clk_vote_resp_msg_v01 clk_vote_resp;
};

struct imp_mhi_driver {
	struct mhi_device *mhi_dev;
	struct imp_channel ul_chan;	/* uplink (device to host) */
	struct imp_channel dl_chan;	/* downlink (host to device) */
};

struct imp_context {
	struct imp_dev_info dev_info;
	struct imp_mhi_driver md;
	struct mutex mutex;		/* protects state and QMI cache */
	struct mutex lpm_mutex;		/* protects in_lpm */
	enum imp_state state;
	bool in_lpm;		/* true while MHI is in low power mode */
	bool lpm_disabled;	/* true while modem holds a clock vote */
	struct imp_qmi_cache qmi;
};

static struct imp_context *imp_ctx;
  150. static void _populate_smmu_info(struct ipa_mhi_ready_indication_msg_v01 *req)
  151. {
  152. req->smmu_info_valid = true;
  153. req->smmu_info.iova_ctl_base_addr = imp_ctx->dev_info.ctrl.base;
  154. req->smmu_info.iova_ctl_size = imp_ctx->dev_info.ctrl.size;
  155. req->smmu_info.iova_data_base_addr = imp_ctx->dev_info.data.base;
  156. req->smmu_info.iova_data_size = imp_ctx->dev_info.data.size;
  157. }
  158. static void imp_mhi_trigger_ready_ind(void)
  159. {
  160. struct ipa_mhi_ready_indication_msg_v01 *req
  161. = &imp_ctx->qmi.ready_ind;
  162. int ret;
  163. struct imp_channel *ch;
  164. struct ipa_mhi_ch_init_info_type_v01 *ch_info;
  165. IMP_FUNC_ENTRY();
  166. if (imp_ctx->state != IMP_PROBED) {
  167. IMP_ERR("invalid state %d\n", imp_ctx->state);
  168. goto exit;
  169. }
  170. if (imp_ctx->dev_info.smmu_enabled)
  171. _populate_smmu_info(req);
  172. req->ch_info_arr_len = 0;
  173. BUILD_BUG_ON(QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01 < 2);
  174. /* UL channel */
  175. ch = &imp_ctx->md.ul_chan;
  176. ch_info = &req->ch_info_arr[req->ch_info_arr_len];
  177. ch_info->ch_id = ch->props.id;
  178. ch_info->direction_type = ch->props.dir;
  179. ch_info->er_id = ch->event.props.id;
  180. /* uC is a doorbell proxy between local Q6 and remote Q6 */
  181. ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
  182. ipahal_get_reg_base() +
  183. ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
  184. IMP_IPA_UC_m,
  185. ch->props.uc_mbox_n);
  186. ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
  187. ipahal_get_reg_base() +
  188. ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
  189. IMP_IPA_UC_m,
  190. ch->event.props.uc_mbox_n);
  191. req->ch_info_arr_len++;
  192. /* DL channel */
  193. ch = &imp_ctx->md.dl_chan;
  194. ch_info = &req->ch_info_arr[req->ch_info_arr_len];
  195. ch_info->ch_id = ch->props.id;
  196. ch_info->direction_type = ch->props.dir;
  197. ch_info->er_id = ch->event.props.id;
  198. /* uC is a doorbell proxy between local Q6 and remote Q6 */
  199. ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
  200. ipahal_get_reg_base() +
  201. ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
  202. IMP_IPA_UC_m,
  203. ch->props.uc_mbox_n);
  204. ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
  205. ipahal_get_reg_base() +
  206. ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
  207. IMP_IPA_UC_m,
  208. ch->event.props.uc_mbox_n);
  209. req->ch_info_arr_len++;
  210. IMP_DBG("sending IND to modem\n");
  211. ret = ipa3_qmi_send_mhi_ready_indication(req);
  212. if (ret) {
  213. IMP_ERR("failed to send ready indication to modem %d\n", ret);
  214. return;
  215. }
  216. imp_ctx->state = IMP_READY;
  217. exit:
  218. IMP_FUNC_EXIT();
  219. }
  220. static struct imp_channel *imp_get_ch_by_id(u16 id)
  221. {
  222. if (imp_ctx->md.ul_chan.props.id == id)
  223. return &imp_ctx->md.ul_chan;
  224. if (imp_ctx->md.dl_chan.props.id == id)
  225. return &imp_ctx->md.dl_chan;
  226. return NULL;
  227. }
  228. static struct ipa_mhi_er_info_type_v01 *
  229. _find_ch_in_er_info_arr(struct ipa_mhi_alloc_channel_req_msg_v01 *req,
  230. u16 id)
  231. {
  232. int i;
  233. if (req->er_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01)
  234. return NULL;
  235. for (i = 0; i < req->tr_info_arr_len; i++)
  236. if (req->er_info_arr[i].er_id == id)
  237. return &req->er_info_arr[i];
  238. return NULL;
  239. }
  240. /* round addresses for closest page per SMMU requirements */
  241. static inline void imp_smmu_round_to_page(uint64_t iova, uint64_t pa,
  242. uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p)
  243. {
  244. *iova_p = rounddown(iova, PAGE_SIZE);
  245. *pa_p = rounddown(pa, PAGE_SIZE);
  246. *size_p = roundup(size + pa - *pa_p, PAGE_SIZE);
  247. }
/*
 * Map or unmap (@map) an array of @num_mapping IOVA->PA ranges in the
 * IOMMU domain of @dev. Ranges are page-rounded first; ranges that fall
 * inside the IPA register space are mapped with IOMMU_MMIO (device type).
 *
 * NOTE(review): iommu_map() return value is not checked here, so a
 * partial mapping failure is silent - confirm whether that is acceptable.
 */
static void __map_smmu_info(struct device *dev,
	struct imp_iova_addr *partition, int num_mapping,
	struct ipa_mhi_mem_addr_info_type_v01 *map_info,
	bool map)
{
	int i;
	struct iommu_domain *domain;
	unsigned long iova_p;
	phys_addr_t pa_p;
	u32 size_p;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		IMP_ERR("domain is NULL for dev\n");
		return;
	}

	for (i = 0; i < num_mapping; i++) {
		int prot = IOMMU_READ | IOMMU_WRITE;
		u32 ipa_base = ipa3_ctx->ipa_wrapper_base +
			ipa3_ctx->ctrl->ipa_reg_base_ofst;
		u32 ipa_size = ipa3_ctx->ipa_wrapper_size;

		/* SMMU can only map whole pages */
		imp_smmu_round_to_page(map_info[i].iova, map_info[i].pa,
			map_info[i].size, &iova_p, &pa_p, &size_p);

		if (map) {
			/* boundary check: range must fit the DT partition */
			WARN_ON(partition->base > iova_p ||
				(partition->base + partition->size) <
				(iova_p + size_p));

			/* for IPA uC MBOM we need to map with device type */
			if (pa_p - ipa_base < ipa_size)
				prot |= IOMMU_MMIO;

			IMP_DBG("mapping 0x%lx to 0x%pa size %d\n",
				iova_p, &pa_p, size_p);
			iommu_map(domain,
				iova_p, pa_p, size_p, prot);
		} else {
			IMP_DBG("unmapping 0x%lx to 0x%pa size %d\n",
				iova_p, &pa_p, size_p);
			iommu_unmap(domain, iova_p, size_p);
		}
	}
}
/*
 * Configure the MHI core with the channel (CCA) and event (ECA) contexts
 * for every transfer-ring entry of an allocate-channel request.
 *
 * On any failure the corresponding alloc_resp_arr entry is marked
 * unsuccessful, QMI failure result/error codes are filled into @resp and
 * -EINVAL is returned; on success @resp is left untouched and 0 is
 * returned. Caller must hold imp_ctx->mutex.
 */
static int __imp_configure_mhi_device(
	struct ipa_mhi_alloc_channel_req_msg_v01 *req,
	struct ipa_mhi_alloc_channel_resp_msg_v01 *resp)
{
	struct mhi_buf ch_config[2];	/* [0] = CCA, [1] = ECA */
	int i;
	struct ipa_mhi_er_info_type_v01 *er_info;
	struct imp_channel *ch;
	int ridx = 0;	/* next free slot in resp->alloc_resp_arr */
	int ret;

	IMP_FUNC_ENTRY();

	/* configure MHI */
	for (i = 0; i < req->tr_info_arr_len; i++) {
		ch = imp_get_ch_by_id(req->tr_info_arr[i].ch_id);
		if (!ch) {
			IMP_ERR("unknown channel %d\n",
				req->tr_info_arr[i].ch_id);
			resp->alloc_resp_arr[ridx].ch_id =
				req->tr_info_arr[i].ch_id;
			resp->alloc_resp_arr[ridx].is_success = 0;
			ridx++;
			resp->alloc_resp_arr_len = ridx;
			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
			/* return INCOMPATIBLE_STATE in any case */
			resp->resp.error =
				IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
			return -EINVAL;
		}

		/* populate CCA */
		if (req->tr_info_arr[i].brst_mode_type ==
			QMI_IPA_BURST_MODE_ENABLED_V01)
			ch->props.ch_ctx.brsmode = 3;
		else if (req->tr_info_arr[i].brst_mode_type ==
			QMI_IPA_BURST_MODE_DISABLED_V01)
			ch->props.ch_ctx.brsmode = 2;
		else
			ch->props.ch_ctx.brsmode = 0;
		ch->props.ch_ctx.pollcfg = req->tr_info_arr[i].poll_cfg;
		ch->props.ch_ctx.chtype = ch->props.dir;
		ch->props.ch_ctx.erindex = ch->event.props.id;
		ch->props.ch_ctx.rbase = req->tr_info_arr[i].ring_iova;
		ch->props.ch_ctx.rlen = req->tr_info_arr[i].ring_len;
		ch->props.ch_ctx.rpp = req->tr_info_arr[i].rp;
		ch->props.ch_ctx.wpp = req->tr_info_arr[i].wp;
		ch_config[0].buf = &ch->props.ch_ctx;
		ch_config[0].len = sizeof(ch->props.ch_ctx);
		ch_config[0].name = "CCA";

		/* populate ECA from the matching event-ring info entry */
		er_info = _find_ch_in_er_info_arr(req, ch->event.props.id);
		if (!er_info) {
			IMP_ERR("no event ring for ch %d\n",
				req->tr_info_arr[i].ch_id);
			resp->alloc_resp_arr[ridx].ch_id =
				req->tr_info_arr[i].ch_id;
			resp->alloc_resp_arr[ridx].is_success = 0;
			ridx++;
			resp->alloc_resp_arr_len = ridx;
			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
			resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
			return -EINVAL;
		}
		ch->event.props.ev_ctx.intmodc = er_info->intmod_count;
		ch->event.props.ev_ctx.intmodt = er_info->intmod_cycles;
		ch->event.props.ev_ctx.ertype = 1;
		ch->event.props.ev_ctx.msivec = er_info->msi_addr;
		ch->event.props.ev_ctx.rbase = er_info->ring_iova;
		ch->event.props.ev_ctx.rlen = er_info->ring_len;
		ch->event.props.ev_ctx.rpp = er_info->rp;
		ch->event.props.ev_ctx.wpp = er_info->wp;
		ch_config[1].buf = &ch->event.props.ev_ctx;
		ch_config[1].len = sizeof(ch->event.props.ev_ctx);
		ch_config[1].name = "ECA";

		IMP_DBG("Configuring MHI device for ch %d\n", ch->props.id);
		ret = mhi_device_configure(imp_ctx->md.mhi_dev, ch->props.dir,
			ch_config, 2);
		/* configure mhi-host, no need check mhi state */
		if (ret) {
			IMP_ERR("mhi_device_configure failed for ch %d\n",
				req->tr_info_arr[i].ch_id);
			resp->alloc_resp_arr[ridx].ch_id =
				req->tr_info_arr[i].ch_id;
			resp->alloc_resp_arr[ridx].is_success = 0;
			ridx++;
			resp->alloc_resp_arr_len = ridx;
			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
			resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
			return -EINVAL;
		}
	}

	IMP_FUNC_EXIT();
	return 0;
}
/**
 * imp_handle_allocate_channel_req() - Allocate a new MHI channel
 * @req: QMI allocate-channel request from the modem
 *
 * Allocates MHI channels and starts them: maps the CTRL/DATA IOVA ranges
 * into the MHI host SMMU (when enabled), configures the MHI core with the
 * channel/event contexts, and calls mhi_prepare_for_transfer(). On any
 * failure after mapping, the SMMU mappings are rolled back.
 *
 * Return: pointer to the cached QMI response (never NULL); QMI result and
 * error fields report success/failure to the modem.
 */
struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req(
	struct ipa_mhi_alloc_channel_req_msg_v01 *req)
{
	int ret;
	struct ipa_mhi_alloc_channel_resp_msg_v01 *resp =
		&imp_ctx->qmi.alloc_ch_resp;

	IMP_FUNC_ENTRY();

	mutex_lock(&imp_ctx->mutex);
	memset(resp, 0, sizeof(*resp));

	/* channel allocation is only legal once the ready IND was sent */
	if (imp_ctx->state != IMP_READY) {
		IMP_ERR("invalid state %d\n", imp_ctx->state);
		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
		resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}

	/* cache the req - reused later to unmap on shutdown/SSR */
	memcpy(&imp_ctx->qmi.alloc_ch_req, req, sizeof(*req));

	if (req->tr_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01) {
		IMP_ERR("invalid tr_info_arr_len %d\n", req->tr_info_arr_len);
		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
		resp->resp.error = IPA_QMI_ERR_NO_MEMORY_V01;
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}

	/* SMMU-enabled operation requires both CTRL and DATA mappings */
	if ((req->ctrl_addr_map_info_len == 0 ||
		req->data_addr_map_info_len == 0) &&
		imp_ctx->dev_info.smmu_enabled) {
		IMP_ERR("no mapping provided, but smmu is enabled\n");
		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
		resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}

	if (imp_ctx->dev_info.smmu_enabled) {
		/* map CTRL */
		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
			&imp_ctx->dev_info.ctrl,
			req->ctrl_addr_map_info_len,
			req->ctrl_addr_map_info,
			true);

		/* map DATA */
		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
			&imp_ctx->dev_info.data,
			req->data_addr_map_info_len,
			req->data_addr_map_info,
			true);
	}

	resp->alloc_resp_arr_valid = true;

	ret = __imp_configure_mhi_device(req, resp);
	if (ret)
		goto fail_smmu;

	IMP_DBG("Starting MHI channels %d and %d\n",
		imp_ctx->md.ul_chan.props.id,
		imp_ctx->md.dl_chan.props.id);
	ret = mhi_prepare_for_transfer(imp_ctx->md.mhi_dev);
	if (ret) {
		IMP_ERR("mhi_prepare_for_transfer failed %d\n", ret);
		/* report both channels as failed */
		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
			.ch_id = imp_ctx->md.ul_chan.props.id;
		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
			.is_success = 0;
		resp->alloc_resp_arr_len++;
		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
			.ch_id = imp_ctx->md.dl_chan.props.id;
		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
			.is_success = 0;
		resp->alloc_resp_arr_len++;
		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
		/* return INCOMPATIBLE_STATE in any case */
		resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
		goto fail_smmu;
	}

	/* success: report both channels as started */
	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
		.ch_id = imp_ctx->md.ul_chan.props.id;
	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
		.is_success = 1;
	resp->alloc_resp_arr_len++;

	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
		.ch_id = imp_ctx->md.dl_chan.props.id;
	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
		.is_success = 1;
	resp->alloc_resp_arr_len++;

	imp_ctx->state = IMP_STARTED;
	mutex_unlock(&imp_ctx->mutex);
	IMP_FUNC_EXIT();
	/* NOTE(review): success result is set after the EXIT trace - benign */
	resp->resp.result = IPA_QMI_RESULT_SUCCESS_V01;
	return resp;

fail_smmu:
	/* roll back the SMMU mappings created above */
	if (imp_ctx->dev_info.smmu_enabled) {
		/* unmap CTRL */
		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
			&imp_ctx->dev_info.ctrl,
			req->ctrl_addr_map_info_len,
			req->ctrl_addr_map_info,
			false);

		/* unmap DATA */
		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
			&imp_ctx->dev_info.data,
			req->data_addr_map_info_len,
			req->data_addr_map_info,
			false);
	}
	mutex_unlock(&imp_ctx->mutex);
	return resp;
}
/**
 * imp_handle_vote_req() - Votes for MHI / PCIe clocks
 * @vote: true to hold a bus vote (disable LPM), false to release it
 *
 * Hold a vote to prevent / allow low power mode on MHI.
 *
 * Return: pointer to the cached QMI clock-vote response (never NULL);
 * result/error fields indicate success or failure to the modem.
 */
struct ipa_mhi_clk_vote_resp_msg_v01
	*imp_handle_vote_req(bool vote)
{
	int ret;
	struct ipa_mhi_clk_vote_resp_msg_v01 *resp =
		&imp_ctx->qmi.clk_vote_resp;

	IMP_DBG_LOW("vote %d\n", vote);
	/* default to failure; overwritten on the success path */
	memset(resp, 0, sizeof(struct ipa_mhi_clk_vote_resp_msg_v01));
	resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
	resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;

	mutex_lock(&imp_ctx->mutex);
	if (imp_ctx->state != IMP_STARTED) {
		IMP_ERR("unexpected vote when in state %d\n", imp_ctx->state);
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}

	/* reject a duplicate vote/devote */
	if (vote == imp_ctx->lpm_disabled) {
		IMP_ERR("already voted/devoted %d\n", vote);
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}
	mutex_unlock(&imp_ctx->mutex);

	/*
	 * Unlock the mutex before calling into mhi for clock vote
	 * to avoid deadlock on imp mutex.
	 * Calls into mhi are synchronous and imp callbacks are
	 * executed from mhi context.
	 *
	 * NOTE(review): state may change between the unlock above and the
	 * relock below; the window appears deliberate - confirm callers
	 * serialize vote requests.
	 */
	if (vote) {
		ret = mhi_device_get_sync(imp_ctx->md.mhi_dev, MHI_VOTE_BUS);
		if (ret) {
			IMP_ERR("mhi_sync_get failed %d\n", ret);
			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
			/* return INCOMPATIBLE_STATE in any case */
			resp->resp.error =
				IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
			return resp;
		}
	} else {
		mhi_device_put(imp_ctx->md.mhi_dev, MHI_VOTE_BUS);
	}

	mutex_lock(&imp_ctx->mutex);
	if (vote)
		imp_ctx->lpm_disabled = true;
	else
		imp_ctx->lpm_disabled = false;
	mutex_unlock(&imp_ctx->mutex);

	resp->resp.result = IPA_QMI_RESULT_SUCCESS_V01;
	return resp;
}
  551. static int imp_read_iova_from_dtsi(const char *node, struct imp_iova_addr *out)
  552. {
  553. u32 iova_mapping[2];
  554. struct device_node *of_node = imp_ctx->dev_info.pdev->dev.of_node;
  555. if (of_property_read_u32_array(of_node, node, iova_mapping, 2)) {
  556. IMP_DBG("failed to read of_node %s\n", node);
  557. return -EINVAL;
  558. }
  559. out->base = iova_mapping[0];
  560. out->size = iova_mapping[1];
  561. IMP_DBG("%s: base: 0x%pad size: 0x%x\n", node, &out->base, out->size);
  562. return 0;
  563. }
/*
 * Tear down the proxy: send the QMI cleanup request to the modem, undo
 * the SMMU mappings created at channel allocation (using the cached
 * request), release any outstanding bus vote, unmap the uC doorbell page
 * and return the state machine to IMP_PROBED.
 * Caller must hold imp_ctx->mutex.
 */
static void imp_mhi_shutdown(void)
{
	struct ipa_mhi_cleanup_req_msg_v01 req = { 0 };

	IMP_FUNC_ENTRY();

	if (imp_ctx->state == IMP_STARTED ||
		imp_ctx->state == IMP_READY) {
		req.cleanup_valid = true;
		req.cleanup = true;
		ipa3_qmi_send_mhi_cleanup_request(&req);
		if (imp_ctx->dev_info.smmu_enabled) {
			struct ipa_mhi_alloc_channel_req_msg_v01 *creq
				= &imp_ctx->qmi.alloc_ch_req;

			/* unmap CTRL */
			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
				&imp_ctx->dev_info.ctrl,
				creq->ctrl_addr_map_info_len,
				creq->ctrl_addr_map_info,
				false);

			/* unmap DATA */
			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
				&imp_ctx->dev_info.data,
				creq->data_addr_map_info_len,
				creq->data_addr_map_info,
				false);
		}
		/* drop the modem's bus vote if it is still held */
		if (imp_ctx->lpm_disabled) {
			mhi_device_put(imp_ctx->md.mhi_dev, MHI_VOTE_BUS);
			imp_ctx->lpm_disabled = false;
		}

		/* unmap MHI doorbells from IPA uC SMMU */
		if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
			struct ipa_smmu_cb_ctx *cb =
				ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
			unsigned long iova_p;
			phys_addr_t pa_p;
			u32 size_p;

			imp_smmu_round_to_page(imp_ctx->dev_info.chdb_base,
				imp_ctx->dev_info.chdb_base, PAGE_SIZE,
				&iova_p, &pa_p, &size_p);

			iommu_unmap(cb->iommu_domain, iova_p, size_p);
		}
	}
	/* devote the IPA clock vote taken at probe (unless already in LPM) */
	if (!imp_ctx->in_lpm &&
		(imp_ctx->state == IMP_READY ||
		imp_ctx->state == IMP_STARTED)) {
		IMP_DBG("devote IMP with state= %d\n", imp_ctx->state);
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
	}
	imp_ctx->in_lpm = false;
	imp_ctx->state = IMP_PROBED;
	IMP_FUNC_EXIT();
}
/*
 * MHI bus probe callback. Caches the UL/DL channel and event-ring
 * properties, programs the IPA uC with the doorbell->mailbox proxy
 * mappings, maps the doorbell page into the uC SMMU and finally sends
 * the ready indication to the modem.
 *
 * Takes an IPA clock vote that is dropped either on failure here, on
 * LPM entry, or at shutdown.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int imp_mhi_probe_cb(struct mhi_device *mhi_dev,
	const struct mhi_device_id *id)
{
	struct imp_channel *ch;
	struct imp_event *ev;
	int ret;

	IMP_FUNC_ENTRY();

	/* only the first match-table entry (IP_HW_OFFLOAD_0) is supported */
	if (id != &mhi_driver_match_table[0]) {
		IMP_ERR("only chan=%s is supported for now\n",
			mhi_driver_match_table[0].chan);
		return -EPERM;
	}

	/* vote for IPA clock. IPA clock will be devoted when MHI enters LPM */
	IPA_ACTIVE_CLIENTS_INC_SPECIAL("IMP");

	imp_ctx->md.mhi_dev = mhi_dev;

	mutex_lock(&imp_ctx->mutex);

	/* store UL channel properties */
	ch = &imp_ctx->md.ul_chan;
	ev = &imp_ctx->md.ul_chan.event;

	ch->props.id = mhi_dev->ul_chan_id;
	ch->props.dir = DMA_TO_DEVICE;
	/* doorbells are 8 bytes apart, indexed by channel/event id */
	ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8;
	ch->props.uc_mbox_n = IMP_IPA_UC_UL_CH_n;
	IMP_DBG("ul ch id %d doorbell 0x%pa uc_mbox_n %d\n",
		ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n);

	/* tell the uC which mailbox proxies this doorbell */
	ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell,
		ch->props.uc_mbox_n);
	if (ret)
		goto fail;
	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell,
		ch->props.uc_mbox_n);

	ev->props.id = mhi_dev->ul_event_id;
	ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8;
	ev->props.uc_mbox_n = IMP_IPA_UC_UL_EV_n;
	IMP_DBG("allocated ev %d\n", ev->props.id);

	ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell,
		ev->props.uc_mbox_n);
	if (ret)
		goto fail;
	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ev->props.doorbell,
		ev->props.uc_mbox_n);

	/* store DL channel properties */
	ch = &imp_ctx->md.dl_chan;
	ev = &imp_ctx->md.dl_chan.event;

	ch->props.dir = DMA_FROM_DEVICE;
	ch->props.id = mhi_dev->dl_chan_id;
	ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8;
	ch->props.uc_mbox_n = IMP_IPA_UC_DL_CH_n;
	IMP_DBG("dl ch id %d doorbell 0x%pa uc_mbox_n %d\n",
		ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n);

	ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell,
		ch->props.uc_mbox_n);
	if (ret)
		goto fail;
	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell,
		ch->props.uc_mbox_n);

	ev->props.id = mhi_dev->dl_event_id;
	ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8;
	ev->props.uc_mbox_n = IMP_IPA_UC_DL_EV_n;
	IMP_DBG("allocated ev %d\n", ev->props.id);

	ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell,
		ev->props.uc_mbox_n);
	if (ret)
		goto fail;
	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ev->props.doorbell,
		ev->props.uc_mbox_n);

	/*
	 * Map MHI doorbells to IPA uC SMMU.
	 * Both channel and event doorbells resides in a single page.
	 */
	if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
		struct ipa_smmu_cb_ctx *cb =
			ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
		unsigned long iova_p;
		phys_addr_t pa_p;
		u32 size_p;

		imp_smmu_round_to_page(imp_ctx->dev_info.chdb_base,
			imp_ctx->dev_info.chdb_base, PAGE_SIZE,
			&iova_p, &pa_p, &size_p);

		ret = ipa3_iommu_map(cb->iommu_domain, iova_p, pa_p, size_p,
			IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
		if (ret)
			goto fail;
	}

	imp_mhi_trigger_ready_ind();

	mutex_unlock(&imp_ctx->mutex);

	IMP_FUNC_EXIT();
	return 0;

fail:
	/* drop the clock vote taken at entry */
	mutex_unlock(&imp_ctx->mutex);
	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
	return ret;
}
/* MHI bus remove callback - tear down the proxy under the main lock. */
static void imp_mhi_remove_cb(struct mhi_device *mhi_dev)
{
	IMP_FUNC_ENTRY();

	mutex_lock(&imp_ctx->mutex);
	imp_mhi_shutdown();
	mutex_unlock(&imp_ctx->mutex);

	IMP_FUNC_EXIT();
}
/*
 * MHI status callback: tracks LPM entry/exit and devotes/votes the IPA
 * clock accordingly. Only callbacks from the bound mhi_dev are honored.
 * Runs under lpm_mutex; imp_ctx->state is read here without the main
 * mutex (NOTE(review): presumably an accepted benign race - confirm).
 */
static void imp_mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb)
{
	IMP_DBG("%d\n", mhi_cb);

	mutex_lock(&imp_ctx->lpm_mutex);
	if (mhi_dev != imp_ctx->md.mhi_dev) {
		IMP_DBG("ignoring secondary callbacks\n");
		mutex_unlock(&imp_ctx->lpm_mutex);
		return;
	}

	switch (mhi_cb) {
	case MHI_CB_IDLE:
		break;
	case MHI_CB_LPM_ENTER:
		/* devote the IPA clock while MHI sleeps */
		if (imp_ctx->state == IMP_STARTED) {
			if (!imp_ctx->in_lpm) {
				IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
				imp_ctx->in_lpm = true;
			} else {
				IMP_ERR("already in LPM\n");
			}
		}
		break;
	case MHI_CB_LPM_EXIT:
		/* re-vote the IPA clock on wakeup */
		if (imp_ctx->state == IMP_STARTED) {
			if (imp_ctx->in_lpm) {
				IPA_ACTIVE_CLIENTS_INC_SPECIAL("IMP");
				imp_ctx->in_lpm = false;
			} else {
				IMP_ERR("not in LPM\n");
			}
		}
		break;

	case MHI_CB_EE_RDDM:
	case MHI_CB_PENDING_DATA:
	default:
		IMP_ERR("unexpected event %d\n", mhi_cb);
		break;
	}
	mutex_unlock(&imp_ctx->lpm_mutex);
}
  757. static int imp_probe(struct platform_device *pdev)
  758. {
  759. int ret;
  760. IMP_FUNC_ENTRY();
  761. if (ipa3_uc_state_check()) {
  762. IMP_DBG("uC not ready yet\n");
  763. return -EPROBE_DEFER;
  764. }
  765. imp_ctx->dev_info.pdev = pdev;
  766. imp_ctx->dev_info.smmu_enabled = true;
  767. ret = imp_read_iova_from_dtsi("qcom,ctrl-iova",
  768. &imp_ctx->dev_info.ctrl);
  769. if (ret)
  770. imp_ctx->dev_info.smmu_enabled = false;
  771. ret = imp_read_iova_from_dtsi("qcom,data-iova",
  772. &imp_ctx->dev_info.data);
  773. if (ret)
  774. imp_ctx->dev_info.smmu_enabled = false;
  775. IMP_DBG("smmu_enabled=%d\n", imp_ctx->dev_info.smmu_enabled);
  776. if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base",
  777. &imp_ctx->dev_info.chdb_base)) {
  778. IMP_ERR("failed to read of_node %s\n", "qcom,mhi-chdb-base");
  779. return -EINVAL;
  780. }
  781. IMP_DBG("chdb-base=0x%x\n", imp_ctx->dev_info.chdb_base);
  782. if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base",
  783. &imp_ctx->dev_info.erdb_base)) {
  784. IMP_ERR("failed to read of_node %s\n", "qcom,mhi-erdb-base");
  785. return -EINVAL;
  786. }
  787. IMP_DBG("erdb-base=0x%x\n", imp_ctx->dev_info.erdb_base);
  788. imp_ctx->state = IMP_PROBED;
  789. ret = mhi_driver_register(&mhi_driver);
  790. if (ret) {
  791. IMP_ERR("mhi_driver_register failed %d\n", ret);
  792. mutex_unlock(&imp_ctx->mutex);
  793. return ret;
  794. }
  795. IMP_FUNC_EXIT();
  796. return 0;
  797. }
/*
 * Platform remove: unregister from the MHI bus, drop any outstanding IPA
 * clock vote, and reset the proxy state machine back to IMP_INVALID.
 */
static int imp_remove(struct platform_device *pdev)
{
	IMP_FUNC_ENTRY();
	mhi_driver_unregister(&mhi_driver);
	mutex_lock(&imp_ctx->mutex);
	/* devote if still held (READY/STARTED and not already in LPM) */
	if (!imp_ctx->in_lpm && (imp_ctx->state == IMP_READY ||
		imp_ctx->state == IMP_STARTED)) {
		IMP_DBG("devote IMP with state= %d\n", imp_ctx->state);
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
	}
	imp_ctx->lpm_disabled = false;
	imp_ctx->state = IMP_INVALID;
	mutex_unlock(&imp_ctx->mutex);

	mutex_lock(&imp_ctx->lpm_mutex);
	imp_ctx->in_lpm = false;
	mutex_unlock(&imp_ctx->lpm_mutex);

	return 0;
}
/* device-tree match table for the platform driver */
static const struct of_device_id imp_dt_match[] = {
	{ .compatible = "qcom,ipa-mhi-proxy" },
	{},
};
MODULE_DEVICE_TABLE(of, imp_dt_match);

/* platform driver, registered on modem-ready (see imp_handle_modem_ready) */
static struct platform_driver ipa_mhi_proxy_driver = {
	.driver = {
		.name = "ipa_mhi_proxy",
		.of_match_table = imp_dt_match,
	},
	.probe = imp_probe,
	.remove = imp_remove,
};
  829. /**
  830. * imp_handle_modem_ready() - Registers IMP as a platform device
  831. *
  832. * This function is called after modem is loaded and QMI handshake is done.
  833. * IMP will register itself as a platform device, and on support device the
  834. * probe function will get called.
  835. *
  836. * Return: None
  837. */
  838. void imp_handle_modem_ready(void)
  839. {
  840. if (!imp_ctx) {
  841. imp_ctx = kzalloc(sizeof(*imp_ctx), GFP_KERNEL);
  842. if (!imp_ctx)
  843. return;
  844. mutex_init(&imp_ctx->mutex);
  845. mutex_init(&imp_ctx->lpm_mutex);
  846. }
  847. if (imp_ctx->state != IMP_INVALID) {
  848. IMP_ERR("unexpected state %d\n", imp_ctx->state);
  849. return;
  850. }
  851. IMP_DBG("register platform device\n");
  852. platform_driver_register(&ipa_mhi_proxy_driver);
  853. }
/**
 * imp_handle_modem_shutdown() - Handles modem SSR
 *
 * Performs MHI cleanup when modem is going to SSR (Subsystem Restart):
 * stops started channels, undoes the channel-allocation SMMU mappings
 * (using the cached QMI request), and unregisters the platform driver.
 *
 * Return: None
 */
void imp_handle_modem_shutdown(void)
{
	IMP_FUNC_ENTRY();

	if (!imp_ctx)
		return;

	mutex_lock(&imp_ctx->mutex);

	if (imp_ctx->state == IMP_INVALID) {
		mutex_unlock(&imp_ctx->mutex);
		return;
	}
	/* stop the channels first; this drops STARTED back to READY */
	if (imp_ctx->state == IMP_STARTED) {
		mhi_unprepare_from_transfer(imp_ctx->md.mhi_dev);
		imp_ctx->state = IMP_READY;
	}

	if (imp_ctx->state == IMP_READY) {
		if (imp_ctx->dev_info.smmu_enabled) {
			struct ipa_mhi_alloc_channel_req_msg_v01 *creq
				= &imp_ctx->qmi.alloc_ch_req;

			/* unmap CTRL */
			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
				&imp_ctx->dev_info.ctrl,
				creq->ctrl_addr_map_info_len,
				creq->ctrl_addr_map_info,
				false);

			/* unmap DATA */
			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
				&imp_ctx->dev_info.data,
				creq->data_addr_map_info_len,
				creq->data_addr_map_info,
				false);
		}
	}

	mutex_unlock(&imp_ctx->mutex);
	IMP_FUNC_EXIT();
	/* unregistering triggers imp_remove() for final cleanup */
	platform_driver_unregister(&ipa_mhi_proxy_driver);
}
  897. MODULE_LICENSE("GPL v2");
  898. MODULE_DESCRIPTION("IPA MHI Proxy Driver");