// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mhi.h>

#include "ipa_qmi_service.h"
#include "../ipa_common_i.h"
#include "ipa_i.h"

#define IMP_DRV_NAME "ipa_mhi_proxy"
#define IMP_DBG(fmt, args...) \
	do { \
		pr_debug(IMP_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IMP_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IMP_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IMP_ERR(fmt, args...) \
	do { \
		pr_err(IMP_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IMP_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

#define IMP_FUNC_ENTRY() \
	IMP_DBG_LOW("ENTRY\n")
#define IMP_FUNC_EXIT() \
	IMP_DBG_LOW("EXIT\n")
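
/*
 * IPA uC mailbox assignments (set m = 1): mailboxes 0-3 below proxy the
 * UL/DL channel and event ring doorbells between the local and remote Q6.
 */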
#define IMP_IPA_UC_UL_CH_n 0
#define IMP_IPA_UC_UL_EV_n 1
#define IMP_IPA_UC_DL_CH_n 2
#define IMP_IPA_UC_DL_EV_n 3

#define IMP_IPA_UC_m 1

/* each pair of UL/DL channels is defined below */
static const struct mhi_device_id mhi_driver_match_table[] = {
	{ .chan = "IP_HW_OFFLOAD_0" },
	{},
};
static int imp_mhi_probe_cb(struct mhi_device *, const struct mhi_device_id *);
static void imp_mhi_remove_cb(struct mhi_device *);
static void imp_mhi_status_cb(struct mhi_device *, enum MHI_CB);

static struct mhi_driver mhi_driver = {
	.id_table = mhi_driver_match_table,
	.probe = imp_mhi_probe_cb,
	.remove = imp_mhi_remove_cb,
	.status_cb = imp_mhi_status_cb,
	.driver = {
		.name = IMP_DRV_NAME,
		.owner = THIS_MODULE,
	},
};
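
/*
 * Local mirror of an MHI channel context array (CCA) entry; handed to
 * mhi_device_configure() as the "CCA" buffer, so the layout must match
 * what the MHI core expects.
 */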
struct imp_channel_context_type {
	u32 chstate:8;
	u32 brsmode:2;
	u32 pollcfg:6;
	u32 reserved:16;
	u32 chtype;
	u32 erindex;
	u64 rbase;
	u64 rlen;
	u64 rpp;
	u64 wpp;
} __packed;
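
/*
 * Local mirror of an MHI event ring context array (ECA) entry; handed to
 * mhi_device_configure() as the "ECA" buffer.
 */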
struct imp_event_context_type {
	u32 reserved:8;
	u32 intmodc:8;
	u32 intmodt:16;
	u32 ertype;
	u32 msivec;
	u64 rbase;
	u64 rlen;
	u64 rpp;
	u64 wpp;
} __packed;

struct imp_iova_addr {
	dma_addr_t base;
	unsigned int size;
};

struct imp_dev_info {
	struct platform_device *pdev;
	bool smmu_enabled;
	struct imp_iova_addr ctrl;
	struct imp_iova_addr data;
	u32 chdb_base;
	u32 erdb_base;
};

struct imp_event_props {
	u16 id;
	phys_addr_t doorbell;
	u16 uc_mbox_n;
	struct imp_event_context_type ev_ctx;
};

struct imp_event {
	struct imp_event_props props;
};

struct imp_channel_props {
	enum dma_data_direction dir;
	u16 id;
	phys_addr_t doorbell;
	u16 uc_mbox_n;
	struct imp_channel_context_type ch_ctx;
};

struct imp_channel {
	struct imp_channel_props props;
	struct imp_event event;
};
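
/*
 * Proxy state machine:
 *   IMP_INVALID -> IMP_PROBED  on platform/MHI probe
 *   IMP_PROBED  -> IMP_READY   once the ready indication reaches the modem
 *   IMP_READY   -> IMP_STARTED once the channels are configured and started
 */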
enum imp_state {
	IMP_INVALID = 0,
	IMP_PROBED,
	IMP_READY,
	IMP_STARTED
};

struct imp_qmi_cache {
	struct ipa_mhi_ready_indication_msg_v01 ready_ind;
	struct ipa_mhi_alloc_channel_req_msg_v01 alloc_ch_req;
	struct ipa_mhi_alloc_channel_resp_msg_v01 alloc_ch_resp;
	struct ipa_mhi_clk_vote_resp_msg_v01 clk_vote_resp;
};

struct imp_mhi_driver {
	struct mhi_device *mhi_dev;
	struct imp_channel ul_chan;
	struct imp_channel dl_chan;
};

struct imp_context {
	struct imp_dev_info dev_info;
	struct imp_mhi_driver md;
	struct mutex mutex;
	struct mutex lpm_mutex;
	enum imp_state state;
	bool in_lpm;
	bool lpm_disabled;
	struct imp_qmi_cache qmi;
};

static struct imp_context *imp_ctx;

static void _populate_smmu_info(struct ipa_mhi_ready_indication_msg_v01 *req)
{
	req->smmu_info_valid = true;
	req->smmu_info.iova_ctl_base_addr = imp_ctx->dev_info.ctrl.base;
	req->smmu_info.iova_ctl_size = imp_ctx->dev_info.ctrl.size;
	req->smmu_info.iova_data_base_addr = imp_ctx->dev_info.data.base;
	req->smmu_info.iova_data_size = imp_ctx->dev_info.data.size;
}
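
/*
 * Build the QMI ready indication for the modem: for each of the UL/DL
 * channels report its ID, direction and event ring, with the doorbell
 * addresses pointing at the IPA uC mailboxes that proxy them. On success
 * the proxy moves to IMP_READY.
 */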
static void imp_mhi_trigger_ready_ind(void)
{
	struct ipa_mhi_ready_indication_msg_v01 *req
		= &imp_ctx->qmi.ready_ind;
	int ret;
	struct imp_channel *ch;
	struct ipa_mhi_ch_init_info_type_v01 *ch_info;

	IMP_FUNC_ENTRY();
	if (imp_ctx->state != IMP_PROBED) {
		IMP_ERR("invalid state %d\n", imp_ctx->state);
		goto exit;
	}

	if (imp_ctx->dev_info.smmu_enabled)
		_populate_smmu_info(req);
	req->ch_info_arr_len = 0;
	BUILD_BUG_ON(QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01 < 2);

	/* UL channel */
	ch = &imp_ctx->md.ul_chan;
	ch_info = &req->ch_info_arr[req->ch_info_arr_len];
	ch_info->ch_id = ch->props.id;
	ch_info->direction_type = ch->props.dir;
	ch_info->er_id = ch->event.props.id;

	/* uC is a doorbell proxy between local Q6 and remote Q6 */
	ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
		ipahal_get_reg_base() +
		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
			IMP_IPA_UC_m,
			ch->props.uc_mbox_n);

	ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
		ipahal_get_reg_base() +
		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
			IMP_IPA_UC_m,
			ch->event.props.uc_mbox_n);
	req->ch_info_arr_len++;

	/* DL channel */
	ch = &imp_ctx->md.dl_chan;
	ch_info = &req->ch_info_arr[req->ch_info_arr_len];
	ch_info->ch_id = ch->props.id;
	ch_info->direction_type = ch->props.dir;
	ch_info->er_id = ch->event.props.id;

	/* uC is a doorbell proxy between local Q6 and remote Q6 */
	ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
		ipahal_get_reg_base() +
		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
			IMP_IPA_UC_m,
			ch->props.uc_mbox_n);

	ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
		ipahal_get_reg_base() +
		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
			IMP_IPA_UC_m,
			ch->event.props.uc_mbox_n);
	req->ch_info_arr_len++;

	IMP_DBG("sending IND to modem\n");
	ret = ipa3_qmi_send_mhi_ready_indication(req);
	if (ret) {
		IMP_ERR("failed to send ready indication to modem %d\n", ret);
		return;
	}

	imp_ctx->state = IMP_READY;
exit:
	IMP_FUNC_EXIT();
}
static struct imp_channel *imp_get_ch_by_id(u16 id)
{
	if (imp_ctx->md.ul_chan.props.id == id)
		return &imp_ctx->md.ul_chan;

	if (imp_ctx->md.dl_chan.props.id == id)
		return &imp_ctx->md.dl_chan;

	return NULL;
}

static struct ipa_mhi_er_info_type_v01 *
	_find_ch_in_er_info_arr(struct ipa_mhi_alloc_channel_req_msg_v01 *req,
	u16 id)
{
	int i;

	if (req->er_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01)
		return NULL;

	/* search the event ring array by its own length, not tr_info's */
	for (i = 0; i < req->er_info_arr_len; i++)
		if (req->er_info_arr[i].er_id == id)
			return &req->er_info_arr[i];

	return NULL;
}
/* round addresses for closest page per SMMU requirements */
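/*
 * Worked example (illustrative values, 4 KB pages): iova = 0x1004,
 * pa = 0x1004, size = 0x10 yields iova_p = 0x1000, pa_p = 0x1000 and
 * size_p = 0x1000, i.e. the whole page containing the original range.
 */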
static inline void imp_smmu_round_to_page(uint64_t iova, uint64_t pa,
	uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p)
{
	*iova_p = rounddown(iova, PAGE_SIZE);
	*pa_p = rounddown(pa, PAGE_SIZE);
	*size_p = roundup(size + pa - *pa_p, PAGE_SIZE);
}
static void __map_smmu_info(struct device *dev,
	struct imp_iova_addr *partition, int num_mapping,
	struct ipa_mhi_mem_addr_info_type_v01 *map_info,
	bool map)
{
	int i;
	struct iommu_domain *domain;
	unsigned long iova_p;
	phys_addr_t pa_p;
	u32 size_p;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		IMP_ERR("domain is NULL for dev\n");
		return;
	}

	for (i = 0; i < num_mapping; i++) {
		int prot = IOMMU_READ | IOMMU_WRITE;
		u32 ipa_base = ipa3_ctx->ipa_wrapper_base +
			ipa3_ctx->ctrl->ipa_reg_base_ofst;
		u32 ipa_size = ipa3_ctx->ipa_wrapper_size;

		imp_smmu_round_to_page(map_info[i].iova, map_info[i].pa,
			map_info[i].size, &iova_p, &pa_p, &size_p);

		if (map) {
			/* boundary check */
			WARN_ON(partition->base > iova_p ||
				(partition->base + partition->size) <
				(iova_p + size_p));

			/* for the IPA uC MBOX we need to map with device type */
			if (pa_p - ipa_base < ipa_size)
				prot |= IOMMU_MMIO;

			IMP_DBG("mapping 0x%lx to 0x%pa size %d\n",
				iova_p, &pa_p, size_p);
			iommu_map(domain,
				iova_p, pa_p, size_p, prot);
		} else {
			IMP_DBG("unmapping 0x%lx to 0x%pa size %d\n",
				iova_p, &pa_p, size_p);
			iommu_unmap(domain, iova_p, size_p);
		}
	}
}
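
/*
 * For each requested channel, fill in the channel context (CCA) and its
 * event ring context (ECA) from the QMI request and push both to the MHI
 * core via mhi_device_configure(). Any failure is recorded per channel in
 * resp and aborts the loop.
 */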
static int __imp_configure_mhi_device(
	struct ipa_mhi_alloc_channel_req_msg_v01 *req,
	struct ipa_mhi_alloc_channel_resp_msg_v01 *resp)
{
	struct mhi_buf ch_config[2];
	int i;
	struct ipa_mhi_er_info_type_v01 *er_info;
	struct imp_channel *ch;
	int ridx = 0;
	int ret;

	IMP_FUNC_ENTRY();

	/* configure MHI */
	for (i = 0; i < req->tr_info_arr_len; i++) {
		ch = imp_get_ch_by_id(req->tr_info_arr[i].ch_id);
		if (!ch) {
			IMP_ERR("unknown channel %d\n",
				req->tr_info_arr[i].ch_id);
			resp->alloc_resp_arr[ridx].ch_id =
				req->tr_info_arr[i].ch_id;
			resp->alloc_resp_arr[ridx].is_success = 0;
			ridx++;
			resp->alloc_resp_arr_len = ridx;
			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
			/* return INCOMPATIBLE_STATE in any case */
			resp->resp.error =
				IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
			return -EINVAL;
		}

		/* populate CCA */
		if (req->tr_info_arr[i].brst_mode_type ==
		    QMI_IPA_BURST_MODE_ENABLED_V01)
			ch->props.ch_ctx.brsmode = 3;
		else if (req->tr_info_arr[i].brst_mode_type ==
			 QMI_IPA_BURST_MODE_DISABLED_V01)
			ch->props.ch_ctx.brsmode = 2;
		else
			ch->props.ch_ctx.brsmode = 0;
		ch->props.ch_ctx.pollcfg = req->tr_info_arr[i].poll_cfg;
		ch->props.ch_ctx.chtype = ch->props.dir;
		ch->props.ch_ctx.erindex = ch->event.props.id;
		ch->props.ch_ctx.rbase = req->tr_info_arr[i].ring_iova;
		ch->props.ch_ctx.rlen = req->tr_info_arr[i].ring_len;
		ch->props.ch_ctx.rpp = req->tr_info_arr[i].rp;
		ch->props.ch_ctx.wpp = req->tr_info_arr[i].wp;

		ch_config[0].buf = &ch->props.ch_ctx;
		ch_config[0].len = sizeof(ch->props.ch_ctx);
		ch_config[0].name = "CCA";

		/* populate ECA */
		er_info = _find_ch_in_er_info_arr(req, ch->event.props.id);
		if (!er_info) {
			IMP_ERR("no event ring for ch %d\n",
				req->tr_info_arr[i].ch_id);
			resp->alloc_resp_arr[ridx].ch_id =
				req->tr_info_arr[i].ch_id;
			resp->alloc_resp_arr[ridx].is_success = 0;
			ridx++;
			resp->alloc_resp_arr_len = ridx;
			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
			resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
			return -EINVAL;
		}

		ch->event.props.ev_ctx.intmodc = er_info->intmod_count;
		ch->event.props.ev_ctx.intmodt = er_info->intmod_cycles;
		ch->event.props.ev_ctx.ertype = 1;
		ch->event.props.ev_ctx.msivec = er_info->msi_addr;
		ch->event.props.ev_ctx.rbase = er_info->ring_iova;
		ch->event.props.ev_ctx.rlen = er_info->ring_len;
		ch->event.props.ev_ctx.rpp = er_info->rp;
		ch->event.props.ev_ctx.wpp = er_info->wp;

		ch_config[1].buf = &ch->event.props.ev_ctx;
		ch_config[1].len = sizeof(ch->event.props.ev_ctx);
		ch_config[1].name = "ECA";

		IMP_DBG("Configuring MHI device for ch %d\n", ch->props.id);
		ret = mhi_device_configure(imp_ctx->md.mhi_dev, ch->props.dir,
			ch_config, 2);
		/* this configures the mhi-host; no need to check mhi state */
		if (ret) {
			IMP_ERR("mhi_device_configure failed for ch %d\n",
				req->tr_info_arr[i].ch_id);
			resp->alloc_resp_arr[ridx].ch_id =
				req->tr_info_arr[i].ch_id;
			resp->alloc_resp_arr[ridx].is_success = 0;
			ridx++;
			resp->alloc_resp_arr_len = ridx;
			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
			resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
			return -EINVAL;
		}
	}

	IMP_FUNC_EXIT();
	return 0;
}
/**
 * imp_handle_allocate_channel_req() - Allocate a new MHI channel
 * @req: QMI channel-allocation request received from the modem
 *
 * Allocates MHI channels and starts them.
 *
 * Return: pointer to the QMI response message
 */
struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req(
	struct ipa_mhi_alloc_channel_req_msg_v01 *req)
{
	int ret;
	struct ipa_mhi_alloc_channel_resp_msg_v01 *resp =
		&imp_ctx->qmi.alloc_ch_resp;

	IMP_FUNC_ENTRY();

	mutex_lock(&imp_ctx->mutex);

	memset(resp, 0, sizeof(*resp));

	if (imp_ctx->state != IMP_READY) {
		IMP_ERR("invalid state %d\n", imp_ctx->state);
		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
		resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}

	/* cache the req */
	memcpy(&imp_ctx->qmi.alloc_ch_req, req, sizeof(*req));

	if (req->tr_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01) {
		IMP_ERR("invalid tr_info_arr_len %d\n", req->tr_info_arr_len);
		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
		resp->resp.error = IPA_QMI_ERR_NO_MEMORY_V01;
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}

	if ((req->ctrl_addr_map_info_len == 0 ||
	     req->data_addr_map_info_len == 0) &&
	    imp_ctx->dev_info.smmu_enabled) {
		IMP_ERR("no mapping provided, but smmu is enabled\n");
		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
		resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}

	if (imp_ctx->dev_info.smmu_enabled) {
		/* map CTRL */
		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
			&imp_ctx->dev_info.ctrl,
			req->ctrl_addr_map_info_len,
			req->ctrl_addr_map_info,
			true);

		/* map DATA */
		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
			&imp_ctx->dev_info.data,
			req->data_addr_map_info_len,
			req->data_addr_map_info,
			true);
	}

	resp->alloc_resp_arr_valid = true;

	ret = __imp_configure_mhi_device(req, resp);
	if (ret)
		goto fail_smmu;

	IMP_DBG("Starting MHI channels %d and %d\n",
		imp_ctx->md.ul_chan.props.id,
		imp_ctx->md.dl_chan.props.id);
	ret = mhi_prepare_for_transfer(imp_ctx->md.mhi_dev);
	if (ret) {
		IMP_ERR("mhi_prepare_for_transfer failed %d\n", ret);
		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
			.ch_id = imp_ctx->md.ul_chan.props.id;
		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
			.is_success = 0;
		resp->alloc_resp_arr_len++;
		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
			.ch_id = imp_ctx->md.dl_chan.props.id;
		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
			.is_success = 0;
		resp->alloc_resp_arr_len++;
		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
		/* return INCOMPATIBLE_STATE in any case */
		resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
		goto fail_smmu;
	}

	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
		.ch_id = imp_ctx->md.ul_chan.props.id;
	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
		.is_success = 1;
	resp->alloc_resp_arr_len++;

	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
		.ch_id = imp_ctx->md.dl_chan.props.id;
	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
		.is_success = 1;
	resp->alloc_resp_arr_len++;

	imp_ctx->state = IMP_STARTED;
	mutex_unlock(&imp_ctx->mutex);
	IMP_FUNC_EXIT();
	resp->resp.result = IPA_QMI_RESULT_SUCCESS_V01;
	return resp;

fail_smmu:
	if (imp_ctx->dev_info.smmu_enabled) {
		/* unmap CTRL */
		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
			&imp_ctx->dev_info.ctrl,
			req->ctrl_addr_map_info_len,
			req->ctrl_addr_map_info,
			false);

		/* unmap DATA */
		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
			&imp_ctx->dev_info.data,
			req->data_addr_map_info_len,
			req->data_addr_map_info,
			false);
	}
	mutex_unlock(&imp_ctx->mutex);
	return resp;
}
/**
 * imp_handle_vote_req() - Votes for MHI / PCIe clocks
 * @vote: true to hold a vote (prevent low power mode), false to release it
 *
 * Holds or releases a vote to prevent / allow low power mode on MHI.
 *
 * Return: pointer to the QMI response message
 */
struct ipa_mhi_clk_vote_resp_msg_v01
	*imp_handle_vote_req(bool vote)
{
	int ret;
	struct ipa_mhi_clk_vote_resp_msg_v01 *resp =
		&imp_ctx->qmi.clk_vote_resp;

	IMP_DBG_LOW("vote %d\n", vote);
	memset(resp, 0, sizeof(struct ipa_mhi_clk_vote_resp_msg_v01));
	resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
	resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;

	mutex_lock(&imp_ctx->mutex);

	/*
	 * returning success for a clock unvote request - since it could
	 * be a 5G modem SSR scenario where clocks are already OFF.
	 */
	if (!vote && imp_ctx->state == IMP_INVALID) {
		IMP_DBG("Unvote in Invalid state, no op for clock unvote\n");
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}

	if (imp_ctx->state != IMP_STARTED) {
		IMP_ERR("unexpected vote when in state %d\n", imp_ctx->state);
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}

	if (vote == imp_ctx->lpm_disabled) {
		IMP_ERR("already voted/devoted %d\n", vote);
		mutex_unlock(&imp_ctx->mutex);
		return resp;
	}
	mutex_unlock(&imp_ctx->mutex);

	/*
	 * Unlock the mutex before calling into mhi for clock vote
	 * to avoid deadlock on imp mutex.
	 * Calls into mhi are synchronous and imp callbacks are
	 * executed from mhi context.
	 */
	if (vote) {
		ret = mhi_device_get_sync(imp_ctx->md.mhi_dev,
			MHI_VOTE_BUS | MHI_VOTE_DEVICE);
		if (ret) {
			IMP_ERR("mhi_device_get_sync failed %d\n", ret);
			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
			/* return INCOMPATIBLE_STATE in any case */
			resp->resp.error =
				IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
			return resp;
		}
	} else {
		mhi_device_put(imp_ctx->md.mhi_dev,
			MHI_VOTE_BUS | MHI_VOTE_DEVICE);
	}

	mutex_lock(&imp_ctx->mutex);
	imp_ctx->lpm_disabled = vote;
	mutex_unlock(&imp_ctx->mutex);

	resp->resp.result = IPA_QMI_RESULT_SUCCESS_V01;
	return resp;
}
static int imp_read_iova_from_dtsi(const char *node, struct imp_iova_addr *out)
{
	u32 iova_mapping[2];
	struct device_node *of_node = imp_ctx->dev_info.pdev->dev.of_node;

	if (of_property_read_u32_array(of_node, node, iova_mapping, 2)) {
		IMP_DBG("failed to read of_node %s\n", node);
		return -EINVAL;
	}

	out->base = iova_mapping[0];
	out->size = iova_mapping[1];
	IMP_DBG("%s: base: 0x%pad size: 0x%x\n", node, &out->base, out->size);

	return 0;
}
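
/*
 * Tear down the proxy on MHI removal: send the QMI cleanup request to the
 * modem, undo the CTRL/DATA SMMU mappings made on channel allocation, drop
 * any outstanding LPM vote, unmap the doorbell page from the uC SMMU and
 * release the IPA clock vote. State returns to IMP_PROBED.
 */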
static void imp_mhi_shutdown(void)
{
	struct ipa_mhi_cleanup_req_msg_v01 req = { 0 };

	IMP_FUNC_ENTRY();

	if (imp_ctx->state == IMP_STARTED ||
	    imp_ctx->state == IMP_READY) {
		req.cleanup_valid = true;
		req.cleanup = true;
		ipa3_qmi_send_mhi_cleanup_request(&req);
		if (imp_ctx->dev_info.smmu_enabled) {
			struct ipa_mhi_alloc_channel_req_msg_v01 *creq
				= &imp_ctx->qmi.alloc_ch_req;

			/* unmap CTRL */
			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
				&imp_ctx->dev_info.ctrl,
				creq->ctrl_addr_map_info_len,
				creq->ctrl_addr_map_info,
				false);

			/* unmap DATA */
			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
				&imp_ctx->dev_info.data,
				creq->data_addr_map_info_len,
				creq->data_addr_map_info,
				false);
		}
		if (imp_ctx->lpm_disabled) {
			mhi_device_put(imp_ctx->md.mhi_dev, MHI_VOTE_BUS);
			imp_ctx->lpm_disabled = false;
		}

		/* unmap MHI doorbells from IPA uC SMMU */
		if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
			struct ipa_smmu_cb_ctx *cb =
				ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
			unsigned long iova_p;
			phys_addr_t pa_p;
			u32 size_p;

			imp_smmu_round_to_page(imp_ctx->dev_info.chdb_base,
				imp_ctx->dev_info.chdb_base, PAGE_SIZE,
				&iova_p, &pa_p, &size_p);

			iommu_unmap(cb->iommu_domain, iova_p, size_p);
		}
	}
	if (!imp_ctx->in_lpm &&
	    (imp_ctx->state == IMP_READY ||
	     imp_ctx->state == IMP_STARTED)) {
		IMP_DBG("devote IMP with state= %d\n", imp_ctx->state);
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
	}
	imp_ctx->in_lpm = false;
	imp_ctx->state = IMP_PROBED;

	IMP_FUNC_EXIT();
}
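
/*
 * MHI probe: vote for IPA clocks, record the UL/DL channel and event ring
 * properties, program the uC with the remote doorbell-to-mailbox mappings,
 * map the doorbell page into the uC SMMU (when not in S1 bypass) and send
 * the ready indication to the modem.
 */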
static int imp_mhi_probe_cb(struct mhi_device *mhi_dev,
	const struct mhi_device_id *id)
{
	struct imp_channel *ch;
	struct imp_event *ev;
	int ret;

	IMP_FUNC_ENTRY();

	if (id != &mhi_driver_match_table[0]) {
		IMP_ERR("only chan=%s is supported for now\n",
			mhi_driver_match_table[0].chan);
		return -EPERM;
	}

	/* vote for IPA clock. IPA clock will be devoted when MHI enters LPM */
	IPA_ACTIVE_CLIENTS_INC_SPECIAL("IMP");

	imp_ctx->md.mhi_dev = mhi_dev;

	mutex_lock(&imp_ctx->mutex);

	/* store UL channel properties */
	ch = &imp_ctx->md.ul_chan;
	ev = &imp_ctx->md.ul_chan.event;

	ch->props.id = mhi_dev->ul_chan_id;
	ch->props.dir = DMA_TO_DEVICE;
	ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8;
	ch->props.uc_mbox_n = IMP_IPA_UC_UL_CH_n;
	IMP_DBG("ul ch id %d doorbell 0x%pa uc_mbox_n %d\n",
		ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n);

	ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell,
		ch->props.uc_mbox_n);
	if (ret)
		goto fail;
	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell,
		ch->props.uc_mbox_n);

	ev->props.id = mhi_dev->ul_event_id;
	ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8;
	ev->props.uc_mbox_n = IMP_IPA_UC_UL_EV_n;
	IMP_DBG("allocated ev %d\n", ev->props.id);

	ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell,
		ev->props.uc_mbox_n);
	if (ret)
		goto fail;
	IMP_DBG("mapped ev db 0x%pad to mbox %d\n", &ev->props.doorbell,
		ev->props.uc_mbox_n);

	/* store DL channel properties */
	ch = &imp_ctx->md.dl_chan;
	ev = &imp_ctx->md.dl_chan.event;

	ch->props.dir = DMA_FROM_DEVICE;
	ch->props.id = mhi_dev->dl_chan_id;
	ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8;
	ch->props.uc_mbox_n = IMP_IPA_UC_DL_CH_n;
	IMP_DBG("dl ch id %d doorbell 0x%pa uc_mbox_n %d\n",
		ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n);

	ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell,
		ch->props.uc_mbox_n);
	if (ret)
		goto fail;
	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell,
		ch->props.uc_mbox_n);

	ev->props.id = mhi_dev->dl_event_id;
	ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8;
	ev->props.uc_mbox_n = IMP_IPA_UC_DL_EV_n;
	IMP_DBG("allocated ev %d\n", ev->props.id);

	ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell,
		ev->props.uc_mbox_n);
	if (ret)
		goto fail;
	IMP_DBG("mapped ev db 0x%pad to mbox %d\n", &ev->props.doorbell,
		ev->props.uc_mbox_n);

	/*
	 * Map MHI doorbells to IPA uC SMMU.
	 * Both channel and event doorbells reside in a single page.
	 */
	if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
		struct ipa_smmu_cb_ctx *cb =
			ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
		unsigned long iova_p;
		phys_addr_t pa_p;
		u32 size_p;

		imp_smmu_round_to_page(imp_ctx->dev_info.chdb_base,
			imp_ctx->dev_info.chdb_base, PAGE_SIZE,
			&iova_p, &pa_p, &size_p);

		ret = ipa3_iommu_map(cb->iommu_domain, iova_p, pa_p, size_p,
			IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
		if (ret)
			goto fail;
	}

	imp_mhi_trigger_ready_ind();

	mutex_unlock(&imp_ctx->mutex);

	IMP_FUNC_EXIT();
	return 0;

fail:
	mutex_unlock(&imp_ctx->mutex);
	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
	return ret;
}
static void imp_mhi_remove_cb(struct mhi_device *mhi_dev)
{
	IMP_FUNC_ENTRY();

	mutex_lock(&imp_ctx->mutex);
	imp_mhi_shutdown();
	mutex_unlock(&imp_ctx->mutex);

	IMP_FUNC_EXIT();
}
static void imp_mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb)
{
	IMP_DBG("%d\n", mhi_cb);

	mutex_lock(&imp_ctx->lpm_mutex);
	if (mhi_dev != imp_ctx->md.mhi_dev) {
		IMP_DBG("ignoring secondary callbacks\n");
		mutex_unlock(&imp_ctx->lpm_mutex);
		return;
	}

	switch (mhi_cb) {
	case MHI_CB_IDLE:
		break;
	case MHI_CB_LPM_ENTER:
		if (imp_ctx->state == IMP_STARTED) {
			if (!imp_ctx->in_lpm) {
				IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
				imp_ctx->in_lpm = true;
			} else {
				IMP_ERR("already in LPM\n");
			}
		}
		break;
	case MHI_CB_LPM_EXIT:
		if (imp_ctx->state == IMP_STARTED) {
			if (imp_ctx->in_lpm) {
				IPA_ACTIVE_CLIENTS_INC_SPECIAL("IMP");
				imp_ctx->in_lpm = false;
			} else {
				IMP_ERR("not in LPM\n");
			}
		}
		break;
	case MHI_CB_EE_RDDM:
	case MHI_CB_PENDING_DATA:
	default:
		IMP_ERR("unexpected event %d\n", mhi_cb);
		break;
	}
	mutex_unlock(&imp_ctx->lpm_mutex);
}
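
/*
 * Platform probe: defer until the IPA uC is up, then read the IOVA windows
 * and doorbell bases from DT. If either IOVA window is missing the proxy
 * runs with SMMU disabled. Finally register with the MHI bus.
 */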
static int imp_probe(struct platform_device *pdev)
{
	int ret;

	IMP_FUNC_ENTRY();

	if (ipa3_uc_state_check()) {
		IMP_DBG("uC not ready yet\n");
		return -EPROBE_DEFER;
	}

	imp_ctx->dev_info.pdev = pdev;
	imp_ctx->dev_info.smmu_enabled = true;
	ret = imp_read_iova_from_dtsi("qcom,ctrl-iova",
		&imp_ctx->dev_info.ctrl);
	if (ret)
		imp_ctx->dev_info.smmu_enabled = false;

	ret = imp_read_iova_from_dtsi("qcom,data-iova",
		&imp_ctx->dev_info.data);
	if (ret)
		imp_ctx->dev_info.smmu_enabled = false;

	IMP_DBG("smmu_enabled=%d\n", imp_ctx->dev_info.smmu_enabled);

	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base",
	    &imp_ctx->dev_info.chdb_base)) {
		IMP_ERR("failed to read of_node %s\n", "qcom,mhi-chdb-base");
		return -EINVAL;
	}
	IMP_DBG("chdb-base=0x%x\n", imp_ctx->dev_info.chdb_base);

	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base",
	    &imp_ctx->dev_info.erdb_base)) {
		IMP_ERR("failed to read of_node %s\n", "qcom,mhi-erdb-base");
		return -EINVAL;
	}
	IMP_DBG("erdb-base=0x%x\n", imp_ctx->dev_info.erdb_base);

	imp_ctx->state = IMP_PROBED;
	ret = mhi_driver_register(&mhi_driver);
	if (ret) {
		IMP_ERR("mhi_driver_register failed %d\n", ret);
		return ret;
	}

	IMP_FUNC_EXIT();
	return 0;
}
static int imp_remove(struct platform_device *pdev)
{
	IMP_FUNC_ENTRY();

	mhi_driver_unregister(&mhi_driver);
	mutex_lock(&imp_ctx->mutex);
	if (!imp_ctx->in_lpm && (imp_ctx->state == IMP_READY ||
	    imp_ctx->state == IMP_STARTED)) {
		IMP_DBG("devote IMP with state= %d\n", imp_ctx->state);
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
	}
	imp_ctx->lpm_disabled = false;
	imp_ctx->state = IMP_INVALID;
	mutex_unlock(&imp_ctx->mutex);

	mutex_lock(&imp_ctx->lpm_mutex);
	imp_ctx->in_lpm = false;
	mutex_unlock(&imp_ctx->lpm_mutex);

	return 0;
}
static const struct of_device_id imp_dt_match[] = {
	{ .compatible = "qcom,ipa-mhi-proxy" },
	{},
};
MODULE_DEVICE_TABLE(of, imp_dt_match);
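
/*
 * Illustrative DT node (addresses and sizes are placeholder values, not
 * taken from any real target). qcom,ctrl-iova / qcom,data-iova each carry
 * <base size>; the doorbell bases are single cells:
 *
 *	ipa_mhi_proxy {
 *		compatible = "qcom,ipa-mhi-proxy";
 *		qcom,ctrl-iova = <0x10000000 0x100000>;
 *		qcom,data-iova = <0x20000000 0x800000>;
 *		qcom,mhi-chdb-base = <0x40300300>;
 *		qcom,mhi-erdb-base = <0x40300700>;
 *	};
 */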
static struct platform_driver ipa_mhi_proxy_driver = {
	.driver = {
		.name = "ipa_mhi_proxy",
		.of_match_table = imp_dt_match,
	},
	.probe = imp_probe,
	.remove = imp_remove,
};
/**
 * imp_handle_modem_ready() - Registers IMP as a platform driver
 *
 * This function is called after the modem is loaded and the QMI handshake
 * is done. IMP registers itself as a platform driver, and on a supported
 * device the probe function will be called.
 *
 * Return: None
 */
void imp_handle_modem_ready(void)
{
	if (!imp_ctx) {
		imp_ctx = kzalloc(sizeof(*imp_ctx), GFP_KERNEL);
		if (!imp_ctx)
			return;

		mutex_init(&imp_ctx->mutex);
		mutex_init(&imp_ctx->lpm_mutex);
	}

	if (imp_ctx->state != IMP_INVALID) {
		IMP_ERR("unexpected state %d\n", imp_ctx->state);
		return;
	}

	IMP_DBG("register platform device\n");
	platform_driver_register(&ipa_mhi_proxy_driver);
}
/**
 * imp_handle_modem_shutdown() - Handles modem SSR
 *
 * Performs MHI cleanup when the modem is going into SSR (Subsystem Restart).
 *
 * Return: None
 */
void imp_handle_modem_shutdown(void)
{
	IMP_FUNC_ENTRY();

	if (!imp_ctx)
		return;

	mutex_lock(&imp_ctx->mutex);

	if (imp_ctx->state == IMP_INVALID) {
		mutex_unlock(&imp_ctx->mutex);
		return;
	}
	if (imp_ctx->state == IMP_STARTED) {
		mhi_unprepare_from_transfer(imp_ctx->md.mhi_dev);
		imp_ctx->state = IMP_READY;
	}

	if (imp_ctx->state == IMP_READY) {
		if (imp_ctx->dev_info.smmu_enabled) {
			struct ipa_mhi_alloc_channel_req_msg_v01 *creq
				= &imp_ctx->qmi.alloc_ch_req;

			/* unmap CTRL */
			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
				&imp_ctx->dev_info.ctrl,
				creq->ctrl_addr_map_info_len,
				creq->ctrl_addr_map_info,
				false);

			/* unmap DATA */
			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
				&imp_ctx->dev_info.data,
				creq->data_addr_map_info_len,
				creq->data_addr_map_info,
				false);
		}
	}

	mutex_unlock(&imp_ctx->mutex);
	IMP_FUNC_EXIT();
	platform_driver_unregister(&ipa_mhi_proxy_driver);
}
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA MHI Proxy Driver");