/* ipa_mhi.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include <linux/export.h>
  7. #include <linux/delay.h>
  8. #include <linux/module.h>
  9. #include <linux/mutex.h>
  10. #include <linux/ipa.h>
  11. #include <linux/msm_gsi.h>
  12. #include <linux/ipa_mhi.h>
  13. #include "../ipa_common_i.h"
  14. #include "ipa_i.h"
  15. #include "ipa_qmi_service.h"
#define IPA_MHI_DRV_NAME "ipa_mhi"

/*
 * IPA_MHI_DBG() - debug print to dmesg (pr_debug) and to BOTH the regular
 * and low-priority IPC log buffers.
 *
 * NOTE(review): the IPC_LOGGING format strings contain "%s:%d" but no
 * __func__/__LINE__ arguments are passed — confirm IPA_IPC_LOGGING
 * supplies them internally, otherwise the log entries are malformed.
 */
#define IPA_MHI_DBG(fmt, args...) \
	do { \
		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/*
 * IPA_MHI_DBG_LOW() - debug print that goes only to the low-priority IPC
 * log buffer (plus pr_debug); for chatty per-packet/per-doorbell paths.
 */
#define IPA_MHI_DBG_LOW(fmt, args...) \
	do { \
		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* IPA_MHI_ERR() - error print to dmesg (pr_err) and both IPC log buffers */
#define IPA_MHI_ERR(fmt, args...) \
	do { \
		pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
			__func__, __LINE__, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	} while (0)

/* Function entry/exit tracing helpers */
#define IPA_MHI_FUNC_ENTRY() \
	IPA_MHI_DBG("ENTRY\n")
#define IPA_MHI_FUNC_EXIT() \
	IPA_MHI_DBG("EXIT\n")

/* Number of MHI channels supported in each direction */
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 2

/* bit #40 in address should be asserted for MHI transfers over pcie */
#define IPA_MHI_HOST_ADDR_COND(addr) \
	((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))

/* Channel polling mode as encoded in the MHI channel scratch */
enum ipa3_mhi_polling_mode {
	IPA_MHI_POLLING_MODE_DB_MODE,
	IPA_MHI_POLLING_MODE_POLL_MODE,
};
  55. bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client)
  56. {
  57. int res;
  58. int ipa_ep_idx;
  59. struct ipa3_ep_context *ep;
  60. IPA_MHI_FUNC_ENTRY();
  61. ipa_ep_idx = ipa3_get_ep_mapping(client);
  62. if (ipa_ep_idx == -1) {
  63. IPA_MHI_ERR("Invalid client.\n");
  64. return -EINVAL;
  65. }
  66. ep = &ipa3_ctx->ep[ipa_ep_idx];
  67. IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl);
  68. res = gsi_stop_channel(ep->gsi_chan_hdl);
  69. if (res != 0 &&
  70. res != -GSI_STATUS_AGAIN &&
  71. res != -GSI_STATUS_TIMED_OUT) {
  72. IPA_MHI_ERR("GSI stop channel failed %d\n",
  73. res);
  74. WARN_ON(1);
  75. return false;
  76. }
  77. if (res == 0) {
  78. IPA_MHI_DBG_LOW("GSI channel %ld STOP\n",
  79. ep->gsi_chan_hdl);
  80. return true;
  81. }
  82. return false;
  83. }
  84. static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client)
  85. {
  86. int res;
  87. int clnt_hdl;
  88. IPA_MHI_FUNC_ENTRY();
  89. clnt_hdl = ipa3_get_ep_mapping(client);
  90. if (clnt_hdl < 0)
  91. return -EFAULT;
  92. res = ipa3_reset_gsi_channel(clnt_hdl);
  93. if (res) {
  94. IPA_MHI_ERR("ipa3_reset_gsi_channel failed %d\n", res);
  95. return -EFAULT;
  96. }
  97. IPA_MHI_FUNC_EXIT();
  98. return 0;
  99. }
  100. int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
  101. {
  102. int res;
  103. IPA_MHI_FUNC_ENTRY();
  104. res = ipa3_mhi_reset_gsi_channel(client);
  105. if (res) {
  106. IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
  107. ipa_assert();
  108. return res;
  109. }
  110. res = ipa3_disable_data_path(ipa3_get_ep_mapping(client));
  111. if (res) {
  112. IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
  113. return res;
  114. }
  115. IPA_MHI_FUNC_EXIT();
  116. return 0;
  117. }
  118. int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
  119. {
  120. int res;
  121. int ipa_ep_idx;
  122. IPA_MHI_FUNC_ENTRY();
  123. ipa_ep_idx = ipa3_get_ep_mapping(client);
  124. if (ipa_ep_idx < 0) {
  125. IPA_MHI_ERR("Invalid client %d\n", client);
  126. return -EINVAL;
  127. }
  128. res = ipa3_enable_data_path(ipa_ep_idx);
  129. if (res) {
  130. IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
  131. return res;
  132. }
  133. IPA_MHI_FUNC_EXIT();
  134. return 0;
  135. }
  136. static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
  137. struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size)
  138. {
  139. switch (ch_ctx_host->pollcfg) {
  140. case 0:
  141. /*set default polling configuration according to MHI spec*/
  142. if (IPA_CLIENT_IS_PROD(client))
  143. return 7;
  144. else
  145. return (ring_size/2)/8;
  146. break;
  147. default:
  148. return ch_ctx_host->pollcfg;
  149. }
  150. }
  151. static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
  152. int ipa_ep_idx, struct start_gsi_channel *params)
  153. {
  154. int res = 0;
  155. struct gsi_evt_ring_props ev_props;
  156. struct ipa_mhi_msi_info *msi;
  157. struct gsi_chan_props ch_props;
  158. union __packed gsi_channel_scratch ch_scratch;
  159. struct ipa3_ep_context *ep;
  160. const struct ipa_gsi_ep_config *ep_cfg;
  161. struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
  162. bool burst_mode_enabled = false;
  163. IPA_MHI_FUNC_ENTRY();
  164. ep = &ipa3_ctx->ep[ipa_ep_idx];
  165. msi = params->msi;
  166. ep_cfg = ipa3_get_gsi_ep_info(client);
  167. if (!ep_cfg) {
  168. IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
  169. return -EPERM;
  170. }
  171. /* allocate event ring only for the first time pipe is connected */
  172. if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
  173. memset(&ev_props, 0, sizeof(ev_props));
  174. ev_props.intf = GSI_EVT_CHTYPE_MHI_EV;
  175. ev_props.intr = GSI_INTR_MSI;
  176. ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
  177. ev_props.ring_len = params->ev_ctx_host->rlen;
  178. ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
  179. params->ev_ctx_host->rbase);
  180. ev_props.int_modt = params->ev_ctx_host->intmodt *
  181. IPA_SLEEP_CLK_RATE_KHZ;
  182. ev_props.int_modc = params->ev_ctx_host->intmodc;
  183. ev_props.intvec = ((msi->data & ~msi->mask) |
  184. (params->ev_ctx_host->msivec & msi->mask));
  185. ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND(
  186. (((u64)msi->addr_hi << 32) | msi->addr_low));
  187. ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND(
  188. params->event_context_addr +
  189. offsetof(struct ipa_mhi_ev_ctx, rp));
  190. ev_props.exclusive = true;
  191. ev_props.err_cb = params->ev_err_cb;
  192. ev_props.user_data = params->channel;
  193. ev_props.evchid_valid = true;
  194. ev_props.evchid = params->evchid;
  195. IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n",
  196. ipa_ep_idx, ev_props.evchid);
  197. res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl,
  198. &ep->gsi_evt_ring_hdl);
  199. if (res) {
  200. IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
  201. goto fail_alloc_evt;
  202. }
  203. IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
  204. client,
  205. ep->gsi_evt_ring_hdl);
  206. *params->cached_gsi_evt_ring_hdl =
  207. ep->gsi_evt_ring_hdl;
  208. } else {
  209. IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n",
  210. *params->cached_gsi_evt_ring_hdl);
  211. ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
  212. }
  213. if (params->ev_ctx_host->wp == params->ev_ctx_host->rbase) {
  214. IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n",
  215. params->ev_ctx_host->wp);
  216. goto fail_alloc_ch;
  217. }
  218. IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n",
  219. ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
  220. res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl,
  221. params->ev_ctx_host->wp);
  222. if (res) {
  223. IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n",
  224. res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
  225. goto fail_alloc_ch;
  226. }
  227. memset(&ch_props, 0, sizeof(ch_props));
  228. ch_props.prot = GSI_CHAN_PROT_MHI;
  229. ch_props.dir = IPA_CLIENT_IS_PROD(client) ?
  230. GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
  231. ch_props.ch_id = ep_cfg->ipa_gsi_chan_num;
  232. ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
  233. ch_props.re_size = GSI_CHAN_RE_SIZE_16B;
  234. ch_props.ring_len = params->ch_ctx_host->rlen;
  235. ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
  236. params->ch_ctx_host->rbase);
  237. /* Burst mode is not supported on DPL pipes */
  238. if ((client != IPA_CLIENT_MHI_DPL_CONS) &&
  239. (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
  240. params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE)) {
  241. burst_mode_enabled = true;
  242. }
  243. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
  244. !burst_mode_enabled)
  245. ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
  246. else
  247. ch_props.use_db_eng = GSI_CHAN_DB_MODE;
  248. ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
  249. ch_props.low_weight = 1;
  250. ch_props.prefetch_mode = ep_cfg->prefetch_mode;
  251. ch_props.empty_lvl_threshold = ep_cfg->prefetch_threshold;
  252. ch_props.err_cb = params->ch_err_cb;
  253. ch_props.chan_user_data = params->channel;
  254. res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl,
  255. &ep->gsi_chan_hdl);
  256. if (res) {
  257. IPA_MHI_ERR("gsi_alloc_channel failed %d\n",
  258. res);
  259. goto fail_alloc_ch;
  260. }
  261. memset(&ch_scratch, 0, sizeof(ch_scratch));
  262. ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND(
  263. params->channel_context_addr +
  264. offsetof(struct ipa_mhi_ch_ctx, wp));
  265. ch_scratch.mhi.assert_bit40 = params->assert_bit40;
  266. /*
  267. * Update scratch for MCS smart prefetch:
  268. * Starting IPA4.5, smart prefetch implemented by H/W.
  269. * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch
  270. * so keep the fields zero.
  271. */
  272. if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
  273. ch_scratch.mhi.max_outstanding_tre =
  274. ep_cfg->ipa_if_tlv * ch_props.re_size;
  275. ch_scratch.mhi.outstanding_threshold =
  276. min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
  277. }
  278. ch_scratch.mhi.oob_mod_threshold = 4;
  279. if (burst_mode_enabled) {
  280. ch_scratch.mhi.burst_mode_enabled = burst_mode_enabled;
  281. ch_scratch.mhi.polling_configuration =
  282. ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
  283. (ch_props.ring_len / ch_props.re_size));
  284. ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
  285. } else {
  286. ch_scratch.mhi.burst_mode_enabled = false;
  287. }
  288. res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
  289. ch_scratch);
  290. if (res) {
  291. IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n",
  292. res);
  293. goto fail_ch_scratch;
  294. }
  295. *params->mhi = ch_scratch.mhi;
  296. if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
  297. memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
  298. ep_cfg_ctrl.ipa_ep_delay = true;
  299. ep->ep_delay_set = true;
  300. res = ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
  301. if (res)
  302. IPA_MHI_ERR("client (ep: %d) failed result=%d\n",
  303. ipa_ep_idx, res);
  304. else
  305. IPA_MHI_DBG("client (ep: %d) success\n", ipa_ep_idx);
  306. } else {
  307. ep->ep_delay_set = false;
  308. }
  309. IPA_MHI_DBG("Starting channel\n");
  310. res = gsi_start_channel(ep->gsi_chan_hdl);
  311. if (res) {
  312. IPA_MHI_ERR("gsi_start_channel failed %d\n", res);
  313. goto fail_ch_start;
  314. }
  315. IPA_MHI_FUNC_EXIT();
  316. return 0;
  317. fail_ch_start:
  318. fail_ch_scratch:
  319. gsi_dealloc_channel(ep->gsi_chan_hdl);
  320. fail_alloc_ch:
  321. gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
  322. ep->gsi_evt_ring_hdl = ~0;
  323. fail_alloc_evt:
  324. return res;
  325. }
  326. int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
  327. {
  328. int res;
  329. struct gsi_device_scratch gsi_scratch;
  330. const struct ipa_gsi_ep_config *gsi_ep_info;
  331. IPA_MHI_FUNC_ENTRY();
  332. if (!params) {
  333. IPA_MHI_ERR("null args\n");
  334. return -EINVAL;
  335. }
  336. if ((IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) >
  337. ((ipa3_ctx->mhi_evid_limits[1] -
  338. ipa3_ctx->mhi_evid_limits[0]) + 1)) {
  339. IPAERR("Not enough event rings for MHI\n");
  340. ipa_assert();
  341. return -EINVAL;
  342. }
  343. /* Initialize IPA MHI engine */
  344. gsi_ep_info = ipa3_get_gsi_ep_info(IPA_CLIENT_MHI_PROD);
  345. if (!gsi_ep_info) {
  346. IPAERR("MHI PROD has no ep allocated\n");
  347. ipa_assert();
  348. }
  349. memset(&gsi_scratch, 0, sizeof(gsi_scratch));
  350. gsi_scratch.mhi_base_chan_idx_valid = true;
  351. gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num +
  352. params->gsi.first_ch_idx;
  353. res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
  354. &gsi_scratch);
  355. if (res) {
  356. IPA_MHI_ERR("failed to write device scratch %d\n", res);
  357. goto fail_init_engine;
  358. }
  359. IPA_MHI_FUNC_EXIT();
  360. return 0;
  361. fail_init_engine:
  362. return res;
  363. }
/**
 * ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
 * MHI channel
 * @in: connect parameters
 * @clnt_hdl: [out] client handle for this pipe
 *
 * This function is called by IPA MHI client driver on MHI channel start.
 * This function is called after MHI engine was started.
 *
 * Return codes: 0 : success
 * negative : error
 */
int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
	u32 *clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int ipa_ep_idx;
	int res;
	enum ipa_client_type client;

	IPA_MHI_FUNC_ENTRY();

	if (!in || !clnt_hdl) {
		IPA_MHI_ERR("NULL args\n");
		return -EINVAL;
	}

	/* translate the caller's relative event channel id into the
	 * absolute MHI event-id window
	 */
	in->start.gsi.evchid += ipa3_ctx->mhi_evid_limits[0];

	client = in->sys->client;
	ipa_ep_idx = ipa3_get_ep_mapping(client);
	if (ipa_ep_idx == -1) {
		IPA_MHI_ERR("Invalid client.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid == 1) {
		IPA_MHI_ERR("EP already allocated.\n");
		return -EPERM;
	}

	/* clear the EP context up to (not including) the sys field */
	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
	ep->valid = 1;
	ep->skip_ep_cfg = in->sys->skip_ep_cfg;
	ep->client = client;
	ep->client_notify = in->sys->notify;
	ep->priv = in->sys->priv;
	ep->keep_ipa_awake = in->sys->keep_ipa_awake;

	res = ipa_mhi_start_gsi_channel(client,
		ipa_ep_idx, &in->start.gsi);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
			res);
		goto fail_start_channel;
	}

	res = ipa3_enable_data_path(ipa_ep_idx);
	if (res) {
		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
			ipa_ep_idx);
		goto fail_ep_cfg;
	}

	if (!ep->skip_ep_cfg) {
		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_ep_cfg;
		}
		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_ep_cfg;
		}
		IPA_MHI_DBG("ep configuration successful\n");
	} else {
		IPA_MHI_DBG("skipping ep configuration\n");
	}

	*clnt_hdl = ipa_ep_idx;

	/* producer pipes get the default filter rules unless EP
	 * configuration is skipped
	 */
	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
		ipa3_install_dflt_flt_rules(ipa_ep_idx);

	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
		ipa_ep_idx);
	IPA_MHI_FUNC_EXIT();
	return 0;

fail_ep_cfg:
	ipa3_disable_data_path(ipa_ep_idx);
fail_start_channel:
	/* release the EP context; note all failures collapse to -EPERM */
	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
	return -EPERM;
}
  447. /**
  448. * ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
  449. * MHI channel
  450. * @clnt_hdl: client handle for this pipe
  451. *
  452. * This function is called by IPA MHI client driver on MHI channel reset.
  453. * This function is called after MHI channel was started.
  454. * This function is doing the following:
  455. * - Send command to uC/GSI to reset corresponding MHI channel
  456. * - Configure IPA EP control
  457. *
  458. * Return codes: 0 : success
  459. * negative : error
  460. */
  461. int ipa3_disconnect_mhi_pipe(u32 clnt_hdl)
  462. {
  463. struct ipa3_ep_context *ep;
  464. int res;
  465. struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
  466. IPA_MHI_FUNC_ENTRY();
  467. if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
  468. IPAERR("invalid handle %d\n", clnt_hdl);
  469. return -EINVAL;
  470. }
  471. if (ipa3_ctx->ep[clnt_hdl].valid == 0) {
  472. IPAERR("pipe was not connected %d\n", clnt_hdl);
  473. return -EINVAL;
  474. }
  475. ep = &ipa3_ctx->ep[clnt_hdl];
  476. if (ep->ep_delay_set) {
  477. memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
  478. ep_cfg_ctrl.ipa_ep_delay = false;
  479. res = ipa3_cfg_ep_ctrl(clnt_hdl,
  480. &ep_cfg_ctrl);
  481. if (res) {
  482. IPAERR
  483. ("client(ep:%d) failed to remove delay res=%d\n",
  484. clnt_hdl, res);
  485. } else {
  486. IPADBG("client (ep: %d) delay removed\n",
  487. clnt_hdl);
  488. ep->ep_delay_set = false;
  489. }
  490. }
  491. res = gsi_dealloc_channel(ep->gsi_chan_hdl);
  492. if (res) {
  493. IPAERR("gsi_dealloc_channel failed %d\n", res);
  494. goto fail_reset_channel;
  495. }
  496. ep->valid = 0;
  497. ipa3_delete_dflt_flt_rules(clnt_hdl);
  498. IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
  499. IPA_MHI_FUNC_EXIT();
  500. return 0;
  501. fail_reset_channel:
  502. return res;
  503. }
/*
 * ipa3_mhi_resume_channels_internal() - Resume (restart) an MHI GSI channel
 * @client: IPA client type of the channel to resume
 * @LPTransitionRejected: true if the low-power transition was rejected
 * @brstmode_enabled: true if burst mode is enabled on this channel
 * @ch_scratch: caller's saved copy of the channel scratch
 * @index: channel index (not referenced in this function)
 *
 * If burst mode is enabled and the LP transition was not rejected, the
 * polling-mode bit of the saved scratch is fixed up (see comment below)
 * and written back before the channel is started.
 *
 * Return: 0 on success, negative on failure.
 */
int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
	bool LPTransitionRejected, bool brstmode_enabled,
	union __packed gsi_channel_scratch ch_scratch, u8 index)
{
	int res;
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	union __packed gsi_channel_scratch gsi_ch_scratch;

	IPA_MHI_FUNC_ENTRY();

	ipa_ep_idx = ipa3_get_ep_mapping(client);
	if (ipa_ep_idx < 0) {
		IPA_MHI_ERR("Invalid client %d\n", client);
		return -EINVAL;
	}
	ep = &ipa3_ctx->ep[ipa_ep_idx];

	if (brstmode_enabled && !LPTransitionRejected) {
		/* read the live scratch so the current polling-mode bit can
		 * be preserved for consumer pipes
		 */
		res = gsi_read_channel_scratch(ep->gsi_chan_hdl,
			&gsi_ch_scratch);
		if (res) {
			IPA_MHI_ERR("read ch scratch fail %d\n", res);
			return res;
		}
		/*
		 * set polling mode bit to DB mode before
		 * resuming the channel
		 *
		 * For MHI-->IPA pipes:
		 * when resuming due to transition to M0,
		 * set the polling mode bit to 0.
		 * In other cases, restore it's value form
		 * when you stopped the channel.
		 * Here, after successful resume client move to M0 state.
		 * So, by default setting polling mode bit to 0.
		 *
		 * For IPA-->MHI pipe:
		 * always restore the polling mode bit.
		 */
		if (IPA_CLIENT_IS_PROD(client))
			ch_scratch.mhi.polling_mode =
				IPA_MHI_POLLING_MODE_DB_MODE;
		else
			ch_scratch.mhi.polling_mode =
				gsi_ch_scratch.mhi.polling_mode;
		/* Use GSI update API to not affect non-SWI fields
		 * inside the scratch while in suspend-resume operation
		 */
		res = gsi_update_mhi_channel_scratch(
			ep->gsi_chan_hdl, ch_scratch.mhi);
		if (res) {
			IPA_MHI_ERR("write ch scratch fail %d\n"
			, res);
			return res;
		}
	}

	res = gsi_start_channel(ep->gsi_chan_hdl);
	if (res) {
		IPA_MHI_ERR("failed to resume channel error %d\n", res);
		return res;
	}

	IPA_MHI_FUNC_EXIT();
	return 0;
}
  566. int ipa3_mhi_query_ch_info(enum ipa_client_type client,
  567. struct gsi_chan_info *ch_info)
  568. {
  569. int ipa_ep_idx;
  570. int res;
  571. struct ipa3_ep_context *ep;
  572. IPA_MHI_FUNC_ENTRY();
  573. ipa_ep_idx = ipa3_get_ep_mapping(client);
  574. if (ipa_ep_idx < 0) {
  575. IPA_MHI_ERR("Invalid client %d\n", client);
  576. return -EINVAL;
  577. }
  578. ep = &ipa3_ctx->ep[ipa_ep_idx];
  579. res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
  580. if (res) {
  581. IPA_MHI_ERR("gsi_query_channel_info failed\n");
  582. return res;
  583. }
  584. IPA_MHI_FUNC_EXIT();
  585. return 0;
  586. }
  587. bool ipa3_has_open_aggr_frame(enum ipa_client_type client)
  588. {
  589. u32 aggr_state_active;
  590. int ipa_ep_idx;
  591. aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
  592. IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
  593. ipa_ep_idx = ipa_get_ep_mapping(client);
  594. if (ipa_ep_idx == -1) {
  595. ipa_assert();
  596. return false;
  597. }
  598. if ((1 << ipa_ep_idx) & aggr_state_active)
  599. return true;
  600. return false;
  601. }
  602. int ipa3_mhi_destroy_channel(enum ipa_client_type client)
  603. {
  604. int res;
  605. int ipa_ep_idx;
  606. struct ipa3_ep_context *ep;
  607. ipa_ep_idx = ipa3_get_ep_mapping(client);
  608. if (ipa_ep_idx < 0) {
  609. IPA_MHI_ERR("Invalid client %d\n", client);
  610. return -EINVAL;
  611. }
  612. ep = &ipa3_ctx->ep[ipa_ep_idx];
  613. IPA_ACTIVE_CLIENTS_INC_EP(client);
  614. IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
  615. ep->gsi_evt_ring_hdl, ipa_ep_idx);
  616. res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
  617. if (res) {
  618. IPAERR(" failed to reset evt ring %lu, err %d\n"
  619. , ep->gsi_evt_ring_hdl, res);
  620. goto fail;
  621. }
  622. IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n",
  623. ep->gsi_evt_ring_hdl, ipa_ep_idx);
  624. res = gsi_dealloc_evt_ring(
  625. ep->gsi_evt_ring_hdl);
  626. if (res) {
  627. IPAERR("dealloc evt ring %lu failed, err %d\n"
  628. , ep->gsi_evt_ring_hdl, res);
  629. goto fail;
  630. }
  631. IPA_ACTIVE_CLIENTS_DEC_EP(client);
  632. return 0;
  633. fail:
  634. IPA_ACTIVE_CLIENTS_DEC_EP(client);
  635. return res;
  636. }
  637. MODULE_LICENSE("GPL v2");
  638. MODULE_DESCRIPTION("IPA MHI driver");