ipa_mhi.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include <linux/export.h>
  7. #include <linux/delay.h>
  8. #include <linux/module.h>
  9. #include <linux/mutex.h>
  10. #include <linux/ipa.h>
  11. #include <linux/msm_gsi.h>
  12. #include <linux/ipa_mhi.h>
  13. #include "../ipa_common_i.h"
  14. #include "ipa_i.h"
  15. #include "ipa_qmi_service.h"
#define IPA_MHI_DRV_NAME "ipa_mhi"

/*
 * IPA_MHI_DBG() - debug print to the kernel log (pr_debug) and to both the
 * default and low-priority IPA IPC log buffers.
 * NOTE(review): the IPC variants do not pass __func__/__LINE__ even though
 * the format contains "%s:%d" — presumably IPA_IPC_LOGGING inserts them
 * itself; confirm against its definition.
 */
#define IPA_MHI_DBG(fmt, args...) \
do { \
	pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
		__func__, __LINE__, ## args); \
	IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
		IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
		IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)

/* Low-priority debug print: kernel log + low-priority IPC buffer only. */
#define IPA_MHI_DBG_LOW(fmt, args...) \
do { \
	pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
		__func__, __LINE__, ## args); \
	IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
		IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)

/* Error print: pr_err plus both IPC log buffers. */
#define IPA_MHI_ERR(fmt, args...) \
do { \
	pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
		__func__, __LINE__, ## args); \
	IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
		IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
	IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
		IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)

/* Function entry/exit trace markers used throughout this file. */
#define IPA_MHI_FUNC_ENTRY() \
	IPA_MHI_DBG("ENTRY\n")
#define IPA_MHI_FUNC_EXIT() \
	IPA_MHI_DBG("EXIT\n")

/* Number of MHI uplink (device->host) and downlink channels supported. */
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 2

/*
 * bit #40 in address should be asserted for MHI transfers over pcie.
 * NOTE(review): this macro references a local variable named "params" at
 * the expansion site — it can only be used inside functions that declare
 * a suitable "params" with an assert_bit40 field.
 */
#define IPA_MHI_HOST_ADDR_COND(addr) \
	((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))

/* Channel polling mode values written into the MHI scratch area. */
enum ipa3_mhi_polling_mode {
	IPA_MHI_POLLING_MODE_DB_MODE,
	IPA_MHI_POLLING_MODE_POLL_MODE,
};
  55. bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client)
  56. {
  57. int res;
  58. int ipa_ep_idx;
  59. struct ipa3_ep_context *ep;
  60. IPA_MHI_FUNC_ENTRY();
  61. ipa_ep_idx = ipa3_get_ep_mapping(client);
  62. if (ipa_ep_idx == -1) {
  63. IPA_MHI_ERR("Invalid client.\n");
  64. return -EINVAL;
  65. }
  66. ep = &ipa3_ctx->ep[ipa_ep_idx];
  67. IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl);
  68. res = gsi_stop_channel(ep->gsi_chan_hdl);
  69. if (res != 0 &&
  70. res != -GSI_STATUS_AGAIN &&
  71. res != -GSI_STATUS_TIMED_OUT) {
  72. IPA_MHI_ERR("GSI stop channel failed %d\n",
  73. res);
  74. WARN_ON(1);
  75. return false;
  76. }
  77. if (res == 0) {
  78. IPA_MHI_DBG_LOW("GSI channel %ld STOP\n",
  79. ep->gsi_chan_hdl);
  80. return true;
  81. }
  82. return false;
  83. }
  84. static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client)
  85. {
  86. int res;
  87. int clnt_hdl;
  88. IPA_MHI_FUNC_ENTRY();
  89. clnt_hdl = ipa3_get_ep_mapping(client);
  90. if (clnt_hdl < 0)
  91. return -EFAULT;
  92. res = ipa3_reset_gsi_channel(clnt_hdl);
  93. if (res) {
  94. IPA_MHI_ERR("ipa3_reset_gsi_channel failed %d\n", res);
  95. return -EFAULT;
  96. }
  97. IPA_MHI_FUNC_EXIT();
  98. return 0;
  99. }
  100. int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
  101. {
  102. int res;
  103. IPA_MHI_FUNC_ENTRY();
  104. res = ipa3_mhi_reset_gsi_channel(client);
  105. if (res) {
  106. IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
  107. ipa_assert();
  108. return res;
  109. }
  110. res = ipa3_disable_data_path(ipa3_get_ep_mapping(client));
  111. if (res) {
  112. IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
  113. return res;
  114. }
  115. IPA_MHI_FUNC_EXIT();
  116. return 0;
  117. }
  118. int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
  119. {
  120. int res;
  121. int ipa_ep_idx;
  122. IPA_MHI_FUNC_ENTRY();
  123. ipa_ep_idx = ipa3_get_ep_mapping(client);
  124. if (ipa_ep_idx < 0) {
  125. IPA_MHI_ERR("Invalid client %d\n", client);
  126. return -EINVAL;
  127. }
  128. res = ipa3_enable_data_path(ipa_ep_idx);
  129. if (res) {
  130. IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
  131. return res;
  132. }
  133. IPA_MHI_FUNC_EXIT();
  134. return 0;
  135. }
  136. static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
  137. struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size)
  138. {
  139. switch (ch_ctx_host->pollcfg) {
  140. case 0:
  141. /*set default polling configuration according to MHI spec*/
  142. if (IPA_CLIENT_IS_PROD(client))
  143. return 7;
  144. else
  145. return (ring_size/2)/8;
  146. break;
  147. default:
  148. return ch_ctx_host->pollcfg;
  149. }
  150. }
  151. static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
  152. int ipa_ep_idx, struct start_gsi_channel *params)
  153. {
  154. int res = 0;
  155. struct gsi_evt_ring_props ev_props;
  156. struct ipa_mhi_msi_info *msi;
  157. struct gsi_chan_props ch_props;
  158. union __packed gsi_channel_scratch ch_scratch;
  159. struct ipa3_ep_context *ep;
  160. const struct ipa_gsi_ep_config *ep_cfg;
  161. struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
  162. bool burst_mode_enabled = false;
  163. IPA_MHI_FUNC_ENTRY();
  164. ep = &ipa3_ctx->ep[ipa_ep_idx];
  165. msi = params->msi;
  166. ep_cfg = ipa3_get_gsi_ep_info(client);
  167. if (!ep_cfg) {
  168. IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
  169. return -EPERM;
  170. }
  171. /* allocate event ring only for the first time pipe is connected */
  172. if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
  173. memset(&ev_props, 0, sizeof(ev_props));
  174. ev_props.intf = GSI_EVT_CHTYPE_MHI_EV;
  175. ev_props.intr = GSI_INTR_MSI;
  176. ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
  177. ev_props.ring_len = params->ev_ctx_host->rlen;
  178. ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
  179. params->ev_ctx_host->rbase);
  180. ev_props.int_modt = params->ev_ctx_host->intmodt *
  181. IPA_SLEEP_CLK_RATE_KHZ;
  182. ev_props.int_modc = params->ev_ctx_host->intmodc;
  183. ev_props.intvec = ((msi->data & ~msi->mask) |
  184. (params->ev_ctx_host->msivec & msi->mask));
  185. ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND(
  186. (((u64)msi->addr_hi << 32) | msi->addr_low));
  187. ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND(
  188. params->event_context_addr +
  189. offsetof(struct ipa_mhi_ev_ctx, rp));
  190. ev_props.exclusive = true;
  191. ev_props.err_cb = params->ev_err_cb;
  192. ev_props.user_data = params->channel;
  193. ev_props.evchid_valid = true;
  194. ev_props.evchid = params->evchid;
  195. IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n",
  196. ipa_ep_idx, ev_props.evchid);
  197. res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl,
  198. &ep->gsi_evt_ring_hdl);
  199. if (res) {
  200. IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
  201. goto fail_alloc_evt;
  202. }
  203. IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
  204. client,
  205. ep->gsi_evt_ring_hdl);
  206. *params->cached_gsi_evt_ring_hdl =
  207. ep->gsi_evt_ring_hdl;
  208. } else {
  209. IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n",
  210. *params->cached_gsi_evt_ring_hdl);
  211. ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
  212. }
  213. if (params->ev_ctx_host->wp == params->ev_ctx_host->rbase) {
  214. IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n",
  215. params->ev_ctx_host->wp);
  216. goto fail_alloc_ch;
  217. }
  218. IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n",
  219. ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
  220. res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl,
  221. params->ev_ctx_host->wp);
  222. if (res) {
  223. IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n",
  224. res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
  225. goto fail_alloc_ch;
  226. }
  227. memset(&ch_props, 0, sizeof(ch_props));
  228. ch_props.prot = GSI_CHAN_PROT_MHI;
  229. ch_props.dir = IPA_CLIENT_IS_PROD(client) ?
  230. GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
  231. ch_props.ch_id = ep_cfg->ipa_gsi_chan_num;
  232. ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
  233. ch_props.re_size = GSI_CHAN_RE_SIZE_16B;
  234. ch_props.ring_len = params->ch_ctx_host->rlen;
  235. ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
  236. params->ch_ctx_host->rbase);
  237. /* Burst mode is not supported on DPL pipes */
  238. if ((client != IPA_CLIENT_MHI_DPL_CONS) &&
  239. (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
  240. params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE)) {
  241. burst_mode_enabled = true;
  242. }
  243. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
  244. !burst_mode_enabled)
  245. ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
  246. else
  247. ch_props.use_db_eng = GSI_CHAN_DB_MODE;
  248. ch_props.db_in_bytes = 1;
  249. ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
  250. ch_props.low_weight = 1;
  251. ch_props.prefetch_mode = ep_cfg->prefetch_mode;
  252. ch_props.empty_lvl_threshold = ep_cfg->prefetch_threshold;
  253. ch_props.err_cb = params->ch_err_cb;
  254. ch_props.chan_user_data = params->channel;
  255. res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl,
  256. &ep->gsi_chan_hdl);
  257. if (res) {
  258. IPA_MHI_ERR("gsi_alloc_channel failed %d\n",
  259. res);
  260. goto fail_alloc_ch;
  261. }
  262. memset(&ch_scratch, 0, sizeof(ch_scratch));
  263. ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND(
  264. params->channel_context_addr +
  265. offsetof(struct ipa_mhi_ch_ctx, wp));
  266. ch_scratch.mhi.assert_bit40 = params->assert_bit40;
  267. /*
  268. * Update scratch for MCS smart prefetch:
  269. * Starting IPA4.5, smart prefetch implemented by H/W.
  270. * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch
  271. * so keep the fields zero.
  272. */
  273. if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
  274. ch_scratch.mhi.max_outstanding_tre =
  275. ep_cfg->ipa_if_tlv * ch_props.re_size;
  276. ch_scratch.mhi.outstanding_threshold =
  277. min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
  278. }
  279. ch_scratch.mhi.oob_mod_threshold = 4;
  280. if (burst_mode_enabled) {
  281. ch_scratch.mhi.burst_mode_enabled = burst_mode_enabled;
  282. ch_scratch.mhi.polling_configuration =
  283. ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
  284. (ch_props.ring_len / ch_props.re_size));
  285. ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
  286. } else {
  287. ch_scratch.mhi.burst_mode_enabled = false;
  288. }
  289. res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
  290. ch_scratch);
  291. if (res) {
  292. IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n",
  293. res);
  294. goto fail_ch_scratch;
  295. }
  296. *params->mhi = ch_scratch.mhi;
  297. if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
  298. memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
  299. ep_cfg_ctrl.ipa_ep_delay = true;
  300. ep->ep_delay_set = true;
  301. res = ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
  302. if (res)
  303. IPA_MHI_ERR("client (ep: %d) failed result=%d\n",
  304. ipa_ep_idx, res);
  305. else
  306. IPA_MHI_DBG("client (ep: %d) success\n", ipa_ep_idx);
  307. } else {
  308. ep->ep_delay_set = false;
  309. }
  310. IPA_MHI_DBG("Starting channel\n");
  311. res = gsi_start_channel(ep->gsi_chan_hdl);
  312. if (res) {
  313. IPA_MHI_ERR("gsi_start_channel failed %d\n", res);
  314. goto fail_ch_start;
  315. }
  316. IPA_MHI_FUNC_EXIT();
  317. return 0;
  318. fail_ch_start:
  319. fail_ch_scratch:
  320. gsi_dealloc_channel(ep->gsi_chan_hdl);
  321. fail_alloc_ch:
  322. gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
  323. ep->gsi_evt_ring_hdl = ~0;
  324. fail_alloc_evt:
  325. return res;
  326. }
  327. int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
  328. {
  329. int res;
  330. struct gsi_device_scratch gsi_scratch;
  331. const struct ipa_gsi_ep_config *gsi_ep_info;
  332. IPA_MHI_FUNC_ENTRY();
  333. if (!params) {
  334. IPA_MHI_ERR("null args\n");
  335. return -EINVAL;
  336. }
  337. if ((IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) >
  338. ((ipa3_ctx->mhi_evid_limits[1] -
  339. ipa3_ctx->mhi_evid_limits[0]) + 1)) {
  340. IPAERR("Not enough event rings for MHI\n");
  341. ipa_assert();
  342. return -EINVAL;
  343. }
  344. /* Initialize IPA MHI engine */
  345. gsi_ep_info = ipa3_get_gsi_ep_info(IPA_CLIENT_MHI_PROD);
  346. if (!gsi_ep_info) {
  347. IPAERR("MHI PROD has no ep allocated\n");
  348. ipa_assert();
  349. }
  350. memset(&gsi_scratch, 0, sizeof(gsi_scratch));
  351. gsi_scratch.mhi_base_chan_idx_valid = true;
  352. gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num +
  353. params->gsi.first_ch_idx;
  354. res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
  355. &gsi_scratch);
  356. if (res) {
  357. IPA_MHI_ERR("failed to write device scratch %d\n", res);
  358. goto fail_init_engine;
  359. }
  360. IPA_MHI_FUNC_EXIT();
  361. return 0;
  362. fail_init_engine:
  363. return res;
  364. }
/**
 * ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
 * MHI channel
 * @in: connect parameters
 * @clnt_hdl: [out] client handle for this pipe
 *
 * This function is called by IPA MHI client driver on MHI channel start.
 * This function is called after MHI engine was started.
 *
 * Return codes: 0 : success
 * negative : error
 */
int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
	u32 *clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int ipa_ep_idx;
	int res;
	enum ipa_client_type client;

	IPA_MHI_FUNC_ENTRY();

	if (!in || !clnt_hdl) {
		IPA_MHI_ERR("NULL args\n");
		return -EINVAL;
	}

	/* rebase the requested event channel ID into the MHI-reserved range */
	in->start.gsi.evchid += ipa3_ctx->mhi_evid_limits[0];

	client = in->sys->client;
	ipa_ep_idx = ipa3_get_ep_mapping(client);
	if (ipa_ep_idx == -1) {
		IPA_MHI_ERR("Invalid client.\n");
		return -EINVAL;
	}
	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid == 1) {
		IPA_MHI_ERR("EP already allocated.\n");
		return -EPERM;
	}

	/* clear everything up to (not including) the sys field */
	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
	ep->valid = 1;
	ep->skip_ep_cfg = in->sys->skip_ep_cfg;
	ep->client = client;
	ep->client_notify = in->sys->notify;
	ep->priv = in->sys->priv;
	ep->keep_ipa_awake = in->sys->keep_ipa_awake;

	res = ipa_mhi_start_gsi_channel(client,
		ipa_ep_idx, &in->start.gsi);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
			res);
		goto fail_start_channel;
	}

	res = ipa3_enable_data_path(ipa_ep_idx);
	if (res) {
		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
			ipa_ep_idx);
		goto fail_ep_cfg;
	}

	if (!ep->skip_ep_cfg) {
		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_ep_cfg;
		}
		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_ep_cfg;
		}
		IPA_MHI_DBG("ep configuration successful\n");
	} else {
		IPA_MHI_DBG("skipping ep configuration\n");
	}

	*clnt_hdl = ipa_ep_idx;

	/* producer pipes that get configured also get default filter rules */
	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
		ipa3_install_dflt_flt_rules(ipa_ep_idx);

	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
		ipa_ep_idx);
	IPA_MHI_FUNC_EXIT();
	return 0;

fail_ep_cfg:
	ipa3_disable_data_path(ipa_ep_idx);
fail_start_channel:
	/* NOTE(review): the GSI channel/event ring started above are not
	 * torn down here; presumably reclaimed on a later reset — confirm.
	 */
	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
	return -EPERM;
}
  448. /**
  449. * ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
  450. * MHI channel
  451. * @clnt_hdl: client handle for this pipe
  452. *
  453. * This function is called by IPA MHI client driver on MHI channel reset.
  454. * This function is called after MHI channel was started.
  455. * This function is doing the following:
  456. * - Send command to uC/GSI to reset corresponding MHI channel
  457. * - Configure IPA EP control
  458. *
  459. * Return codes: 0 : success
  460. * negative : error
  461. */
  462. int ipa3_disconnect_mhi_pipe(u32 clnt_hdl)
  463. {
  464. struct ipa3_ep_context *ep;
  465. int res;
  466. struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
  467. IPA_MHI_FUNC_ENTRY();
  468. if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
  469. IPAERR("invalid handle %d\n", clnt_hdl);
  470. return -EINVAL;
  471. }
  472. if (ipa3_ctx->ep[clnt_hdl].valid == 0) {
  473. IPAERR("pipe was not connected %d\n", clnt_hdl);
  474. return -EINVAL;
  475. }
  476. ep = &ipa3_ctx->ep[clnt_hdl];
  477. if (ep->ep_delay_set) {
  478. memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
  479. ep_cfg_ctrl.ipa_ep_delay = false;
  480. res = ipa3_cfg_ep_ctrl(clnt_hdl,
  481. &ep_cfg_ctrl);
  482. if (res) {
  483. IPAERR
  484. ("client(ep:%d) failed to remove delay res=%d\n",
  485. clnt_hdl, res);
  486. } else {
  487. IPADBG("client (ep: %d) delay removed\n",
  488. clnt_hdl);
  489. ep->ep_delay_set = false;
  490. }
  491. }
  492. res = gsi_dealloc_channel(ep->gsi_chan_hdl);
  493. if (res) {
  494. IPAERR("gsi_dealloc_channel failed %d\n", res);
  495. goto fail_reset_channel;
  496. }
  497. ep->valid = 0;
  498. ipa3_delete_dflt_flt_rules(clnt_hdl);
  499. IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
  500. IPA_MHI_FUNC_EXIT();
  501. return 0;
  502. fail_reset_channel:
  503. return res;
  504. }
/*
 * Resume an MHI channel after low-power suspend.
 * @client: IPA client whose channel is resumed
 * @LPTransitionRejected: true if the low-power transition was rejected
 * @brstmode_enabled: whether burst mode is enabled on this channel
 * @ch_scratch: caller's saved copy of the channel scratch
 * @index: channel index (unused here; kept for interface compatibility)
 *
 * If burst mode is on and the LP transition was accepted, the polling-mode
 * bit in scratch is fixed up before restarting the channel; otherwise the
 * channel is simply restarted.
 */
int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
	bool LPTransitionRejected, bool brstmode_enabled,
	union __packed gsi_channel_scratch ch_scratch, u8 index)
{
	int res;
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	union __packed gsi_channel_scratch gsi_ch_scratch;

	IPA_MHI_FUNC_ENTRY();

	ipa_ep_idx = ipa3_get_ep_mapping(client);
	if (ipa_ep_idx < 0) {
		IPA_MHI_ERR("Invalid client %d\n", client);
		return -EINVAL;
	}
	ep = &ipa3_ctx->ep[ipa_ep_idx];

	if (brstmode_enabled && !LPTransitionRejected) {
		/* read back the live scratch to recover the HW's
		 * current polling-mode bit
		 */
		res = gsi_read_channel_scratch(ep->gsi_chan_hdl,
			&gsi_ch_scratch);
		if (res) {
			IPA_MHI_ERR("read ch scratch fail %d\n", res);
			return res;
		}
		/*
		 * set polling mode bit to DB mode before
		 * resuming the channel
		 *
		 * For MHI-->IPA pipes:
		 * when resuming due to transition to M0,
		 * set the polling mode bit to 0.
		 * In other cases, restore it's value form
		 * when you stopped the channel.
		 * Here, after successful resume client move to M0 state.
		 * So, by default setting polling mode bit to 0.
		 *
		 * For IPA-->MHI pipe:
		 * always restore the polling mode bit.
		 */
		if (IPA_CLIENT_IS_PROD(client))
			ch_scratch.mhi.polling_mode =
				IPA_MHI_POLLING_MODE_DB_MODE;
		else
			ch_scratch.mhi.polling_mode =
				gsi_ch_scratch.mhi.polling_mode;

		/* Use GSI update API to not affect non-SWI fields
		 * inside the scratch while in suspend-resume operation
		 */
		res = gsi_update_mhi_channel_scratch(
			ep->gsi_chan_hdl, ch_scratch.mhi);
		if (res) {
			IPA_MHI_ERR("write ch scratch fail %d\n"
				, res);
			return res;
		}
	}

	res = gsi_start_channel(ep->gsi_chan_hdl);
	if (res) {
		IPA_MHI_ERR("failed to resume channel error %d\n", res);
		return res;
	}

	IPA_MHI_FUNC_EXIT();
	return 0;
}
  567. int ipa3_mhi_query_ch_info(enum ipa_client_type client,
  568. struct gsi_chan_info *ch_info)
  569. {
  570. int ipa_ep_idx;
  571. int res;
  572. struct ipa3_ep_context *ep;
  573. IPA_MHI_FUNC_ENTRY();
  574. ipa_ep_idx = ipa3_get_ep_mapping(client);
  575. if (ipa_ep_idx < 0) {
  576. IPA_MHI_ERR("Invalid client %d\n", client);
  577. return -EINVAL;
  578. }
  579. ep = &ipa3_ctx->ep[ipa_ep_idx];
  580. res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
  581. if (res) {
  582. IPA_MHI_ERR("gsi_query_channel_info failed\n");
  583. return res;
  584. }
  585. IPA_MHI_FUNC_EXIT();
  586. return 0;
  587. }
  588. bool ipa3_has_open_aggr_frame(enum ipa_client_type client)
  589. {
  590. u32 aggr_state_active;
  591. int ipa_ep_idx;
  592. aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
  593. IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
  594. ipa_ep_idx = ipa_get_ep_mapping(client);
  595. if (ipa_ep_idx == -1) {
  596. ipa_assert();
  597. return false;
  598. }
  599. if ((1 << ipa_ep_idx) & aggr_state_active)
  600. return true;
  601. return false;
  602. }
  603. int ipa3_mhi_destroy_channel(enum ipa_client_type client)
  604. {
  605. int res;
  606. int ipa_ep_idx;
  607. struct ipa3_ep_context *ep;
  608. ipa_ep_idx = ipa3_get_ep_mapping(client);
  609. if (ipa_ep_idx < 0) {
  610. IPA_MHI_ERR("Invalid client %d\n", client);
  611. return -EINVAL;
  612. }
  613. ep = &ipa3_ctx->ep[ipa_ep_idx];
  614. IPA_ACTIVE_CLIENTS_INC_EP(client);
  615. IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
  616. ep->gsi_evt_ring_hdl, ipa_ep_idx);
  617. res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
  618. if (res) {
  619. IPAERR(" failed to reset evt ring %lu, err %d\n"
  620. , ep->gsi_evt_ring_hdl, res);
  621. goto fail;
  622. }
  623. IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n",
  624. ep->gsi_evt_ring_hdl, ipa_ep_idx);
  625. res = gsi_dealloc_evt_ring(
  626. ep->gsi_evt_ring_hdl);
  627. if (res) {
  628. IPAERR("dealloc evt ring %lu failed, err %d\n"
  629. , ep->gsi_evt_ring_hdl, res);
  630. goto fail;
  631. }
  632. IPA_ACTIVE_CLIENTS_DEC_EP(client);
  633. return 0;
  634. fail:
  635. IPA_ACTIVE_CLIENTS_DEC_EP(client);
  636. return res;
  637. }
  638. MODULE_LICENSE("GPL v2");
  639. MODULE_DESCRIPTION("IPA MHI driver");