ipa_wdi3_i.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2018 - 2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include "ipa_i.h"
  6. #include <linux/ipa_wdi3.h>
  7. #define UPDATE_RP_MODERATION_CONFIG 1
  8. #define UPDATE_RP_MODERATION_THRESHOLD 8
  9. #define IPA_WLAN_AGGR_PKT_LIMIT 1
  10. #define IPA_WLAN_AGGR_BYTE_LIMIT 2 /*2 Kbytes Agger hard byte limit*/
  11. #define IPA_WDI3_GSI_EVT_RING_INT_MODT 32
  12. static void ipa3_wdi3_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
  13. {
  14. switch (notify->evt_id) {
  15. case GSI_EVT_OUT_OF_BUFFERS_ERR:
  16. IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
  17. break;
  18. case GSI_EVT_OUT_OF_RESOURCES_ERR:
  19. IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
  20. break;
  21. case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
  22. IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
  23. break;
  24. case GSI_EVT_EVT_RING_EMPTY_ERR:
  25. IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
  26. break;
  27. default:
  28. IPAERR("Unexpected err evt: %d\n", notify->evt_id);
  29. }
  30. ipa_assert();
  31. }
  32. static void ipa3_wdi3_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
  33. {
  34. switch (notify->evt_id) {
  35. case GSI_CHAN_INVALID_TRE_ERR:
  36. IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
  37. break;
  38. case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
  39. IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
  40. break;
  41. case GSI_CHAN_OUT_OF_BUFFERS_ERR:
  42. IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
  43. break;
  44. case GSI_CHAN_OUT_OF_RESOURCES_ERR:
  45. IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
  46. break;
  47. case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
  48. IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
  49. break;
  50. case GSI_CHAN_HWO_1_ERR:
  51. IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
  52. break;
  53. default:
  54. IPAERR("Unexpected err evt: %d\n", notify->evt_id);
  55. }
  56. ipa_assert();
  57. }
  58. static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
  59. struct ipa_wdi_pipe_setup_info *info,
  60. struct ipa_wdi_pipe_setup_info_smmu *info_smmu, u8 dir,
  61. struct ipa3_ep_context *ep)
  62. {
  63. struct gsi_evt_ring_props gsi_evt_ring_props;
  64. struct gsi_chan_props gsi_channel_props;
  65. union __packed gsi_channel_scratch ch_scratch;
  66. union __packed gsi_evt_scratch evt_scratch;
  67. const struct ipa_gsi_ep_config *gsi_ep_info;
  68. int result, len;
  69. unsigned long va;
  70. uint32_t addr_low, addr_high;
  71. if (!info || !info_smmu || !ep) {
  72. IPAERR("invalid input\n");
  73. return -EINVAL;
  74. }
  75. /* setup event ring */
  76. memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
  77. gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_WDI3_EV;
  78. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_9) {
  79. gsi_evt_ring_props.intr = GSI_INTR_MSI;
  80. /* 32 (for Tx) and 8 (for Rx) */
  81. if (dir == IPA_WDI3_TX_DIR)
  82. gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_32B;
  83. else
  84. gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_8B;
  85. } else {
  86. gsi_evt_ring_props.intr = GSI_INTR_IRQ;
  87. /* 16 (for Tx) and 8 (for Rx) */
  88. if (dir == IPA_WDI3_TX_DIR)
  89. gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
  90. else
  91. gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_8B;
  92. }
  93. if (!is_smmu_enabled) {
  94. gsi_evt_ring_props.ring_len = info->event_ring_size;
  95. gsi_evt_ring_props.ring_base_addr =
  96. (u64)info->event_ring_base_pa;
  97. } else {
  98. len = info_smmu->event_ring_size;
  99. if (dir == IPA_WDI3_TX_DIR) {
  100. if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_RING_RES,
  101. true, info->event_ring_base_pa,
  102. &info_smmu->event_ring_base, len,
  103. false, &va)) {
  104. IPAERR("failed to get smmu mapping\n");
  105. return -EFAULT;
  106. }
  107. } else {
  108. if (ipa_create_gsi_smmu_mapping(
  109. IPA_WDI_RX_COMP_RING_RES, true,
  110. info->event_ring_base_pa,
  111. &info_smmu->event_ring_base, len,
  112. false, &va)) {
  113. IPAERR("failed to get smmu mapping\n");
  114. return -EFAULT;
  115. }
  116. }
  117. gsi_evt_ring_props.ring_len = len;
  118. gsi_evt_ring_props.ring_base_addr = (u64)va;
  119. }
  120. gsi_evt_ring_props.int_modt = IPA_WDI3_GSI_EVT_RING_INT_MODT;
  121. gsi_evt_ring_props.int_modc = 1;
  122. gsi_evt_ring_props.exclusive = true;
  123. gsi_evt_ring_props.err_cb = ipa3_wdi3_gsi_evt_ring_err_cb;
  124. gsi_evt_ring_props.user_data = NULL;
  125. result = gsi_alloc_evt_ring(&gsi_evt_ring_props, ipa3_ctx->gsi_dev_hdl,
  126. &ep->gsi_evt_ring_hdl);
  127. if (result != GSI_STATUS_SUCCESS) {
  128. IPAERR("fail to alloc RX event ring\n");
  129. result = -EFAULT;
  130. goto fail_smmu_mapping;
  131. }
  132. ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
  133. ep->gsi_mem_info.evt_ring_base_addr =
  134. gsi_evt_ring_props.ring_base_addr;
  135. /* setup channel ring */
  136. memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
  137. gsi_channel_props.prot = GSI_CHAN_PROT_WDI3;
  138. if (dir == IPA_WDI3_TX_DIR)
  139. gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
  140. else
  141. gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
  142. gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
  143. if (!gsi_ep_info) {
  144. IPAERR("Failed getting GSI EP info for client=%d\n",
  145. ep->client);
  146. result = -EINVAL;
  147. goto fail_get_gsi_ep_info;
  148. } else
  149. gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
  150. gsi_channel_props.db_in_bytes = 0;
  151. gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
  152. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_9) {
  153. /* 32 (for Tx) and 64 (for Rx) */
  154. if (dir == IPA_WDI3_TX_DIR)
  155. gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_32B;
  156. else
  157. gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_64B;
  158. } else
  159. gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
  160. gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
  161. gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
  162. gsi_channel_props.prefetch_mode =
  163. gsi_ep_info->prefetch_mode;
  164. gsi_channel_props.empty_lvl_threshold =
  165. gsi_ep_info->prefetch_threshold;
  166. gsi_channel_props.low_weight = 1;
  167. gsi_channel_props.err_cb = ipa3_wdi3_gsi_chan_err_cb;
  168. if (!is_smmu_enabled) {
  169. gsi_channel_props.ring_len = (u16)info->transfer_ring_size;
  170. gsi_channel_props.ring_base_addr =
  171. (u64)info->transfer_ring_base_pa;
  172. } else {
  173. len = info_smmu->transfer_ring_size;
  174. if (dir == IPA_WDI3_TX_DIR) {
  175. if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_RING_RES,
  176. true, info->transfer_ring_base_pa,
  177. &info_smmu->transfer_ring_base, len,
  178. false, &va)) {
  179. IPAERR("failed to get smmu mapping\n");
  180. result = -EFAULT;
  181. goto fail_get_gsi_ep_info;
  182. }
  183. } else {
  184. if (ipa_create_gsi_smmu_mapping(
  185. IPA_WDI_RX_RING_RES, true,
  186. info->transfer_ring_base_pa,
  187. &info_smmu->transfer_ring_base, len,
  188. false, &va)) {
  189. IPAERR("failed to get smmu mapping\n");
  190. result = -EFAULT;
  191. goto fail_get_gsi_ep_info;
  192. }
  193. }
  194. gsi_channel_props.ring_len = len;
  195. gsi_channel_props.ring_base_addr = (u64)va;
  196. }
  197. result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
  198. &ep->gsi_chan_hdl);
  199. if (result != GSI_STATUS_SUCCESS)
  200. goto fail_get_gsi_ep_info;
  201. ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
  202. ep->gsi_mem_info.chan_ring_base_addr =
  203. gsi_channel_props.ring_base_addr;
  204. /* write event scratch */
  205. memset(&evt_scratch, 0, sizeof(evt_scratch));
  206. evt_scratch.wdi3.update_rp_moderation_config =
  207. UPDATE_RP_MODERATION_CONFIG;
  208. result = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl, evt_scratch);
  209. if (result != GSI_STATUS_SUCCESS) {
  210. IPAERR("failed to write evt ring scratch\n");
  211. goto fail_write_scratch;
  212. }
  213. if (!is_smmu_enabled) {
  214. IPADBG("smmu disabled\n");
  215. if (info->is_evt_rn_db_pcie_addr == true)
  216. IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
  217. else
  218. IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
  219. IPADBG_LOW("LSB 0x%x\n",
  220. (u32)info->event_ring_doorbell_pa);
  221. IPADBG_LOW("MSB 0x%x\n",
  222. (u32)((u64)info->event_ring_doorbell_pa >> 32));
  223. } else {
  224. IPADBG("smmu enabled\n");
  225. if (info_smmu->is_evt_rn_db_pcie_addr == true)
  226. IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
  227. else
  228. IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
  229. IPADBG_LOW("LSB 0x%x\n",
  230. (u32)info_smmu->event_ring_doorbell_pa);
  231. IPADBG_LOW("MSB 0x%x\n",
  232. (u32)((u64)info_smmu->event_ring_doorbell_pa >> 32));
  233. }
  234. if (!is_smmu_enabled) {
  235. addr_low = (u32)info->event_ring_doorbell_pa;
  236. addr_high = (u32)((u64)info->event_ring_doorbell_pa >> 32);
  237. } else {
  238. if (dir == IPA_WDI3_TX_DIR) {
  239. if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES,
  240. true, info_smmu->event_ring_doorbell_pa,
  241. NULL, 4, true, &va)) {
  242. IPAERR("failed to get smmu mapping\n");
  243. result = -EFAULT;
  244. goto fail_write_scratch;
  245. }
  246. } else {
  247. if (ipa_create_gsi_smmu_mapping(
  248. IPA_WDI_RX_COMP_RING_WP_RES,
  249. true, info_smmu->event_ring_doorbell_pa,
  250. NULL, 4, true, &va)) {
  251. IPAERR("failed to get smmu mapping\n");
  252. result = -EFAULT;
  253. goto fail_write_scratch;
  254. }
  255. }
  256. addr_low = (u32)va;
  257. addr_high = (u32)((u64)va >> 32);
  258. }
  259. /*
  260. * Arch specific:
  261. * pcie addr which are not via smmu, use pa directly!
  262. * pcie and DDR via 2 different port
  263. * assert bit 40 to indicate it is pcie addr
  264. * WDI-3.0, MSM --> pcie via smmu
  265. * WDI-3.0, MDM --> pcie not via smmu + dual port
  266. * assert bit 40 in case
  267. */
  268. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  269. is_smmu_enabled) {
  270. /*
  271. * Ir-respective of smmu enabled don't use IOVA addr
  272. * since pcie not via smmu in MDM's
  273. */
  274. if (info_smmu->is_evt_rn_db_pcie_addr == true) {
  275. addr_low = (u32)info_smmu->event_ring_doorbell_pa;
  276. addr_high =
  277. (u32)((u64)info_smmu->event_ring_doorbell_pa
  278. >> 32);
  279. }
  280. }
  281. /*
  282. * GSI recomendation to set bit-40 for (mdm targets && pcie addr)
  283. * from wdi-3.0 interface document
  284. */
  285. if (!is_smmu_enabled) {
  286. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  287. info->is_evt_rn_db_pcie_addr)
  288. addr_high |= (1 << 8);
  289. } else {
  290. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  291. info_smmu->is_evt_rn_db_pcie_addr)
  292. addr_high |= (1 << 8);
  293. }
  294. gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
  295. addr_low,
  296. addr_high);
  297. /* write channel scratch */
  298. memset(&ch_scratch, 0, sizeof(ch_scratch));
  299. ch_scratch.wdi3.update_rp_moderation_threshold =
  300. UPDATE_RP_MODERATION_THRESHOLD;
  301. if (dir == IPA_WDI3_RX_DIR) {
  302. if (!is_smmu_enabled)
  303. ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
  304. else
  305. ch_scratch.wdi3.rx_pkt_offset = info_smmu->pkt_offset;
  306. /* this metadata reg offset need to be in words */
  307. ch_scratch.wdi3.endp_metadata_reg_offset =
  308. ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
  309. gsi_ep_info->ipa_ep_num) / 4;
  310. }
  311. if (!is_smmu_enabled) {
  312. IPADBG_LOW("smmu disabled\n");
  313. if (info->is_txr_rn_db_pcie_addr == true)
  314. IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
  315. else
  316. IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
  317. IPADBG_LOW("LSB 0x%x\n",
  318. (u32)info->transfer_ring_doorbell_pa);
  319. IPADBG_LOW("MSB 0x%x\n",
  320. (u32)((u64)info->transfer_ring_doorbell_pa >> 32));
  321. } else {
  322. IPADBG_LOW("smmu eabled\n");
  323. if (info_smmu->is_txr_rn_db_pcie_addr == true)
  324. IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
  325. else
  326. IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
  327. IPADBG_LOW("LSB 0x%x\n",
  328. (u32)info_smmu->transfer_ring_doorbell_pa);
  329. IPADBG_LOW("MSB 0x%x\n",
  330. (u32)((u64)info_smmu->transfer_ring_doorbell_pa >> 32));
  331. }
  332. if (!is_smmu_enabled) {
  333. ch_scratch.wdi3.wifi_rp_address_low =
  334. (u32)info->transfer_ring_doorbell_pa;
  335. ch_scratch.wdi3.wifi_rp_address_high =
  336. (u32)((u64)info->transfer_ring_doorbell_pa >> 32);
  337. } else {
  338. if (dir == IPA_WDI3_TX_DIR) {
  339. if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_DB_RES,
  340. true, info_smmu->transfer_ring_doorbell_pa,
  341. NULL, 4, true, &va)) {
  342. IPAERR("failed to get smmu mapping\n");
  343. result = -EFAULT;
  344. goto fail_write_scratch;
  345. }
  346. ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
  347. ch_scratch.wdi3.wifi_rp_address_high =
  348. (u32)((u64)va >> 32);
  349. } else {
  350. if (ipa_create_gsi_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
  351. true, info_smmu->transfer_ring_doorbell_pa,
  352. NULL, 4, true, &va)) {
  353. IPAERR("failed to get smmu mapping\n");
  354. result = -EFAULT;
  355. goto fail_write_scratch;
  356. }
  357. ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
  358. ch_scratch.wdi3.wifi_rp_address_high =
  359. (u32)((u64)va >> 32);
  360. }
  361. }
  362. /*
  363. * Arch specific:
  364. * pcie addr which are not via smmu, use pa directly!
  365. * pcie and DDR via 2 different port
  366. * assert bit 40 to indicate it is pcie addr
  367. * WDI-3.0, MSM --> pcie via smmu
  368. * WDI-3.0, MDM --> pcie not via smmu + dual port
  369. * assert bit 40 in case
  370. */
  371. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  372. is_smmu_enabled) {
  373. /*
  374. * Ir-respective of smmu enabled don't use IOVA addr
  375. * since pcie not via smmu in MDM's
  376. */
  377. if (info_smmu->is_txr_rn_db_pcie_addr == true) {
  378. ch_scratch.wdi3.wifi_rp_address_low =
  379. (u32)info_smmu->transfer_ring_doorbell_pa;
  380. ch_scratch.wdi3.wifi_rp_address_high =
  381. (u32)((u64)info_smmu->transfer_ring_doorbell_pa
  382. >> 32);
  383. }
  384. }
  385. /*
  386. * GSI recomendation to set bit-40 for (mdm targets && pcie addr)
  387. * from wdi-3.0 interface document
  388. */
  389. if (!is_smmu_enabled) {
  390. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  391. info->is_txr_rn_db_pcie_addr)
  392. ch_scratch.wdi3.wifi_rp_address_high =
  393. (u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
  394. (1 << 8));
  395. } else {
  396. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  397. info_smmu->is_txr_rn_db_pcie_addr)
  398. ch_scratch.wdi3.wifi_rp_address_high =
  399. (u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
  400. (1 << 8));
  401. }
  402. result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
  403. if (result != GSI_STATUS_SUCCESS) {
  404. IPAERR("failed to write evt ring scratch\n");
  405. goto fail_write_scratch;
  406. }
  407. return 0;
  408. fail_write_scratch:
  409. gsi_dealloc_channel(ep->gsi_chan_hdl);
  410. ep->gsi_chan_hdl = ~0;
  411. fail_get_gsi_ep_info:
  412. gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
  413. ep->gsi_evt_ring_hdl = ~0;
  414. fail_smmu_mapping:
  415. ipa3_release_wdi3_gsi_smmu_mappings(dir);
  416. return result;
  417. }
  418. int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in,
  419. struct ipa_wdi_conn_out_params *out,
  420. ipa_wdi_meter_notifier_cb wdi_notify)
  421. {
  422. enum ipa_client_type rx_client;
  423. enum ipa_client_type tx_client;
  424. struct ipa3_ep_context *ep_rx;
  425. struct ipa3_ep_context *ep_tx;
  426. int ipa_ep_idx_rx;
  427. int ipa_ep_idx_tx;
  428. int result = 0;
  429. u32 gsi_db_addr_low, gsi_db_addr_high;
  430. void __iomem *db_addr;
  431. u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
  432. /* wdi3 only support over gsi */
  433. if (!ipa3_ctx->ipa_wdi3_over_gsi) {
  434. IPAERR("wdi3 over uc offload not supported");
  435. WARN_ON(1);
  436. return -EFAULT;
  437. }
  438. if (in == NULL || out == NULL) {
  439. IPAERR("invalid input\n");
  440. return -EINVAL;
  441. }
  442. if (in->is_smmu_enabled == false) {
  443. rx_client = in->u_rx.rx.client;
  444. tx_client = in->u_tx.tx.client;
  445. } else {
  446. rx_client = in->u_rx.rx_smmu.client;
  447. tx_client = in->u_tx.tx_smmu.client;
  448. }
  449. ipa_ep_idx_rx = ipa_get_ep_mapping(rx_client);
  450. ipa_ep_idx_tx = ipa_get_ep_mapping(tx_client);
  451. if (ipa_ep_idx_rx == -1 || ipa_ep_idx_tx == -1) {
  452. IPAERR("fail to alloc EP.\n");
  453. return -EFAULT;
  454. }
  455. if (ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES ||
  456. ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES) {
  457. IPAERR("ep out of range.\n");
  458. return -EFAULT;
  459. }
  460. ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
  461. ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
  462. if (ep_rx->valid || ep_tx->valid) {
  463. IPAERR("EP already allocated.\n");
  464. return -EFAULT;
  465. }
  466. memset(ep_rx, 0, offsetof(struct ipa3_ep_context, sys));
  467. memset(ep_tx, 0, offsetof(struct ipa3_ep_context, sys));
  468. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  469. #ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
  470. if (wdi_notify)
  471. ipa3_ctx->uc_wdi_ctx.stats_notify = wdi_notify;
  472. else
  473. IPADBG("wdi_notify is null\n");
  474. #endif
  475. /* setup rx ep cfg */
  476. ep_rx->valid = 1;
  477. ep_rx->client = rx_client;
  478. result = ipa3_disable_data_path(ipa_ep_idx_rx);
  479. if (result) {
  480. IPAERR("disable data path failed res=%d clnt=%d.\n", result,
  481. ipa_ep_idx_rx);
  482. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  483. return -EFAULT;
  484. }
  485. ep_rx->client_notify = in->notify;
  486. ep_rx->priv = in->priv;
  487. if (in->is_smmu_enabled == false)
  488. memcpy(&ep_rx->cfg, &in->u_rx.rx.ipa_ep_cfg,
  489. sizeof(ep_rx->cfg));
  490. else
  491. memcpy(&ep_rx->cfg, &in->u_rx.rx_smmu.ipa_ep_cfg,
  492. sizeof(ep_rx->cfg));
  493. if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) {
  494. IPAERR("fail to setup rx pipe cfg\n");
  495. result = -EFAULT;
  496. goto fail;
  497. }
  498. IPADBG("ipa3_ctx->ipa_wdi3_over_gsi %d\n",
  499. ipa3_ctx->ipa_wdi3_over_gsi);
  500. /* setup RX gsi channel */
  501. if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
  502. &in->u_rx.rx, &in->u_rx.rx_smmu, IPA_WDI3_RX_DIR,
  503. ep_rx)) {
  504. IPAERR("fail to setup wdi3 gsi rx channel\n");
  505. result = -EFAULT;
  506. goto fail;
  507. }
  508. if (gsi_query_channel_db_addr(ep_rx->gsi_chan_hdl,
  509. &gsi_db_addr_low, &gsi_db_addr_high)) {
  510. IPAERR("failed to query gsi rx db addr\n");
  511. result = -EFAULT;
  512. goto fail;
  513. }
  514. /* only 32 bit lsb is used */
  515. out->rx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
  516. IPADBG("out->rx_uc_db_pa %llu\n", out->rx_uc_db_pa);
  517. ipa3_install_dflt_flt_rules(ipa_ep_idx_rx);
  518. IPADBG("client %d (ep: %d) connected\n", rx_client,
  519. ipa_ep_idx_rx);
  520. /* setup tx ep cfg */
  521. ep_tx->valid = 1;
  522. ep_tx->client = tx_client;
  523. result = ipa3_disable_data_path(ipa_ep_idx_tx);
  524. if (result) {
  525. IPAERR("disable data path failed res=%d ep=%d.\n", result,
  526. ipa_ep_idx_tx);
  527. result = -EFAULT;
  528. goto fail;
  529. }
  530. if (in->is_smmu_enabled == false)
  531. memcpy(&ep_tx->cfg, &in->u_tx.tx.ipa_ep_cfg,
  532. sizeof(ep_tx->cfg));
  533. else
  534. memcpy(&ep_tx->cfg, &in->u_tx.tx_smmu.ipa_ep_cfg,
  535. sizeof(ep_tx->cfg));
  536. ep_tx->cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
  537. ep_tx->cfg.aggr.aggr = IPA_GENERIC;
  538. ep_tx->cfg.aggr.aggr_byte_limit = IPA_WLAN_AGGR_BYTE_LIMIT;
  539. ep_tx->cfg.aggr.aggr_pkt_limit = IPA_WLAN_AGGR_PKT_LIMIT;
  540. ep_tx->cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;
  541. if (ipa3_cfg_ep(ipa_ep_idx_tx, &ep_tx->cfg)) {
  542. IPAERR("fail to setup tx pipe cfg\n");
  543. result = -EFAULT;
  544. goto fail;
  545. }
  546. /* setup TX gsi channel */
  547. if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
  548. &in->u_tx.tx, &in->u_tx.tx_smmu, IPA_WDI3_TX_DIR,
  549. ep_tx)) {
  550. IPAERR("fail to setup wdi3 gsi tx channel\n");
  551. result = -EFAULT;
  552. goto fail;
  553. }
  554. if (gsi_query_channel_db_addr(ep_tx->gsi_chan_hdl,
  555. &gsi_db_addr_low, &gsi_db_addr_high)) {
  556. IPAERR("failed to query gsi tx db addr\n");
  557. result = -EFAULT;
  558. goto fail;
  559. }
  560. /* only 32 bit lsb is used */
  561. out->tx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
  562. IPADBG("out->tx_uc_db_pa %llu\n", out->tx_uc_db_pa);
  563. IPADBG("client %d (ep: %d) connected\n", tx_client,
  564. ipa_ep_idx_tx);
  565. /* ring initial event ring dbs */
  566. gsi_query_evt_ring_db_addr(ep_rx->gsi_evt_ring_hdl,
  567. &evt_ring_db_addr_low, &evt_ring_db_addr_high);
  568. IPADBG("evt_ring_hdl %lu, db_addr_low %u db_addr_high %u\n",
  569. ep_rx->gsi_evt_ring_hdl, evt_ring_db_addr_low,
  570. evt_ring_db_addr_high);
  571. /* only 32 bit lsb is used */
  572. db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
  573. /*
  574. * IPA/GSI driver should ring the event DB once after
  575. * initialization of the event, with a value that is
  576. * outside of the ring range. Eg: ring base = 0x1000,
  577. * ring size = 0x100 => AP can write value > 0x1100
  578. * into the doorbell address. Eg: 0x 1110
  579. */
  580. iowrite32(in->u_rx.rx.event_ring_size / 4 + 10, db_addr);
  581. gsi_query_evt_ring_db_addr(ep_tx->gsi_evt_ring_hdl,
  582. &evt_ring_db_addr_low, &evt_ring_db_addr_high);
  583. /* only 32 bit lsb is used */
  584. db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
  585. /*
  586. * IPA/GSI driver should ring the event DB once after
  587. * initialization of the event, with a value that is
  588. * outside of the ring range. Eg: ring base = 0x1000,
  589. * ring size = 0x100 => AP can write value > 0x1100
  590. * into the doorbell address. Eg: 0x 1110
  591. */
  592. iowrite32(in->u_tx.tx.event_ring_size / 4 + 10, db_addr);
  593. fail:
  594. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  595. return result;
  596. }
/**
 * ipa3_disconn_wdi3_pipes() - tear down the WDI3 Tx and Rx pipes.
 * @ipa_ep_idx_tx: IPA endpoint index of the Tx (consumer) pipe.
 * @ipa_ep_idx_rx: IPA endpoint index of the Rx (producer) pipe.
 *
 * For each pipe, in order: reset the GSI channel, reset its event ring,
 * then release the channel. Tx is torn down before Rx; the Rx teardown
 * also removes the default filter rules installed at connect time.
 * Any failure aborts the sequence and returns the GSI/IPA error code.
 */
int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
{
	struct ipa3_ep_context *ep_tx, *ep_rx;
	int result = 0;

	/* wdi3 only support over gsi */
	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
		IPAERR("wdi3 over uc offload not supported");
		WARN_ON(1);
		return -EFAULT;
	}

	IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
	IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);

	if (ipa_ep_idx_tx < 0 || ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES ||
		ipa_ep_idx_rx < 0 || ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES) {
		IPAERR("invalid ipa ep index\n");
		return -EINVAL;
	}

	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
	/* vote for IPA clocks while GSI teardown is in flight */
	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));

	/* tear down tx pipe */
	result = ipa3_reset_gsi_channel(ipa_ep_idx_tx);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("failed to reset gsi channel: %d.\n", result);
		goto exit;
	}
	result = gsi_reset_evt_ring(ep_tx->gsi_evt_ring_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("failed to reset evt ring: %d.\n", result);
		goto exit;
	}
	result = ipa3_release_gsi_channel(ipa_ep_idx_tx);
	if (result) {
		IPAERR("failed to release gsi channel: %d\n", result);
		goto exit;
	}
	/* full wipe (including sys) — pipe is no longer in use */
	memset(ep_tx, 0, sizeof(struct ipa3_ep_context));
	IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx);

	/* tear down rx pipe */
	result = ipa3_reset_gsi_channel(ipa_ep_idx_rx);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("failed to reset gsi channel: %d.\n", result);
		goto exit;
	}
	result = gsi_reset_evt_ring(ep_rx->gsi_evt_ring_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("failed to reset evt ring: %d.\n", result);
		goto exit;
	}
	result = ipa3_release_gsi_channel(ipa_ep_idx_rx);
	if (result) {
		IPAERR("failed to release gsi channel: %d\n", result);
		goto exit;
	}
	/* drop the uC debug-stats context allocated while pipes were up */
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
		ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_WDI3);
	ipa3_delete_dflt_flt_rules(ipa_ep_idx_rx);
	memset(ep_rx, 0, sizeof(struct ipa3_ep_context));
	IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx);

exit:
	/*
	 * NOTE(review): the vote above uses ipa3_get_client_mapping() but
	 * the release here uses ipa3_get_client_by_pipe(); after the
	 * memset of ep_tx these may resolve differently — confirm they
	 * agree for the WDI3 tx client.
	 */
	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_by_pipe(ipa_ep_idx_tx));
	return result;
}
  660. int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
  661. {
  662. struct ipa3_ep_context *ep_tx, *ep_rx;
  663. int result = 0;
  664. /* wdi3 only support over gsi */
  665. if (!ipa3_ctx->ipa_wdi3_over_gsi) {
  666. IPAERR("wdi3 over uc offload not supported");
  667. WARN_ON(1);
  668. return -EFAULT;
  669. }
  670. IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
  671. IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
  672. ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
  673. ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
  674. IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
  675. /* start uC event ring */
  676. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
  677. if (ipa3_ctx->uc_ctx.uc_loaded &&
  678. !ipa3_ctx->uc_ctx.uc_event_ring_valid) {
  679. if (ipa3_uc_setup_event_ring()) {
  680. IPAERR("failed to set uc_event ring\n");
  681. return -EFAULT;
  682. }
  683. } else
  684. IPAERR("uc-loaded %d, ring-valid %d\n",
  685. ipa3_ctx->uc_ctx.uc_loaded,
  686. ipa3_ctx->uc_ctx.uc_event_ring_valid);
  687. }
  688. /* enable data path */
  689. result = ipa3_enable_data_path(ipa_ep_idx_rx);
  690. if (result) {
  691. IPAERR("enable data path failed res=%d clnt=%d\n", result,
  692. ipa_ep_idx_rx);
  693. goto exit;
  694. }
  695. result = ipa3_enable_data_path(ipa_ep_idx_tx);
  696. if (result) {
  697. IPAERR("enable data path failed res=%d clnt=%d\n", result,
  698. ipa_ep_idx_tx);
  699. goto fail_enable_path1;
  700. }
  701. /* start gsi tx channel */
  702. result = gsi_start_channel(ep_tx->gsi_chan_hdl);
  703. if (result) {
  704. IPAERR("failed to start gsi tx channel\n");
  705. goto fail_enable_path2;
  706. }
  707. /* start gsi rx channel */
  708. result = gsi_start_channel(ep_rx->gsi_chan_hdl);
  709. if (result) {
  710. IPAERR("failed to start gsi rx channel\n");
  711. goto fail_start_channel1;
  712. }
  713. /* start uC gsi dbg stats monitor */
  714. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
  715. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
  716. = ep_rx->gsi_chan_hdl;
  717. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
  718. = DIR_PRODUCER;
  719. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
  720. = ep_tx->gsi_chan_hdl;
  721. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
  722. = DIR_CONSUMER;
  723. ipa3_uc_debug_stats_alloc(
  724. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
  725. }
  726. goto exit;
  727. fail_start_channel1:
  728. gsi_stop_channel(ep_tx->gsi_chan_hdl);
  729. fail_enable_path2:
  730. ipa3_disable_data_path(ipa_ep_idx_tx);
  731. fail_enable_path1:
  732. ipa3_disable_data_path(ipa_ep_idx_rx);
  733. exit:
  734. IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
  735. return result;
  736. }
  737. int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
  738. {
  739. int result = 0;
  740. struct ipa3_ep_context *ep;
  741. u32 source_pipe_bitmask = 0;
  742. bool disable_force_clear = false;
  743. struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
  744. /* wdi3 only support over gsi */
  745. if (!ipa3_ctx->ipa_wdi3_over_gsi) {
  746. IPAERR("wdi3 over uc offload not supported");
  747. WARN_ON(1);
  748. return -EFAULT;
  749. }
  750. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  751. /* disable tx data path */
  752. result = ipa3_disable_data_path(ipa_ep_idx_tx);
  753. if (result) {
  754. IPAERR("enable data path failed res=%d clnt=%d.\n", result,
  755. ipa_ep_idx_tx);
  756. result = -EFAULT;
  757. goto fail;
  758. }
  759. /* disable rx data path */
  760. result = ipa3_disable_data_path(ipa_ep_idx_rx);
  761. if (result) {
  762. IPAERR("disable data path failed res=%d clnt=%d.\n", result,
  763. ipa_ep_idx_rx);
  764. result = -EFAULT;
  765. goto fail;
  766. }
  767. /*
  768. * For WDI 3.0 need to ensure pipe will be empty before suspend
  769. * as IPA uC will fail to suspend the pipe otherwise.
  770. */
  771. ep = &ipa3_ctx->ep[ipa_ep_idx_rx];
  772. source_pipe_bitmask = 1 <<
  773. ipa3_get_ep_mapping(ep->client);
  774. result = ipa3_enable_force_clear(ipa_ep_idx_rx,
  775. false, source_pipe_bitmask);
  776. if (result) {
  777. /*
  778. * assuming here modem SSR, AP can remove
  779. * the delay in this case
  780. */
  781. IPAERR("failed to force clear %d\n", result);
  782. IPAERR("remove delay from SCND reg\n");
  783. ep_ctrl_scnd.endp_delay = false;
  784. ipahal_write_reg_n_fields(
  785. IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx_rx,
  786. &ep_ctrl_scnd);
  787. } else {
  788. disable_force_clear = true;
  789. }
  790. /* stop gsi rx channel */
  791. result = ipa3_stop_gsi_channel(ipa_ep_idx_rx);
  792. if (result) {
  793. IPAERR("failed to stop gsi rx channel\n");
  794. result = -EFAULT;
  795. goto fail;
  796. }
  797. /* stop gsi tx channel */
  798. result = ipa3_stop_gsi_channel(ipa_ep_idx_tx);
  799. if (result) {
  800. IPAERR("failed to stop gsi tx channel\n");
  801. result = -EFAULT;
  802. goto fail;
  803. }
  804. /* stop uC gsi dbg stats monitor */
  805. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
  806. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
  807. = 0xff;
  808. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
  809. = DIR_PRODUCER;
  810. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
  811. = 0xff;
  812. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
  813. = DIR_CONSUMER;
  814. ipa3_uc_debug_stats_alloc(
  815. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
  816. }
  817. if (disable_force_clear)
  818. ipa3_disable_force_clear(ipa_ep_idx_rx);
  819. fail:
  820. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  821. return result;
  822. }
  823. int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id)
  824. {
  825. int result = 0;
  826. struct ipa3_ep_context *ep;
  827. union __packed gsi_wdi3_channel_scratch2_reg scratch2_reg;
  828. memset(&scratch2_reg, 0, sizeof(scratch2_reg));
  829. if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
  830. ipa3_ctx->ep[clnt_hdl].valid == 0) {
  831. IPAERR_RL("bad parm, %d\n", clnt_hdl);
  832. return -EINVAL;
  833. }
  834. ep = &ipa3_ctx->ep[clnt_hdl];
  835. IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
  836. result = gsi_read_wdi3_channel_scratch2_reg(ep->gsi_chan_hdl,
  837. &scratch2_reg);
  838. if (result != GSI_STATUS_SUCCESS) {
  839. IPAERR("failed to read channel scratch2 reg %d\n", result);
  840. goto exit;
  841. }
  842. scratch2_reg.wdi.qmap_id = qmap_id;
  843. result = gsi_write_wdi3_channel_scratch2_reg(ep->gsi_chan_hdl,
  844. scratch2_reg);
  845. if (result != GSI_STATUS_SUCCESS) {
  846. IPAERR("failed to write channel scratch2 reg %d\n", result);
  847. goto exit;
  848. }
  849. exit:
  850. IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
  851. return result;
  852. }
  853. /**
  854. * ipa3_get_wdi3_gsi_stats() - Query WDI3 gsi stats from uc
  855. * @stats: [inout] stats blob from client populated by driver
  856. *
  857. * Returns: 0 on success, negative on failure
  858. *
  859. * @note Cannot be called from atomic context
  860. *
  861. */
  862. int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
  863. {
  864. int i;
  865. if (!ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio) {
  866. IPAERR("bad NULL parms for wdi3_gsi_stats\n");
  867. return -EINVAL;
  868. }
  869. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  870. for (i = 0; i < MAX_WDI3_CHANNELS; i++) {
  871. stats->ring[i].ringFull = ioread32(
  872. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  873. + i * IPA3_UC_DEBUG_STATS_OFF +
  874. IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
  875. stats->ring[i].ringEmpty = ioread32(
  876. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  877. + i * IPA3_UC_DEBUG_STATS_OFF +
  878. IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
  879. stats->ring[i].ringUsageHigh = ioread32(
  880. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  881. + i * IPA3_UC_DEBUG_STATS_OFF +
  882. IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
  883. stats->ring[i].ringUsageLow = ioread32(
  884. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  885. + i * IPA3_UC_DEBUG_STATS_OFF +
  886. IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
  887. stats->ring[i].RingUtilCount = ioread32(
  888. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  889. + i * IPA3_UC_DEBUG_STATS_OFF +
  890. IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
  891. }
  892. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  893. return 0;
  894. }