  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2018 - 2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include "ipa_i.h"
  6. #include <linux/ipa_wdi3.h>
  7. #define UPDATE_RP_MODERATION_CONFIG 1
  8. #define UPDATE_RP_MODERATION_THRESHOLD 8
  9. #define IPA_WLAN_AGGR_PKT_LIMIT 1
  10. #define IPA_WLAN_AGGR_BYTE_LIMIT 2 /*2 Kbytes Agger hard byte limit*/
  11. #define IPA_WDI3_GSI_EVT_RING_INT_MODT 32
/*
 * ipa3_wdi3_gsi_evt_ring_err_cb() - GSI event-ring error callback.
 * @notify: error descriptor supplied by the GSI layer
 *
 * Logs which event-ring error was reported, then asserts: every error
 * delivered here is treated as fatal for the WDI3 data path.
 */
static void ipa3_wdi3_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_EVT_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_EVT_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_EVT_EVT_RING_EMPTY_ERR:
		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
	/* unconditional: all event-ring errors are unrecoverable here */
	ipa_assert();
}
/*
 * ipa3_wdi3_gsi_chan_err_cb() - GSI channel error callback.
 * @notify: error descriptor supplied by the GSI layer
 *
 * Logs which channel error was reported, then asserts: every error
 * delivered here is treated as fatal for the WDI3 data path.
 */
static void ipa3_wdi3_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_CHAN_INVALID_TRE_ERR:
		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
		break;
	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_CHAN_HWO_1_ERR:
		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
	/* unconditional: all channel errors are unrecoverable here */
	ipa_assert();
}
  58. static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
  59. struct ipa_wdi_pipe_setup_info *info,
  60. struct ipa_wdi_pipe_setup_info_smmu *info_smmu, u8 dir,
  61. struct ipa3_ep_context *ep)
  62. {
  63. struct gsi_evt_ring_props gsi_evt_ring_props;
  64. struct gsi_chan_props gsi_channel_props;
  65. union __packed gsi_channel_scratch ch_scratch;
  66. union __packed gsi_evt_scratch evt_scratch;
  67. const struct ipa_gsi_ep_config *gsi_ep_info;
  68. int result, len;
  69. unsigned long va;
  70. uint32_t addr_low, addr_high;
  71. if (!info || !info_smmu || !ep) {
  72. IPAERR("invalid input\n");
  73. return -EINVAL;
  74. }
  75. /* setup event ring */
  76. memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
  77. gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_WDI3_EV;
  78. gsi_evt_ring_props.intr = GSI_INTR_IRQ;
  79. /* 16 (for Tx) and 8 (for Rx) */
  80. if (dir == IPA_WDI3_TX_DIR)
  81. gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
  82. else
  83. gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_8B;
  84. if (!is_smmu_enabled) {
  85. gsi_evt_ring_props.ring_len = info->event_ring_size;
  86. gsi_evt_ring_props.ring_base_addr =
  87. (u64)info->event_ring_base_pa;
  88. } else {
  89. len = info_smmu->event_ring_size;
  90. if (dir == IPA_WDI3_TX_DIR) {
  91. if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_RING_RES,
  92. true, info->event_ring_base_pa,
  93. &info_smmu->event_ring_base, len,
  94. false, &va)) {
  95. IPAERR("failed to get smmu mapping\n");
  96. return -EFAULT;
  97. }
  98. } else {
  99. if (ipa_create_gsi_smmu_mapping(
  100. IPA_WDI_RX_COMP_RING_RES, true,
  101. info->event_ring_base_pa,
  102. &info_smmu->event_ring_base, len,
  103. false, &va)) {
  104. IPAERR("failed to get smmu mapping\n");
  105. return -EFAULT;
  106. }
  107. }
  108. gsi_evt_ring_props.ring_len = len;
  109. gsi_evt_ring_props.ring_base_addr = (u64)va;
  110. }
  111. gsi_evt_ring_props.int_modt = IPA_WDI3_GSI_EVT_RING_INT_MODT;
  112. gsi_evt_ring_props.int_modc = 1;
  113. gsi_evt_ring_props.exclusive = true;
  114. gsi_evt_ring_props.err_cb = ipa3_wdi3_gsi_evt_ring_err_cb;
  115. gsi_evt_ring_props.user_data = NULL;
  116. result = gsi_alloc_evt_ring(&gsi_evt_ring_props, ipa3_ctx->gsi_dev_hdl,
  117. &ep->gsi_evt_ring_hdl);
  118. if (result != GSI_STATUS_SUCCESS) {
  119. IPAERR("fail to alloc RX event ring\n");
  120. result = -EFAULT;
  121. goto fail_smmu_mapping;
  122. }
  123. ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
  124. ep->gsi_mem_info.evt_ring_base_addr =
  125. gsi_evt_ring_props.ring_base_addr;
  126. /* setup channel ring */
  127. memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
  128. gsi_channel_props.prot = GSI_CHAN_PROT_WDI3;
  129. if (dir == IPA_WDI3_TX_DIR)
  130. gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
  131. else
  132. gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
  133. gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
  134. if (!gsi_ep_info) {
  135. IPAERR("Failed getting GSI EP info for client=%d\n",
  136. ep->client);
  137. result = -EINVAL;
  138. goto fail_get_gsi_ep_info;
  139. } else
  140. gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
  141. gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
  142. gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
  143. gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
  144. gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
  145. gsi_channel_props.prefetch_mode =
  146. gsi_ep_info->prefetch_mode;
  147. gsi_channel_props.empty_lvl_threshold =
  148. gsi_ep_info->prefetch_threshold;
  149. gsi_channel_props.low_weight = 1;
  150. gsi_channel_props.err_cb = ipa3_wdi3_gsi_chan_err_cb;
  151. if (!is_smmu_enabled) {
  152. gsi_channel_props.ring_len = (u16)info->transfer_ring_size;
  153. gsi_channel_props.ring_base_addr =
  154. (u64)info->transfer_ring_base_pa;
  155. } else {
  156. len = info_smmu->transfer_ring_size;
  157. if (dir == IPA_WDI3_TX_DIR) {
  158. if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_RING_RES,
  159. true, info->transfer_ring_base_pa,
  160. &info_smmu->transfer_ring_base, len,
  161. false, &va)) {
  162. IPAERR("failed to get smmu mapping\n");
  163. result = -EFAULT;
  164. goto fail_get_gsi_ep_info;
  165. }
  166. } else {
  167. if (ipa_create_gsi_smmu_mapping(
  168. IPA_WDI_RX_RING_RES, true,
  169. info->transfer_ring_base_pa,
  170. &info_smmu->transfer_ring_base, len,
  171. false, &va)) {
  172. IPAERR("failed to get smmu mapping\n");
  173. result = -EFAULT;
  174. goto fail_get_gsi_ep_info;
  175. }
  176. }
  177. gsi_channel_props.ring_len = len;
  178. gsi_channel_props.ring_base_addr = (u64)va;
  179. }
  180. result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
  181. &ep->gsi_chan_hdl);
  182. if (result != GSI_STATUS_SUCCESS) {
  183. goto fail_get_gsi_ep_info;
  184. }
  185. ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
  186. ep->gsi_mem_info.chan_ring_base_addr =
  187. gsi_channel_props.ring_base_addr;
  188. /* write event scratch */
  189. memset(&evt_scratch, 0, sizeof(evt_scratch));
  190. evt_scratch.wdi3.update_rp_moderation_config =
  191. UPDATE_RP_MODERATION_CONFIG;
  192. result = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl, evt_scratch);
  193. if (result != GSI_STATUS_SUCCESS) {
  194. IPAERR("failed to write evt ring scratch\n");
  195. goto fail_write_scratch;
  196. }
  197. if (!is_smmu_enabled) {
  198. IPADBG("smmu disabled\n");
  199. if (info->is_evt_rn_db_pcie_addr == true)
  200. IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
  201. else
  202. IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
  203. IPADBG_LOW("LSB 0x%x\n",
  204. (u32)info->event_ring_doorbell_pa);
  205. IPADBG_LOW("MSB 0x%x\n",
  206. (u32)((u64)info->event_ring_doorbell_pa >> 32));
  207. } else {
  208. IPADBG("smmu enabled\n");
  209. if (info_smmu->is_evt_rn_db_pcie_addr == true)
  210. IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
  211. else
  212. IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
  213. IPADBG_LOW("LSB 0x%x\n",
  214. (u32)info_smmu->event_ring_doorbell_pa);
  215. IPADBG_LOW("MSB 0x%x\n",
  216. (u32)((u64)info_smmu->event_ring_doorbell_pa >> 32));
  217. }
  218. if (!is_smmu_enabled) {
  219. addr_low = (u32)info->event_ring_doorbell_pa;
  220. addr_high = (u32)((u64)info->event_ring_doorbell_pa >> 32);
  221. } else {
  222. if (dir == IPA_WDI3_TX_DIR) {
  223. if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES,
  224. true, info_smmu->event_ring_doorbell_pa,
  225. NULL, 4, true, &va)) {
  226. IPAERR("failed to get smmu mapping\n");
  227. result = -EFAULT;
  228. goto fail_write_scratch;
  229. }
  230. } else {
  231. if (ipa_create_gsi_smmu_mapping(
  232. IPA_WDI_RX_COMP_RING_WP_RES,
  233. true, info_smmu->event_ring_doorbell_pa,
  234. NULL, 4, true, &va)) {
  235. IPAERR("failed to get smmu mapping\n");
  236. result = -EFAULT;
  237. goto fail_write_scratch;
  238. }
  239. }
  240. addr_low = (u32)va;
  241. addr_high = (u32)((u64)va >> 32);
  242. }
  243. /*
  244. * Arch specific:
  245. * pcie addr which are not via smmu, use pa directly!
  246. * pcie and DDR via 2 different port
  247. * assert bit 40 to indicate it is pcie addr
  248. * WDI-3.0, MSM --> pcie via smmu
  249. * WDI-3.0, MDM --> pcie not via smmu + dual port
  250. * assert bit 40 in case
  251. */
  252. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  253. is_smmu_enabled) {
  254. /*
  255. * Ir-respective of smmu enabled don't use IOVA addr
  256. * since pcie not via smmu in MDM's
  257. */
  258. if (info_smmu->is_evt_rn_db_pcie_addr == true) {
  259. addr_low = (u32)info_smmu->event_ring_doorbell_pa;
  260. addr_high =
  261. (u32)((u64)info_smmu->event_ring_doorbell_pa
  262. >> 32);
  263. }
  264. }
  265. /*
  266. * GSI recomendation to set bit-40 for (mdm targets && pcie addr)
  267. * from wdi-3.0 interface document
  268. */
  269. if (!is_smmu_enabled) {
  270. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  271. info->is_evt_rn_db_pcie_addr)
  272. addr_high |= (1 << 8);
  273. } else {
  274. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  275. info_smmu->is_evt_rn_db_pcie_addr)
  276. addr_high |= (1 << 8);
  277. }
  278. gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
  279. addr_low,
  280. addr_high);
  281. /* write channel scratch */
  282. memset(&ch_scratch, 0, sizeof(ch_scratch));
  283. ch_scratch.wdi3.update_rp_moderation_threshold =
  284. UPDATE_RP_MODERATION_THRESHOLD;
  285. if (dir == IPA_WDI3_RX_DIR) {
  286. if (!is_smmu_enabled)
  287. ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
  288. else
  289. ch_scratch.wdi3.rx_pkt_offset = info_smmu->pkt_offset;
  290. /* this metadata reg offset need to be in words */
  291. ch_scratch.wdi3.endp_metadata_reg_offset =
  292. ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
  293. gsi_ep_info->ipa_ep_num) / 4;
  294. }
  295. if (!is_smmu_enabled) {
  296. IPADBG_LOW("smmu disabled\n");
  297. if (info->is_txr_rn_db_pcie_addr == true)
  298. IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
  299. else
  300. IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
  301. IPADBG_LOW("LSB 0x%x\n",
  302. (u32)info->transfer_ring_doorbell_pa);
  303. IPADBG_LOW("MSB 0x%x\n",
  304. (u32)((u64)info->transfer_ring_doorbell_pa >> 32));
  305. } else {
  306. IPADBG_LOW("smmu eabled\n");
  307. if (info_smmu->is_txr_rn_db_pcie_addr == true)
  308. IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
  309. else
  310. IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
  311. IPADBG_LOW("LSB 0x%x\n",
  312. (u32)info_smmu->transfer_ring_doorbell_pa);
  313. IPADBG_LOW("MSB 0x%x\n",
  314. (u32)((u64)info_smmu->transfer_ring_doorbell_pa >> 32));
  315. }
  316. if (!is_smmu_enabled) {
  317. ch_scratch.wdi3.wifi_rp_address_low =
  318. (u32)info->transfer_ring_doorbell_pa;
  319. ch_scratch.wdi3.wifi_rp_address_high =
  320. (u32)((u64)info->transfer_ring_doorbell_pa >> 32);
  321. } else {
  322. if (dir == IPA_WDI3_TX_DIR) {
  323. if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_DB_RES,
  324. true, info_smmu->transfer_ring_doorbell_pa,
  325. NULL, 4, true, &va)) {
  326. IPAERR("failed to get smmu mapping\n");
  327. result = -EFAULT;
  328. goto fail_write_scratch;
  329. }
  330. ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
  331. ch_scratch.wdi3.wifi_rp_address_high =
  332. (u32)((u64)va >> 32);
  333. } else {
  334. if (ipa_create_gsi_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
  335. true, info_smmu->transfer_ring_doorbell_pa,
  336. NULL, 4, true, &va)) {
  337. IPAERR("failed to get smmu mapping\n");
  338. result = -EFAULT;
  339. goto fail_write_scratch;
  340. }
  341. ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
  342. ch_scratch.wdi3.wifi_rp_address_high =
  343. (u32)((u64)va >> 32);
  344. }
  345. }
  346. /*
  347. * Arch specific:
  348. * pcie addr which are not via smmu, use pa directly!
  349. * pcie and DDR via 2 different port
  350. * assert bit 40 to indicate it is pcie addr
  351. * WDI-3.0, MSM --> pcie via smmu
  352. * WDI-3.0, MDM --> pcie not via smmu + dual port
  353. * assert bit 40 in case
  354. */
  355. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  356. is_smmu_enabled) {
  357. /*
  358. * Ir-respective of smmu enabled don't use IOVA addr
  359. * since pcie not via smmu in MDM's
  360. */
  361. if (info_smmu->is_txr_rn_db_pcie_addr == true) {
  362. ch_scratch.wdi3.wifi_rp_address_low =
  363. (u32)info_smmu->transfer_ring_doorbell_pa;
  364. ch_scratch.wdi3.wifi_rp_address_high =
  365. (u32)((u64)info_smmu->transfer_ring_doorbell_pa
  366. >> 32);
  367. }
  368. }
  369. /*
  370. * GSI recomendation to set bit-40 for (mdm targets && pcie addr)
  371. * from wdi-3.0 interface document
  372. */
  373. if (!is_smmu_enabled) {
  374. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  375. info->is_txr_rn_db_pcie_addr)
  376. ch_scratch.wdi3.wifi_rp_address_high =
  377. (u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
  378. (1 << 8));
  379. } else {
  380. if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
  381. info_smmu->is_txr_rn_db_pcie_addr)
  382. ch_scratch.wdi3.wifi_rp_address_high =
  383. (u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
  384. (1 << 8));
  385. }
  386. result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
  387. if (result != GSI_STATUS_SUCCESS) {
  388. IPAERR("failed to write evt ring scratch\n");
  389. goto fail_write_scratch;
  390. }
  391. return 0;
  392. fail_write_scratch:
  393. gsi_dealloc_channel(ep->gsi_chan_hdl);
  394. ep->gsi_chan_hdl = ~0;
  395. fail_get_gsi_ep_info:
  396. gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
  397. ep->gsi_evt_ring_hdl = ~0;
  398. fail_smmu_mapping:
  399. ipa3_release_wdi3_gsi_smmu_mappings(dir);
  400. return result;
  401. }
  402. int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in,
  403. struct ipa_wdi_conn_out_params *out,
  404. ipa_wdi_meter_notifier_cb wdi_notify)
  405. {
  406. enum ipa_client_type rx_client;
  407. enum ipa_client_type tx_client;
  408. struct ipa3_ep_context *ep_rx;
  409. struct ipa3_ep_context *ep_tx;
  410. int ipa_ep_idx_rx;
  411. int ipa_ep_idx_tx;
  412. int result = 0;
  413. u32 gsi_db_addr_low, gsi_db_addr_high;
  414. void __iomem *db_addr;
  415. u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
  416. /* wdi3 only support over gsi */
  417. if (!ipa3_ctx->ipa_wdi3_over_gsi) {
  418. IPAERR("wdi3 over uc offload not supported");
  419. WARN_ON(1);
  420. return -EFAULT;
  421. }
  422. if (in == NULL || out == NULL) {
  423. IPAERR("invalid input\n");
  424. return -EINVAL;
  425. }
  426. if (in->is_smmu_enabled == false) {
  427. rx_client = in->u_rx.rx.client;
  428. tx_client = in->u_tx.tx.client;
  429. } else {
  430. rx_client = in->u_rx.rx_smmu.client;
  431. tx_client = in->u_tx.tx_smmu.client;
  432. }
  433. ipa_ep_idx_rx = ipa_get_ep_mapping(rx_client);
  434. ipa_ep_idx_tx = ipa_get_ep_mapping(tx_client);
  435. if (ipa_ep_idx_rx == -1 || ipa_ep_idx_tx == -1) {
  436. IPAERR("fail to alloc EP.\n");
  437. return -EFAULT;
  438. }
  439. if (ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES ||
  440. ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES) {
  441. IPAERR("ep out of range.\n");
  442. return -EFAULT;
  443. }
  444. ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
  445. ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
  446. if (ep_rx->valid || ep_tx->valid) {
  447. IPAERR("EP already allocated.\n");
  448. return -EFAULT;
  449. }
  450. memset(ep_rx, 0, offsetof(struct ipa3_ep_context, sys));
  451. memset(ep_tx, 0, offsetof(struct ipa3_ep_context, sys));
  452. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  453. #ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
  454. if (wdi_notify)
  455. ipa3_ctx->uc_wdi_ctx.stats_notify = wdi_notify;
  456. else
  457. IPADBG("wdi_notify is null\n");
  458. #endif
  459. /* setup rx ep cfg */
  460. ep_rx->valid = 1;
  461. ep_rx->client = rx_client;
  462. result = ipa3_disable_data_path(ipa_ep_idx_rx);
  463. if (result) {
  464. IPAERR("disable data path failed res=%d clnt=%d.\n", result,
  465. ipa_ep_idx_rx);
  466. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  467. return -EFAULT;
  468. }
  469. ep_rx->client_notify = in->notify;
  470. ep_rx->priv = in->priv;
  471. if (in->is_smmu_enabled == false)
  472. memcpy(&ep_rx->cfg, &in->u_rx.rx.ipa_ep_cfg,
  473. sizeof(ep_rx->cfg));
  474. else
  475. memcpy(&ep_rx->cfg, &in->u_rx.rx_smmu.ipa_ep_cfg,
  476. sizeof(ep_rx->cfg));
  477. if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) {
  478. IPAERR("fail to setup rx pipe cfg\n");
  479. result = -EFAULT;
  480. goto fail;
  481. }
  482. IPADBG("ipa3_ctx->ipa_wdi3_over_gsi %d\n",
  483. ipa3_ctx->ipa_wdi3_over_gsi);
  484. /* setup RX gsi channel */
  485. if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
  486. &in->u_rx.rx, &in->u_rx.rx_smmu, IPA_WDI3_RX_DIR,
  487. ep_rx)) {
  488. IPAERR("fail to setup wdi3 gsi rx channel\n");
  489. result = -EFAULT;
  490. goto fail;
  491. }
  492. if (gsi_query_channel_db_addr(ep_rx->gsi_chan_hdl,
  493. &gsi_db_addr_low, &gsi_db_addr_high)) {
  494. IPAERR("failed to query gsi rx db addr\n");
  495. result = -EFAULT;
  496. goto fail;
  497. }
  498. /* only 32 bit lsb is used */
  499. out->rx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
  500. IPADBG("out->rx_uc_db_pa %llu\n", out->rx_uc_db_pa);
  501. ipa3_install_dflt_flt_rules(ipa_ep_idx_rx);
  502. IPADBG("client %d (ep: %d) connected\n", rx_client,
  503. ipa_ep_idx_rx);
  504. /* setup tx ep cfg */
  505. ep_tx->valid = 1;
  506. ep_tx->client = tx_client;
  507. result = ipa3_disable_data_path(ipa_ep_idx_tx);
  508. if (result) {
  509. IPAERR("disable data path failed res=%d ep=%d.\n", result,
  510. ipa_ep_idx_tx);
  511. result = -EFAULT;
  512. goto fail;
  513. }
  514. if (in->is_smmu_enabled == false)
  515. memcpy(&ep_tx->cfg, &in->u_tx.tx.ipa_ep_cfg,
  516. sizeof(ep_tx->cfg));
  517. else
  518. memcpy(&ep_tx->cfg, &in->u_tx.tx_smmu.ipa_ep_cfg,
  519. sizeof(ep_tx->cfg));
  520. ep_tx->cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
  521. ep_tx->cfg.aggr.aggr = IPA_GENERIC;
  522. ep_tx->cfg.aggr.aggr_byte_limit = IPA_WLAN_AGGR_BYTE_LIMIT;
  523. ep_tx->cfg.aggr.aggr_pkt_limit = IPA_WLAN_AGGR_PKT_LIMIT;
  524. ep_tx->cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;
  525. if (ipa3_cfg_ep(ipa_ep_idx_tx, &ep_tx->cfg)) {
  526. IPAERR("fail to setup tx pipe cfg\n");
  527. result = -EFAULT;
  528. goto fail;
  529. }
  530. /* setup TX gsi channel */
  531. if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
  532. &in->u_tx.tx, &in->u_tx.tx_smmu, IPA_WDI3_TX_DIR,
  533. ep_tx)) {
  534. IPAERR("fail to setup wdi3 gsi tx channel\n");
  535. result = -EFAULT;
  536. goto fail;
  537. }
  538. if (gsi_query_channel_db_addr(ep_tx->gsi_chan_hdl,
  539. &gsi_db_addr_low, &gsi_db_addr_high)) {
  540. IPAERR("failed to query gsi tx db addr\n");
  541. result = -EFAULT;
  542. goto fail;
  543. }
  544. /* only 32 bit lsb is used */
  545. out->tx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
  546. IPADBG("out->tx_uc_db_pa %llu\n", out->tx_uc_db_pa);
  547. IPADBG("client %d (ep: %d) connected\n", tx_client,
  548. ipa_ep_idx_tx);
  549. /* ring initial event ring dbs */
  550. gsi_query_evt_ring_db_addr(ep_rx->gsi_evt_ring_hdl,
  551. &evt_ring_db_addr_low, &evt_ring_db_addr_high);
  552. IPADBG("evt_ring_hdl %lu, db_addr_low %u db_addr_high %u\n",
  553. ep_rx->gsi_evt_ring_hdl, evt_ring_db_addr_low,
  554. evt_ring_db_addr_high);
  555. /* only 32 bit lsb is used */
  556. db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
  557. /*
  558. * IPA/GSI driver should ring the event DB once after
  559. * initialization of the event, with a value that is
  560. * outside of the ring range. Eg: ring base = 0x1000,
  561. * ring size = 0x100 => AP can write value > 0x1100
  562. * into the doorbell address. Eg: 0x 1110
  563. */
  564. iowrite32(in->u_rx.rx.event_ring_size / 4 + 10, db_addr);
  565. gsi_query_evt_ring_db_addr(ep_tx->gsi_evt_ring_hdl,
  566. &evt_ring_db_addr_low, &evt_ring_db_addr_high);
  567. /* only 32 bit lsb is used */
  568. db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
  569. /*
  570. * IPA/GSI driver should ring the event DB once after
  571. * initialization of the event, with a value that is
  572. * outside of the ring range. Eg: ring base = 0x1000,
  573. * ring size = 0x100 => AP can write value > 0x1100
  574. * into the doorbell address. Eg: 0x 1110
  575. */
  576. iowrite32(in->u_tx.tx.event_ring_size / 4 + 10, db_addr);
  577. fail:
  578. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  579. return result;
  580. }
  581. int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
  582. {
  583. struct ipa3_ep_context *ep_tx, *ep_rx;
  584. int result = 0;
  585. /* wdi3 only support over gsi */
  586. if (!ipa3_ctx->ipa_wdi3_over_gsi) {
  587. IPAERR("wdi3 over uc offload not supported");
  588. WARN_ON(1);
  589. return -EFAULT;
  590. }
  591. IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
  592. IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
  593. if (ipa_ep_idx_tx < 0 || ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES ||
  594. ipa_ep_idx_rx < 0 || ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES) {
  595. IPAERR("invalid ipa ep index\n");
  596. return -EINVAL;
  597. }
  598. ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
  599. ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
  600. IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
  601. /* tear down tx pipe */
  602. result = ipa3_reset_gsi_channel(ipa_ep_idx_tx);
  603. if (result != GSI_STATUS_SUCCESS) {
  604. IPAERR("failed to reset gsi channel: %d.\n", result);
  605. goto exit;
  606. }
  607. result = gsi_reset_evt_ring(ep_tx->gsi_evt_ring_hdl);
  608. if (result != GSI_STATUS_SUCCESS) {
  609. IPAERR("failed to reset evt ring: %d.\n", result);
  610. goto exit;
  611. }
  612. result = ipa3_release_gsi_channel(ipa_ep_idx_tx);
  613. if (result) {
  614. IPAERR("failed to release gsi channel: %d\n", result);
  615. goto exit;
  616. }
  617. memset(ep_tx, 0, sizeof(struct ipa3_ep_context));
  618. IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx);
  619. /* tear down rx pipe */
  620. result = ipa3_reset_gsi_channel(ipa_ep_idx_rx);
  621. if (result != GSI_STATUS_SUCCESS) {
  622. IPAERR("failed to reset gsi channel: %d.\n", result);
  623. goto exit;
  624. }
  625. result = gsi_reset_evt_ring(ep_rx->gsi_evt_ring_hdl);
  626. if (result != GSI_STATUS_SUCCESS) {
  627. IPAERR("failed to reset evt ring: %d.\n", result);
  628. goto exit;
  629. }
  630. result = ipa3_release_gsi_channel(ipa_ep_idx_rx);
  631. if (result) {
  632. IPAERR("failed to release gsi channel: %d\n", result);
  633. goto exit;
  634. }
  635. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
  636. ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_WDI3);
  637. ipa3_delete_dflt_flt_rules(ipa_ep_idx_rx);
  638. memset(ep_rx, 0, sizeof(struct ipa3_ep_context));
  639. IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx);
  640. exit:
  641. IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_by_pipe(ipa_ep_idx_tx));
  642. return result;
  643. }
  644. int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
  645. {
  646. struct ipa3_ep_context *ep_tx, *ep_rx;
  647. int result = 0;
  648. /* wdi3 only support over gsi */
  649. if (!ipa3_ctx->ipa_wdi3_over_gsi) {
  650. IPAERR("wdi3 over uc offload not supported");
  651. WARN_ON(1);
  652. return -EFAULT;
  653. }
  654. IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
  655. IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
  656. ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
  657. ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
  658. IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
  659. /* start gsi tx channel */
  660. result = gsi_start_channel(ep_tx->gsi_chan_hdl);
  661. if (result) {
  662. IPAERR("failed to start gsi tx channel\n");
  663. result = -EFAULT;
  664. goto exit;
  665. }
  666. /* start gsi rx channel */
  667. result = gsi_start_channel(ep_rx->gsi_chan_hdl);
  668. if (result) {
  669. IPAERR("failed to start gsi rx channel\n");
  670. result = -EFAULT;
  671. goto exit;
  672. }
  673. /* start uC gsi dbg stats monitor */
  674. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
  675. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
  676. = ep_rx->gsi_chan_hdl;
  677. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
  678. = DIR_PRODUCER;
  679. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
  680. = ep_tx->gsi_chan_hdl;
  681. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
  682. = DIR_CONSUMER;
  683. ipa3_uc_debug_stats_alloc(
  684. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
  685. }
  686. /* enable data path */
  687. result = ipa3_enable_data_path(ipa_ep_idx_rx);
  688. if (result) {
  689. IPAERR("enable data path failed res=%d clnt=%d.\n", result,
  690. ipa_ep_idx_rx);
  691. result = -EFAULT;
  692. goto exit;
  693. }
  694. result = ipa3_enable_data_path(ipa_ep_idx_tx);
  695. if (result) {
  696. IPAERR("enable data path failed res=%d clnt=%d.\n", result,
  697. ipa_ep_idx_tx);
  698. result = -EFAULT;
  699. goto exit;
  700. }
  701. exit:
  702. IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
  703. return result;
  704. }
  705. int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
  706. {
  707. int result = 0;
  708. struct ipa3_ep_context *ep;
  709. u32 source_pipe_bitmask = 0;
  710. bool disable_force_clear = false;
  711. struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
  712. /* wdi3 only support over gsi */
  713. if (!ipa3_ctx->ipa_wdi3_over_gsi) {
  714. IPAERR("wdi3 over uc offload not supported");
  715. WARN_ON(1);
  716. return -EFAULT;
  717. }
  718. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  719. /* disable tx data path */
  720. result = ipa3_disable_data_path(ipa_ep_idx_tx);
  721. if (result) {
  722. IPAERR("enable data path failed res=%d clnt=%d.\n", result,
  723. ipa_ep_idx_tx);
  724. result = -EFAULT;
  725. goto fail;
  726. }
  727. /* disable rx data path */
  728. result = ipa3_disable_data_path(ipa_ep_idx_rx);
  729. if (result) {
  730. IPAERR("disable data path failed res=%d clnt=%d.\n", result,
  731. ipa_ep_idx_rx);
  732. result = -EFAULT;
  733. goto fail;
  734. }
  735. /*
  736. * For WDI 3.0 need to ensure pipe will be empty before suspend
  737. * as IPA uC will fail to suspend the pipe otherwise.
  738. */
  739. ep = &ipa3_ctx->ep[ipa_ep_idx_rx];
  740. source_pipe_bitmask = 1 <<
  741. ipa3_get_ep_mapping(ep->client);
  742. result = ipa3_enable_force_clear(ipa_ep_idx_rx,
  743. false, source_pipe_bitmask);
  744. if (result) {
  745. /*
  746. * assuming here modem SSR, AP can remove
  747. * the delay in this case
  748. */
  749. IPAERR("failed to force clear %d\n", result);
  750. IPAERR("remove delay from SCND reg\n");
  751. ep_ctrl_scnd.endp_delay = false;
  752. ipahal_write_reg_n_fields(
  753. IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx_rx,
  754. &ep_ctrl_scnd);
  755. } else {
  756. disable_force_clear = true;
  757. }
  758. /* stop gsi rx channel */
  759. result = ipa3_stop_gsi_channel(ipa_ep_idx_rx);
  760. if (result) {
  761. IPAERR("failed to stop gsi rx channel\n");
  762. result = -EFAULT;
  763. goto fail;
  764. }
  765. /* stop gsi tx channel */
  766. result = ipa3_stop_gsi_channel(ipa_ep_idx_tx);
  767. if (result) {
  768. IPAERR("failed to stop gsi tx channel\n");
  769. result = -EFAULT;
  770. goto fail;
  771. }
  772. /* stop uC gsi dbg stats monitor */
  773. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
  774. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
  775. = 0xff;
  776. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
  777. = DIR_PRODUCER;
  778. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
  779. = 0xff;
  780. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
  781. = DIR_CONSUMER;
  782. ipa3_uc_debug_stats_alloc(
  783. ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
  784. }
  785. if (disable_force_clear)
  786. ipa3_disable_force_clear(ipa_ep_idx_rx);
  787. fail:
  788. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  789. return result;
  790. }
  791. int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id)
  792. {
  793. int result = 0;
  794. struct ipa3_ep_context *ep;
  795. union __packed gsi_channel_scratch ch_scratch;
  796. memset(&ch_scratch, 0, sizeof(ch_scratch));
  797. if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
  798. ipa3_ctx->ep[clnt_hdl].valid == 0) {
  799. IPAERR_RL("bad parm, %d\n", clnt_hdl);
  800. return -EINVAL;
  801. }
  802. ep = &ipa3_ctx->ep[clnt_hdl];
  803. IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
  804. result = gsi_read_channel_scratch(ep->gsi_chan_hdl, &ch_scratch);
  805. if (result != GSI_STATUS_SUCCESS) {
  806. IPAERR("failed to read channel scratch %d\n", result);
  807. goto exit;
  808. }
  809. result = gsi_stop_channel(ep->gsi_chan_hdl);
  810. if (result != GSI_STATUS_SUCCESS && result != -GSI_STATUS_AGAIN &&
  811. result != -GSI_STATUS_TIMED_OUT) {
  812. IPAERR("failed to stop gsi channel %d\n", result);
  813. goto exit;
  814. }
  815. ch_scratch.wdi3.qmap_id = qmap_id;
  816. result = gsi_write_channel_scratch(ep->gsi_chan_hdl,
  817. ch_scratch);
  818. if (result != GSI_STATUS_SUCCESS) {
  819. IPAERR("failed to write channel scratch %d\n", result);
  820. goto exit;
  821. }
  822. result = gsi_start_channel(ep->gsi_chan_hdl);
  823. if (result != GSI_STATUS_SUCCESS) {
  824. IPAERR("failed to start gsi channel %d\n", result);
  825. goto exit;
  826. }
  827. exit:
  828. IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
  829. return result;
  830. }
  831. /**
  832. * ipa3_get_wdi3_gsi_stats() - Query WDI3 gsi stats from uc
  833. * @stats: [inout] stats blob from client populated by driver
  834. *
  835. * Returns: 0 on success, negative on failure
  836. *
  837. * @note Cannot be called from atomic context
  838. *
  839. */
  840. int ipa3_get_wdi3_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
  841. {
  842. int i;
  843. if (!ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio) {
  844. IPAERR("bad NULL parms for wdi3_gsi_stats\n");
  845. return -EINVAL;
  846. }
  847. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  848. for (i = 0; i < MAX_WDI3_CHANNELS; i++) {
  849. stats->ring[i].ringFull = ioread32(
  850. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  851. + i * IPA3_UC_DEBUG_STATS_OFF +
  852. IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
  853. stats->ring[i].ringEmpty = ioread32(
  854. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  855. + i * IPA3_UC_DEBUG_STATS_OFF +
  856. IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
  857. stats->ring[i].ringUsageHigh = ioread32(
  858. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  859. + i * IPA3_UC_DEBUG_STATS_OFF +
  860. IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
  861. stats->ring[i].ringUsageLow = ioread32(
  862. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  863. + i * IPA3_UC_DEBUG_STATS_OFF +
  864. IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
  865. stats->ring[i].RingUtilCount = ioread32(
  866. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
  867. + i * IPA3_UC_DEBUG_STATS_OFF +
  868. IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
  869. }
  870. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  871. return 0;
  872. }