ipa_wigig_i.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include "ipa_i.h"
  6. #include <linux/if_ether.h>
  7. #include <linux/log2.h>
  8. #include <linux/ipa_wigig.h>
  9. #define IPA_WIGIG_DESC_RING_EL_SIZE 32
  10. #define IPA_WIGIG_STATUS_RING_EL_SIZE 16
  11. #define GSI_STOP_MAX_RETRY_CNT 10
  12. #define IPA_WIGIG_CONNECTED BIT(0)
  13. #define IPA_WIGIG_ENABLED BIT(1)
  14. #define IPA_WIGIG_MSB_MASK 0xFFFFFFFF00000000
  15. #define IPA_WIGIG_LSB_MASK 0x00000000FFFFFFFF
  16. #define IPA_WIGIG_MSB(num) ((u32)((num & IPA_WIGIG_MSB_MASK) >> 32))
  17. #define IPA_WIGIG_LSB(num) ((u32)(num & IPA_WIGIG_LSB_MASK))
  18. /* extract PCIE addresses [0:39] relevant msb */
  19. #define IPA_WIGIG_8_MSB_MASK 0xFF00000000
  20. #define IPA_WIGIG_8_MSB(num) ((u32)((num & IPA_WIGIG_8_MSB_MASK) >> 32))
  21. #define W11AD_RX 0
  22. #define W11AD_TX 1
  23. #define W11AD_TO_GSI_DB_m 1
  24. #define W11AD_TO_GSI_DB_n 1
/* refcounted bookkeeping of peer register pages / rings already mapped,
 * shared between all WIGIG pipes
 */
static LIST_HEAD(smmu_reg_addr_list);
static LIST_HEAD(smmu_ring_addr_list);
/* protects both lists above */
static DEFINE_MUTEX(smmu_lock);
/*
 * struct ipa_wigig_smmu_reg_addr - refcounted record of one peer register
 * page mapped to an IPA SMMU context bank
 * @link: node in smmu_reg_addr_list
 * @phys_addr: physical address of the mapped page
 * @cb_type: context bank the page is mapped to
 * @count: number of outstanding map requests for this page
 */
struct ipa_wigig_smmu_reg_addr {
	struct list_head link;
	phys_addr_t phys_addr;
	enum ipa_smmu_cb_type cb_type;
	u8 count;
};
/*
 * struct ipa_wigig_smmu_ring_addr - refcounted record of one peer ring
 * mapped to an IPA SMMU context bank
 * @link: node in smmu_ring_addr_list
 * @iova: IO virtual address the ring is mapped at
 * @cb_type: context bank the ring is mapped to
 * @count: number of outstanding map requests for this ring
 */
struct ipa_wigig_smmu_ring_addr {
	struct list_head link;
	u64 iova;
	enum ipa_smmu_cb_type cb_type;
	u8 count;
};
  40. static int ipa3_wigig_uc_loaded_handler(struct notifier_block *self,
  41. unsigned long val, void *data)
  42. {
  43. IPADBG("val %ld\n", val);
  44. if (!ipa3_ctx) {
  45. IPAERR("IPA ctx is null\n");
  46. return -EINVAL;
  47. }
  48. WARN_ON(data != ipa3_ctx);
  49. if (ipa3_ctx->uc_wigig_ctx.uc_ready_cb) {
  50. ipa3_ctx->uc_wigig_ctx.uc_ready_cb(
  51. ipa3_ctx->uc_wigig_ctx.priv);
  52. ipa3_ctx->uc_wigig_ctx.uc_ready_cb =
  53. NULL;
  54. ipa3_ctx->uc_wigig_ctx.priv = NULL;
  55. }
  56. IPADBG("exit\n");
  57. return 0;
  58. }
/* registered with the uC layer in ipa3_wigig_init_i(); invoked once the
 * uC firmware load completes
 */
static struct notifier_block uc_loaded_notifier = {
	.notifier_call = ipa3_wigig_uc_loaded_handler,
};
/*
 * ipa3_wigig_init_i() - internal WIGIG init; registers for the "uC loaded"
 * notification so pending clients can be told when the uC is ready.
 *
 * NOTE(review): the result of ipa3_uc_register_ready_cb() is ignored here —
 * presumably registration cannot fail; confirm against its definition.
 *
 * Return: always 0.
 */
int ipa3_wigig_init_i(void)
{
	IPADBG("\n");
	ipa3_uc_register_ready_cb(&uc_loaded_notifier);
	IPADBG("exit\n");
	return 0;
}
  69. int ipa3_wigig_uc_init(
  70. struct ipa_wdi_uc_ready_params *inout,
  71. ipa_wigig_misc_int_cb int_notify,
  72. phys_addr_t *uc_db_pa)
  73. {
  74. int result = 0;
  75. IPADBG("\n");
  76. if (inout == NULL) {
  77. IPAERR("inout is NULL");
  78. return -EINVAL;
  79. }
  80. if (int_notify == NULL) {
  81. IPAERR("int_notify is NULL");
  82. return -EINVAL;
  83. }
  84. result = ipa3_uc_state_check();
  85. if (result) {
  86. inout->is_uC_ready = false;
  87. ipa3_ctx->uc_wigig_ctx.uc_ready_cb = inout->notify;
  88. } else {
  89. inout->is_uC_ready = true;
  90. }
  91. ipa3_ctx->uc_wigig_ctx.priv = inout->priv;
  92. ipa3_ctx->uc_wigig_ctx.misc_notify_cb = int_notify;
  93. *uc_db_pa = ipa3_ctx->ipa_wrapper_base +
  94. ipahal_get_reg_base() +
  95. ipahal_get_reg_mn_ofst(
  96. IPA_UC_MAILBOX_m_n,
  97. W11AD_TO_GSI_DB_m,
  98. W11AD_TO_GSI_DB_n);
  99. IPADBG("exit\n");
  100. return 0;
  101. }
  102. static int ipa3_wigig_tx_bit_to_ep(
  103. const u8 tx_bit_num,
  104. enum ipa_client_type *type)
  105. {
  106. IPADBG("tx_bit_num %d\n", tx_bit_num);
  107. switch (tx_bit_num) {
  108. case 2:
  109. *type = IPA_CLIENT_WIGIG1_CONS;
  110. break;
  111. case 3:
  112. *type = IPA_CLIENT_WIGIG2_CONS;
  113. break;
  114. case 4:
  115. *type = IPA_CLIENT_WIGIG3_CONS;
  116. break;
  117. case 5:
  118. *type = IPA_CLIENT_WIGIG4_CONS;
  119. break;
  120. default:
  121. IPAERR("invalid tx_bit_num %d\n", tx_bit_num);
  122. return -EINVAL;
  123. }
  124. IPADBG("exit\n");
  125. return 0;
  126. }
  127. static int ipa3_wigig_smmu_map_buffers(bool Rx,
  128. struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
  129. void *buff,
  130. bool map)
  131. {
  132. int result;
  133. /* data buffers */
  134. if (Rx) {
  135. struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu =
  136. (struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
  137. int num_elem =
  138. pipe_smmu->desc_ring_size /
  139. IPA_WIGIG_DESC_RING_EL_SIZE;
  140. result = ipa3_smmu_map_peer_buff(
  141. dbuff_smmu->data_buffer_base_iova,
  142. dbuff_smmu->data_buffer_size * num_elem,
  143. map,
  144. &dbuff_smmu->data_buffer_base,
  145. IPA_SMMU_CB_11AD);
  146. if (result) {
  147. IPAERR(
  148. "failed to %s rx data_buffer %d, num elem %d\n"
  149. , map ? "map" : "unmap",
  150. result, num_elem);
  151. goto fail_map_buff;
  152. }
  153. } else {
  154. int i;
  155. struct ipa_wigig_tx_pipe_data_buffer_info_smmu *dbuff_smmu =
  156. (struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;
  157. for (i = 0; i < dbuff_smmu->num_buffers; i++) {
  158. result = ipa3_smmu_map_peer_buff(
  159. *(dbuff_smmu->data_buffer_base_iova + i),
  160. dbuff_smmu->data_buffer_size,
  161. map,
  162. (dbuff_smmu->data_buffer_base + i),
  163. IPA_SMMU_CB_11AD);
  164. if (result) {
  165. IPAERR(
  166. "%d: failed to %s tx data buffer %d\n"
  167. , i, map ? "map" : "unmap",
  168. result);
  169. for (i--; i >= 0; i--) {
  170. result = ipa3_smmu_map_peer_buff(
  171. *(dbuff_smmu->data_buffer_base_iova +
  172. i),
  173. dbuff_smmu->data_buffer_size,
  174. !map,
  175. (dbuff_smmu->data_buffer_base +
  176. i),
  177. IPA_SMMU_CB_11AD);
  178. }
  179. goto fail_map_buff;
  180. }
  181. }
  182. }
  183. IPADBG("exit\n");
  184. return 0;
  185. fail_map_buff:
  186. return result;
  187. }
  188. static int ipa3_wigig_smmu_map_reg(phys_addr_t phys_addr, bool map,
  189. enum ipa_smmu_cb_type cb_type)
  190. {
  191. struct ipa_wigig_smmu_reg_addr *entry;
  192. struct ipa_wigig_smmu_reg_addr *next;
  193. int result = 0;
  194. IPADBG("addr %pa, %s\n", &phys_addr, map ? "map" : "unmap");
  195. mutex_lock(&smmu_lock);
  196. list_for_each_entry_safe(entry, next, &smmu_reg_addr_list, link) {
  197. if ((entry->phys_addr == phys_addr) &&
  198. (entry->cb_type == cb_type)) {
  199. IPADBG("cb %d, page %pa already mapped, ", cb_type,
  200. &phys_addr);
  201. if (map) {
  202. entry->count++;
  203. IPADBG("inc to %d\n", (entry->count));
  204. } else {
  205. --entry->count;
  206. IPADBG("dec to %d\n", entry->count);
  207. if (!(entry->count)) {
  208. IPADBG("unmap and delete\n");
  209. result = ipa3_smmu_map_peer_reg(
  210. phys_addr, map, cb_type);
  211. if (result) {
  212. IPAERR("failed to unmap %pa\n",
  213. &phys_addr);
  214. goto finish;
  215. }
  216. list_del(&entry->link);
  217. kfree(entry);
  218. }
  219. }
  220. goto finish;
  221. }
  222. }
  223. IPADBG("new page found %pa, map and add to list CB %d\n", &phys_addr,
  224. cb_type);
  225. result = ipa3_smmu_map_peer_reg(phys_addr, map, cb_type);
  226. if (result) {
  227. IPAERR("failed to map %pa\n", &phys_addr);
  228. goto finish;
  229. }
  230. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  231. if (entry == NULL) {
  232. IPAERR("couldn't allocate for %pa\n", &phys_addr);
  233. ipa3_smmu_map_peer_reg(phys_addr, !map, cb_type);
  234. result = -ENOMEM;
  235. goto finish;
  236. }
  237. INIT_LIST_HEAD(&entry->link);
  238. entry->phys_addr = phys_addr;
  239. entry->cb_type = cb_type;
  240. entry->count = 1;
  241. list_add(&entry->link, &smmu_reg_addr_list);
  242. finish:
  243. mutex_unlock(&smmu_lock);
  244. IPADBG("exit\n");
  245. return result;
  246. }
  247. static int ipa3_wigig_smmu_map_ring(u64 iova, u32 size, bool map,
  248. struct sg_table *sgt, enum ipa_smmu_cb_type cb_type)
  249. {
  250. struct ipa_wigig_smmu_ring_addr *entry;
  251. struct ipa_wigig_smmu_ring_addr *next;
  252. int result = 0;
  253. IPADBG("iova %llX, %s\n", iova, map ? "map" : "unmap");
  254. mutex_lock(&smmu_lock);
  255. list_for_each_entry_safe(entry, next, &smmu_ring_addr_list, link) {
  256. if ((entry->iova == iova) &&
  257. (entry->cb_type == cb_type)) {
  258. IPADBG("cb %d, page 0x%llX already mapped, ", cb_type,
  259. iova);
  260. if (map) {
  261. entry->count++;
  262. IPADBG("inc to %d\n", (entry->count));
  263. } else {
  264. --entry->count;
  265. IPADBG("dec to %d\n", entry->count);
  266. if (!(entry->count)) {
  267. IPADBG("unmap and delete\n");
  268. result = ipa3_smmu_map_peer_buff(
  269. iova, size, map, sgt, cb_type);
  270. if (result) {
  271. IPAERR(
  272. "failed to unmap 0x%llX\n",
  273. iova);
  274. goto finish;
  275. }
  276. list_del(&entry->link);
  277. kfree(entry);
  278. }
  279. }
  280. goto finish;
  281. }
  282. }
  283. IPADBG("new page found 0x%llX, map and add to list\n", iova);
  284. result = ipa3_smmu_map_peer_buff(iova, size, map, sgt, cb_type);
  285. if (result) {
  286. IPAERR("failed to map 0x%llX\n", iova);
  287. goto finish;
  288. }
  289. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  290. if (entry == NULL) {
  291. IPAERR("couldn't allocate for 0x%llX\n", iova);
  292. ipa3_smmu_map_peer_buff(iova, size, !map, sgt, cb_type);
  293. result = -ENOMEM;
  294. goto finish;
  295. }
  296. INIT_LIST_HEAD(&entry->link);
  297. entry->iova = iova;
  298. entry->cb_type = cb_type;
  299. entry->count = 1;
  300. list_add(&entry->link, &smmu_ring_addr_list);
  301. finish:
  302. mutex_unlock(&smmu_lock);
  303. IPADBG("exit\n");
  304. return result;
  305. }
/*
 * ipa3_wigig_smmu_map_channel() - map/unmap every SMMU resource of one pipe
 * @Rx: true for an Rx pipe, false for Tx
 * @pipe_smmu: ring bases/sizes, HWHEAD/HWTAIL register addresses, sg-tables
 * @buff: ipa_wigig_{rx,tx}_pipe_data_buffer_info_smmu, selected by @Rx
 * @map: true to map everything, false to unmap
 *
 * Resources are mapped in a fixed order (HWHEAD regs, HWTAIL regs, rings,
 * then buffers); on failure the goto chain below unwinds exactly the
 * resources mapped so far, in reverse order. NOTE(review): when @map is
 * false the failure-path calls invoke the helpers with !map == true, i.e.
 * they re-map what was just unmapped to restore a consistent state -
 * presumably intentional; confirm.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ipa3_wigig_smmu_map_channel(bool Rx,
	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
	void *buff,
	bool map)
{
	int result = 0;
	struct ipa_smmu_cb_ctx *smmu_ctx = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);

	IPADBG("\n");
	/*
	 * --------------------------------------------------------------------
	 * entity        |HWHEAD|HWTAIL|HWHEAD|HWTAIL| misc | buffers| rings|
	 *               |Sring |Sring |Dring |Dring | regs |        |      |
	 * --------------------------------------------------------------------
	 * GSI (apps CB) |  TX  |RX, TX|      |RX, TX|      |        |Rx, TX|
	 * --------------------------------------------------------------------
	 * IPA (11AD CB) |      |      |      |      |      | RX, TX |      |
	 * --------------------------------------------------------------------
	 * uc (uC CB)    |  RX  |      |  TX  |      |always|        |      |
	 * --------------------------------------------------------------------
	 *
	 * buffers are mapped to 11AD CB. in case this context bank is shared,
	 * mapping is done by 11ad driver only and applies to both 11ad and
	 * IPA HWs (page tables are shared). Otherwise, mapping is done here.
	 */
	if (!smmu_ctx) {
		IPAERR("11AD SMMU ctx is null\n");
		return -EINVAL;
	}

	/* HWHEAD registers: per the table, Rx status HWHEAD goes to the uC
	 * CB; Tx status HWHEAD goes to the AP CB and Tx desc HWHEAD to the
	 * uC CB
	 */
	if (Rx) {
		IPADBG("RX %s status_ring_HWHEAD_pa %pa uC CB\n",
			map ? "map" : "unmap",
			&pipe_smmu->status_ring_HWHEAD_pa);
		result = ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s status_ring_HWAHEAD %d\n",
				map ? "map" : "unmap",
				result);
			goto fail;
		}
	} else {
		IPADBG("TX %s status_ring_HWHEAD_pa %pa AP CB\n",
			map ? "map" : "unmap",
			&pipe_smmu->status_ring_HWHEAD_pa);
		result = ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa,
				PAGE_SIZE),
			map,
			IPA_SMMU_CB_AP);
		if (result) {
			IPAERR(
				"failed to %s status_ring_HWAHEAD %d\n",
				map ? "map" : "unmap",
				result);
			goto fail;
		}

		IPADBG("TX %s desc_ring_HWHEAD_pa %pa uC CB\n",
			map ? "map" : "unmap",
			&pipe_smmu->desc_ring_HWHEAD_pa);
		result = ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->desc_ring_HWHEAD_pa,
				PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR("failed to %s desc_ring_HWHEAD %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_desc_HWHEAD;
		}
	}

	/* HWTAIL registers: both go to the AP CB for Rx and Tx alike */
	IPADBG("%s status_ring_HWTAIL_pa %pa AP CB\n",
		map ? "map" : "unmap",
		&pipe_smmu->status_ring_HWTAIL_pa);
	result = ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
		map,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR(
			"failed to %s status_ring_HWTAIL %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_status_HWTAIL;
	}

	IPADBG("%s desc_ring_HWTAIL_pa %pa AP CB\n",
		map ? "map" : "unmap",
		&pipe_smmu->desc_ring_HWTAIL_pa);
	result = ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
		map,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR("failed to %s desc_ring_HWTAIL %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_desc_HWTAIL;
	}

	/* rings */
	IPADBG("%s desc_ring_base_iova %llX AP CB\n",
		map ? "map" : "unmap",
		pipe_smmu->desc_ring_base_iova);
	result = ipa3_wigig_smmu_map_ring(
		pipe_smmu->desc_ring_base_iova,
		pipe_smmu->desc_ring_size,
		map,
		&pipe_smmu->desc_ring_base,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR("failed to %s desc_ring_base %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_desc_ring;
	}

	IPADBG("%s status_ring_base_iova %llX AP CB\n",
		map ? "map" : "unmap",
		pipe_smmu->status_ring_base_iova);
	result = ipa3_wigig_smmu_map_ring(
		pipe_smmu->status_ring_base_iova,
		pipe_smmu->status_ring_size,
		map,
		&pipe_smmu->status_ring_base,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR("failed to %s status_ring_base %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_status_ring;
	}

	/* buffers live on the 11AD CB; if that CB is shared with the 11ad
	 * driver, the 11ad driver owns the mapping and we skip it here
	 */
	if (!smmu_ctx->shared) {
		IPADBG("CB not shared - map buffers\n");
		result = ipa3_wigig_smmu_map_buffers(Rx, pipe_smmu, buff, map);
		if (result) {
			IPAERR("failed to %s buffers %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_buffers;
		}
	}

	IPADBG("exit\n");
	return 0;

	/* unwind in exact reverse order of the mappings above */
fail_buffers:
	ipa3_wigig_smmu_map_ring(
		pipe_smmu->status_ring_base_iova, pipe_smmu->status_ring_size,
		!map, &pipe_smmu->status_ring_base, IPA_SMMU_CB_AP);
fail_status_ring:
	ipa3_wigig_smmu_map_ring(
		pipe_smmu->desc_ring_base_iova, pipe_smmu->desc_ring_size,
		!map, &pipe_smmu->desc_ring_base, IPA_SMMU_CB_AP);
fail_desc_ring:
	ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
		!map, IPA_SMMU_CB_AP);
fail_desc_HWTAIL:
	ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
		!map, IPA_SMMU_CB_AP);
fail_status_HWTAIL:
	if (Rx)
		ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
			!map, IPA_SMMU_CB_UC);
	else
		ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->desc_ring_HWHEAD_pa, PAGE_SIZE),
			!map, IPA_SMMU_CB_UC);
fail_desc_HWHEAD:
	if (!Rx)
		ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
			!map, IPA_SMMU_CB_AP);
fail:
	return result;
}
  483. static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
  484. {
  485. switch (notify->evt_id) {
  486. case GSI_CHAN_INVALID_TRE_ERR:
  487. IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
  488. break;
  489. case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
  490. IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
  491. break;
  492. case GSI_CHAN_OUT_OF_BUFFERS_ERR:
  493. IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
  494. break;
  495. case GSI_CHAN_OUT_OF_RESOURCES_ERR:
  496. IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
  497. break;
  498. case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
  499. IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
  500. break;
  501. case GSI_CHAN_HWO_1_ERR:
  502. IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
  503. break;
  504. default:
  505. IPAERR("Unexpected err evt: %d\n", notify->evt_id);
  506. }
  507. ipa_assert();
  508. }
  509. static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
  510. {
  511. switch (notify->evt_id) {
  512. case GSI_EVT_OUT_OF_BUFFERS_ERR:
  513. IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
  514. break;
  515. case GSI_EVT_OUT_OF_RESOURCES_ERR:
  516. IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
  517. break;
  518. case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
  519. IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
  520. break;
  521. case GSI_EVT_EVT_RING_EMPTY_ERR:
  522. IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
  523. break;
  524. default:
  525. IPAERR("Unexpected err evt: %d\n", notify->evt_id);
  526. }
  527. ipa_assert();
  528. }
/*
 * ipa3_wigig_config_gsi() - allocate and configure the GSI event ring and
 * channel for one WIGIG pipe.
 * @Rx: true for an Rx pipe, false for Tx
 * @smmu_en: true when SMMU is enabled (iova-based setup info), false for
 *           physical addresses
 * @pipe_info: ipa_wigig_pipe_setup_info{,_smmu}, selected by @smmu_en
 * @buff: ipa_wigig_{rx,tx}_pipe_data_buffer_info{,_smmu}, selected by
 *        @Rx and @smmu_en
 * @ep_gsi: static GSI configuration of this endpoint (channel number,
 *          prefetch settings)
 * @ep: endpoint context; receives the allocated event ring / channel
 *      handles and ring memory info
 *
 * The 11ad descriptor ring doubles as the GSI event ring, with the MSI
 * target set to the descriptor ring HWTAIL register. The GSI channel ring
 * is the status ring for Rx and the descriptor ring for Tx, and the channel
 * scratch area is filled with the peer ring/buffer addresses accordingly.
 *
 * Return: 0 on success, -EFAULT on any GSI failure (allocations already
 * made are released on the error path).
 */
static int ipa3_wigig_config_gsi(bool Rx,
	bool smmu_en,
	void *pipe_info,
	void *buff,
	const struct ipa_gsi_ep_config *ep_gsi,
	struct ipa3_ep_context *ep)
{
	struct gsi_evt_ring_props evt_props;
	struct gsi_chan_props channel_props;
	union __packed gsi_channel_scratch gsi_scratch;
	int gsi_res;
	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu;
	struct ipa_wigig_pipe_setup_info *pipe;
	struct ipa_wigig_rx_pipe_data_buffer_info *rx_dbuff;
	struct ipa_wigig_rx_pipe_data_buffer_info_smmu *rx_dbuff_smmu;
	struct ipa_wigig_tx_pipe_data_buffer_info *tx_dbuff;
	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *tx_dbuff_smmu;

	IPADBG("%s, %s\n", Rx ? "Rx" : "Tx", smmu_en ? "smmu en" : "smmu dis");

	/* alloc event ring */
	memset(&evt_props, 0, sizeof(evt_props));
	evt_props.intf = GSI_EVT_CHTYPE_11AD_EV;
	evt_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
	evt_props.intr = GSI_INTR_MSI;
	evt_props.intvec = 0;
	evt_props.exclusive = true;
	evt_props.err_cb = ipa_gsi_evt_ring_err_cb;
	evt_props.user_data = NULL;
	evt_props.int_modc = 200;
	evt_props.int_modt = 15;
	evt_props.ring_base_vaddr = NULL;

	/* the 11ad descriptor ring serves as the GSI event ring; MSI writes
	 * are steered at the descriptor ring HWTAIL register
	 */
	if (smmu_en) {
		pipe_smmu = (struct ipa_wigig_pipe_setup_info_smmu *)pipe_info;
		evt_props.ring_base_addr =
			pipe_smmu->desc_ring_base_iova;
		evt_props.ring_len = pipe_smmu->desc_ring_size;
		evt_props.msi_addr = pipe_smmu->desc_ring_HWTAIL_pa;
	} else {
		pipe = (struct ipa_wigig_pipe_setup_info *)pipe_info;
		evt_props.ring_base_addr = pipe->desc_ring_base_pa;
		evt_props.ring_len = pipe->desc_ring_size;
		evt_props.msi_addr = pipe->desc_ring_HWTAIL_pa;
	}

	gsi_res = gsi_alloc_evt_ring(&evt_props,
		ipa3_ctx->gsi_dev_hdl,
		&ep->gsi_evt_ring_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error allocating event ring: %d\n", gsi_res);
		return -EFAULT;
	}

	/* event scratch not configured by SW for TX channels */
	if (Rx) {
		union __packed gsi_evt_scratch evt_scratch;

		memset(&evt_scratch, 0, sizeof(evt_scratch));
		evt_scratch.w11ad.update_status_hwtail_mod_threshold = 200;
		gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
			evt_scratch);
		if (gsi_res != GSI_STATUS_SUCCESS) {
			IPAERR("Error writing WIGIG event ring scratch: %d\n",
				gsi_res);
			goto fail_write_evt_scratch;
		}
	}

	ep->gsi_mem_info.evt_ring_len = evt_props.ring_len;
	ep->gsi_mem_info.evt_ring_base_addr = evt_props.ring_base_addr;
	ep->gsi_mem_info.evt_ring_base_vaddr = evt_props.ring_base_vaddr;

	/* alloc channel ring */
	memset(&channel_props, 0, sizeof(channel_props));
	memset(&gsi_scratch, 0, sizeof(gsi_scratch));
	if (Rx)
		channel_props.dir = GSI_CHAN_DIR_TO_GSI;
	else
		channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
	channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
	channel_props.prot = GSI_CHAN_PROT_11AD;
	channel_props.ch_id = ep_gsi->ipa_gsi_chan_num;
	channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
	channel_props.xfer_cb = NULL;
	channel_props.use_db_eng = GSI_CHAN_DB_MODE;
	channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
	channel_props.prefetch_mode = ep_gsi->prefetch_mode;
	channel_props.empty_lvl_threshold = ep_gsi->prefetch_threshold;
	channel_props.low_weight = 1;
	channel_props.err_cb = ipa_gsi_chan_err_cb;
	channel_props.ring_base_vaddr = NULL;

	if (Rx) {
		/* Rx channel ring = status ring; scratch carries the status
		 * HWTAIL address and the fixed data-buffer geometry
		 */
		if (smmu_en) {
			rx_dbuff_smmu =
			(struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
			channel_props.ring_base_addr =
				pipe_smmu->status_ring_base_iova;
			channel_props.ring_len =
				pipe_smmu->status_ring_size;
			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(
					pipe_smmu->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
				IPA_WIGIG_MSB(
					pipe_smmu->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
				IPA_WIGIG_LSB(
					rx_dbuff_smmu->data_buffer_base_iova);
			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
				IPA_WIGIG_MSB(
					rx_dbuff_smmu->data_buffer_base_iova);
			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(rx_dbuff_smmu->data_buffer_size);
		} else {
			rx_dbuff =
				(struct ipa_wigig_rx_pipe_data_buffer_info *)buff;
			channel_props.ring_base_addr =
				pipe->status_ring_base_pa;
			channel_props.ring_len = pipe->status_ring_size;
			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(pipe->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
				IPA_WIGIG_MSB(pipe->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
				IPA_WIGIG_LSB(rx_dbuff->data_buffer_base_pa);
			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
				IPA_WIGIG_MSB(rx_dbuff->data_buffer_base_pa);
			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(rx_dbuff->data_buffer_size);
		}
		IPADBG("rx scratch: status_ring_hwtail_address_lsb 0x%X\n",
			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb);
		IPADBG("rx scratch: status_ring_hwtail_address_msb 0x%X\n",
			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb);
		IPADBG("rx scratch: data_buffers_base_address_lsb 0x%X\n",
			gsi_scratch.rx_11ad.data_buffers_base_address_lsb);
		IPADBG("rx scratch: data_buffers_base_address_msb 0x%X\n",
			gsi_scratch.rx_11ad.data_buffers_base_address_msb);
		IPADBG("rx scratch: fixed_data_buffer_size_pow_2 %d\n",
			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2);
		IPADBG("rx scratch 0x[%X][%X][%X][%X]\n",
			gsi_scratch.data.word1,
			gsi_scratch.data.word2,
			gsi_scratch.data.word3,
			gsi_scratch.data.word4);
	} else {
		/* Tx channel ring = descriptor ring; scratch carries status
		 * ring HWHEAD/HWTAIL and the fixed data-buffer geometry.
		 * NOTE(review): the shared 8-msb field is taken from HWHEAD
		 * only - presumably HWTAIL shares the same upper 8 bits;
		 * confirm against the HW spec.
		 */
		if (smmu_en) {
			tx_dbuff_smmu =
			(struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;
			channel_props.ring_base_addr =
				pipe_smmu->desc_ring_base_iova;
			channel_props.ring_len =
				pipe_smmu->desc_ring_size;
			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(
					pipe_smmu->status_ring_HWTAIL_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb =
				IPA_WIGIG_LSB(
					pipe_smmu->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb =
				IPA_WIGIG_8_MSB(
					pipe_smmu->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(tx_dbuff_smmu->data_buffer_size);
			gsi_scratch.tx_11ad.status_ring_num_elem =
				pipe_smmu->status_ring_size /
				IPA_WIGIG_STATUS_RING_EL_SIZE;
		} else {
			tx_dbuff =
				(struct ipa_wigig_tx_pipe_data_buffer_info *)buff;
			channel_props.ring_base_addr = pipe->desc_ring_base_pa;
			channel_props.ring_len = pipe->desc_ring_size;
			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(
					pipe->status_ring_HWTAIL_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb =
				IPA_WIGIG_LSB(
					pipe->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb =
				IPA_WIGIG_8_MSB(pipe->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.status_ring_num_elem =
				pipe->status_ring_size /
				IPA_WIGIG_STATUS_RING_EL_SIZE;
			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(tx_dbuff->data_buffer_size);
		}
		gsi_scratch.tx_11ad.update_status_hwtail_mod_threshold = 200;
		IPADBG("tx scratch: status_ring_hwtail_address_lsb 0x%X\n",
			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb);
		IPADBG("tx scratch: status_ring_hwhead_address_lsb 0x%X\n",
			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb);
		IPADBG("tx scratch: status_ring_hwhead_hwtail_8_msb 0x%X\n",
			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb);
		IPADBG("tx scratch:status_ring_num_elem %d\n",
			gsi_scratch.tx_11ad.status_ring_num_elem);
		IPADBG("tx scratch:fixed_data_buffer_size_pow_2 %d\n",
			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2);
		IPADBG("tx scratch 0x[%X][%X][%X][%X]\n",
			gsi_scratch.data.word1,
			gsi_scratch.data.word2,
			gsi_scratch.data.word3,
			gsi_scratch.data.word4);
	}

	IPADBG("ch_id: %d\n", channel_props.ch_id);
	IPADBG("evt_ring_hdl: %ld\n", channel_props.evt_ring_hdl);
	IPADBG("re_size: %d\n", channel_props.re_size);
	IPADBG("GSI channel ring len: %d\n", channel_props.ring_len);
	IPADBG("channel ring base addr = 0x%llX\n",
		(unsigned long long)channel_props.ring_base_addr);

	IPADBG("Allocating GSI channel\n");
	gsi_res = gsi_alloc_channel(&channel_props,
		ipa3_ctx->gsi_dev_hdl,
		&ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("gsi_alloc_channel failed %d\n", gsi_res);
		goto fail_alloc_channel;
	}

	IPADBG("Writing Channel scratch\n");
	ep->gsi_mem_info.chan_ring_len = channel_props.ring_len;
	ep->gsi_mem_info.chan_ring_base_addr = channel_props.ring_base_addr;
	ep->gsi_mem_info.chan_ring_base_vaddr =
		channel_props.ring_base_vaddr;
	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
		gsi_scratch);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("gsi_write_channel_scratch failed %d\n",
			gsi_res);
		goto fail_write_channel_scratch;
	}

	IPADBG("exit\n");
	return 0;

	/* unwind: dealloc the channel, then the event ring (the
	 * fail_alloc_channel label intentionally falls through to
	 * fail_write_evt_scratch)
	 */
fail_write_channel_scratch:
	gsi_dealloc_channel(ep->gsi_chan_hdl);
fail_alloc_channel:
fail_write_evt_scratch:
	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
	return -EFAULT;
}
  760. static int ipa3_wigig_config_uc(bool init,
  761. bool Rx,
  762. u8 wifi_ch,
  763. u8 gsi_ch,
  764. phys_addr_t HWHEAD)
  765. {
  766. struct ipa_mem_buffer cmd;
  767. enum ipa_cpu_2_hw_offload_commands command;
  768. int result;
  769. IPADBG("%s\n", init ? "init" : "Deinit");
  770. if (init) {
  771. struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data;
  772. cmd.size = sizeof(*cmd_data);
  773. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  774. &cmd.phys_base, GFP_KERNEL);
  775. if (cmd.base == NULL) {
  776. IPAERR("fail to get DMA memory.\n");
  777. return -ENOMEM;
  778. }
  779. cmd_data =
  780. (struct IpaHwOffloadSetUpCmdData_t_v4_0 *)cmd.base;
  781. cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
  782. cmd_data->SetupCh_params.W11AdSetupCh_params.dir =
  783. Rx ? W11AD_RX : W11AD_TX;
  784. cmd_data->SetupCh_params.W11AdSetupCh_params.gsi_ch = gsi_ch;
  785. cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_ch = wifi_ch;
  786. cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_msb =
  787. IPA_WIGIG_MSB(HWHEAD);
  788. cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_lsb =
  789. IPA_WIGIG_LSB(HWHEAD);
  790. command = IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP;
  791. } else {
  792. struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data;
  793. cmd.size = sizeof(*cmd_data);
  794. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  795. &cmd.phys_base, GFP_KERNEL);
  796. if (cmd.base == NULL) {
  797. IPAERR("fail to get DMA memory.\n");
  798. return -ENOMEM;
  799. }
  800. cmd_data =
  801. (struct IpaHwOffloadCommonChCmdData_t_v4_0 *)cmd.base;
  802. cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
  803. cmd_data->CommonCh_params.W11AdCommonCh_params.gsi_ch = gsi_ch;
  804. command = IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN;
  805. }
  806. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  807. result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
  808. command,
  809. IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
  810. false, 10 * HZ);
  811. if (result) {
  812. IPAERR("fail to %s uc for %s gsi channel %d\n",
  813. init ? "init" : "deinit",
  814. Rx ? "Rx" : "Tx", gsi_ch);
  815. }
  816. dma_free_coherent(ipa3_ctx->uc_pdev,
  817. cmd.size, cmd.base, cmd.phys_base);
  818. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  819. IPADBG("exit\n");
  820. return result;
  821. }
  822. int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out)
  823. {
  824. int ipa_ep_idx;
  825. struct ipa3_ep_context *ep;
  826. struct ipa_ep_cfg ep_cfg;
  827. enum ipa_client_type rx_client = IPA_CLIENT_WIGIG_PROD;
  828. bool is_smmu_enabled;
  829. struct ipa_wigig_conn_rx_in_params_smmu *input_smmu = NULL;
  830. struct ipa_wigig_conn_rx_in_params *input = NULL;
  831. const struct ipa_gsi_ep_config *ep_gsi;
  832. void *pipe_info;
  833. void *buff;
  834. phys_addr_t status_ring_HWHEAD_pa;
  835. int result;
  836. IPADBG("\n");
  837. ipa_ep_idx = ipa_get_ep_mapping(rx_client);
  838. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
  839. ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  840. IPAERR("fail to get ep (IPA_CLIENT_WIGIG_PROD) %d.\n",
  841. ipa_ep_idx);
  842. return -EFAULT;
  843. }
  844. ep = &ipa3_ctx->ep[ipa_ep_idx];
  845. if (ep->valid) {
  846. IPAERR("EP %d already allocated.\n", ipa_ep_idx);
  847. return -EFAULT;
  848. }
  849. if (ep->gsi_offload_state) {
  850. IPAERR("WIGIG channel bad state 0x%X\n",
  851. ep->gsi_offload_state);
  852. return -EFAULT;
  853. }
  854. ep_gsi = ipa3_get_gsi_ep_info(rx_client);
  855. if (!ep_gsi) {
  856. IPAERR("Failed getting GSI EP info for client=%d\n",
  857. rx_client);
  858. return -EPERM;
  859. }
  860. memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
  861. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  862. /* setup rx ep cfg */
  863. ep->valid = 1;
  864. ep->client = rx_client;
  865. result = ipa3_disable_data_path(ipa_ep_idx);
  866. if (result) {
  867. IPAERR("disable data path failed res=%d clnt=%d.\n", result,
  868. ipa_ep_idx);
  869. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  870. return -EFAULT;
  871. }
  872. is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
  873. if (is_smmu_enabled) {
  874. struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu;
  875. input_smmu = (struct ipa_wigig_conn_rx_in_params_smmu *)in;
  876. dbuff_smmu = &input_smmu->dbuff_smmu;
  877. ep->client_notify = input_smmu->notify;
  878. ep->priv = input_smmu->priv;
  879. IPADBG(
  880. "desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
  881. (unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
  882. input_smmu->pipe_smmu.desc_ring_size,
  883. (unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
  884. input_smmu->pipe_smmu.status_ring_size);
  885. IPADBG("data_buffer_base_iova 0x%llX data_buffer_size %d",
  886. (unsigned long long)dbuff_smmu->data_buffer_base_iova,
  887. input_smmu->dbuff_smmu.data_buffer_size);
  888. if (IPA_WIGIG_MSB(
  889. dbuff_smmu->data_buffer_base_iova) &
  890. 0xFFFFFF00) {
  891. IPAERR(
  892. "data_buffers_base_address_msb is over the 8 bit limit (0x%llX)\n",
  893. (unsigned long long)dbuff_smmu->data_buffer_base_iova);
  894. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  895. return -EFAULT;
  896. }
  897. if (dbuff_smmu->data_buffer_size >> 16) {
  898. IPAERR(
  899. "data_buffer_size is over the 16 bit limit (%d)\n"
  900. , dbuff_smmu->data_buffer_size);
  901. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  902. return -EFAULT;
  903. }
  904. } else {
  905. input = (struct ipa_wigig_conn_rx_in_params *)in;
  906. ep->client_notify = input->notify;
  907. ep->priv = input->priv;
  908. IPADBG(
  909. "desc_ring_base_pa %pa desc_ring_size %d status_ring_base_pa %pa status_ring_size %d",
  910. &input->pipe.desc_ring_base_pa,
  911. input->pipe.desc_ring_size,
  912. &input->pipe.status_ring_base_pa,
  913. input->pipe.status_ring_size);
  914. IPADBG("data_buffer_base_pa %pa data_buffer_size %d",
  915. &input->dbuff.data_buffer_base_pa,
  916. input->dbuff.data_buffer_size);
  917. if (
  918. IPA_WIGIG_MSB(input->dbuff.data_buffer_base_pa) & 0xFFFFFF00) {
  919. IPAERR(
  920. "data_buffers_base_address_msb is over the 8 bit limit (0x%pa)\n"
  921. , &input->dbuff.data_buffer_base_pa);
  922. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  923. return -EFAULT;
  924. }
  925. if (input->dbuff.data_buffer_size >> 16) {
  926. IPAERR(
  927. "data_buffer_size is over the 16 bit limit (0x%X)\n"
  928. , input->dbuff.data_buffer_size);
  929. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  930. return -EFAULT;
  931. }
  932. }
  933. memset(&ep_cfg, 0, sizeof(ep_cfg));
  934. ep_cfg.nat.nat_en = IPA_SRC_NAT;
  935. ep_cfg.hdr.hdr_len = ETH_HLEN;
  936. ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
  937. ep_cfg.hdr.hdr_ofst_pkt_size = 0;
  938. ep_cfg.hdr.hdr_additional_const_len = 0;
  939. ep_cfg.hdr_ext.hdr_little_endian = true;
  940. ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
  941. ep_cfg.hdr.hdr_metadata_reg_valid = 1;
  942. ep_cfg.mode.mode = IPA_BASIC;
  943. if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
  944. IPAERR("fail to setup rx pipe cfg\n");
  945. result = -EFAULT;
  946. goto fail;
  947. }
  948. if (is_smmu_enabled) {
  949. result = ipa3_wigig_smmu_map_channel(true,
  950. &input_smmu->pipe_smmu,
  951. &input_smmu->dbuff_smmu,
  952. true);
  953. if (result) {
  954. IPAERR("failed to setup rx pipe smmu map\n");
  955. result = -EFAULT;
  956. goto fail;
  957. }
  958. pipe_info = &input_smmu->pipe_smmu;
  959. buff = &input_smmu->dbuff_smmu;
  960. status_ring_HWHEAD_pa =
  961. input_smmu->pipe_smmu.status_ring_HWHEAD_pa;
  962. } else {
  963. pipe_info = &input->pipe;
  964. buff = &input->dbuff;
  965. status_ring_HWHEAD_pa =
  966. input->pipe.status_ring_HWHEAD_pa;
  967. }
  968. result = ipa3_wigig_config_gsi(true,
  969. is_smmu_enabled,
  970. pipe_info,
  971. buff,
  972. ep_gsi, ep);
  973. if (result)
  974. goto fail_gsi;
  975. result = ipa3_wigig_config_uc(
  976. true, true, 0,
  977. ep_gsi->ipa_gsi_chan_num,
  978. status_ring_HWHEAD_pa);
  979. if (result)
  980. goto fail_uc_config;
  981. ipa3_install_dflt_flt_rules(ipa_ep_idx);
  982. out->client = IPA_CLIENT_WIGIG_PROD;
  983. ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;
  984. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  985. IPADBG("wigig rx pipe connected successfully\n");
  986. IPADBG("exit\n");
  987. return 0;
  988. fail_uc_config:
  989. /* Release channel and evt*/
  990. ipa3_release_gsi_channel(ipa_ep_idx);
  991. fail_gsi:
  992. if (input_smmu)
  993. ipa3_wigig_smmu_map_channel(true, &input_smmu->pipe_smmu,
  994. &input_smmu->dbuff_smmu, false);
  995. fail:
  996. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  997. return result;
  998. }
/*
 * ipa3_conn_wigig_client_i() - connect a WIGIG Tx (IPA->WIGIG) pipe.
 * @in: either struct ipa_wigig_conn_tx_in_params_smmu or
 *	struct ipa_wigig_conn_tx_in_params, depending on whether the 11ad
 *	SMMU context bank is S1-enabled
 * @out: on success, filled with the IPA client type chosen for this
 *	Tx channel
 *
 * The Tx client is derived from the peripheral interrupt-generation bit
 * (int_gen_tx_bit_num) via ipa3_wigig_tx_bit_to_ep(). The EP is
 * configured with aggregation whose hard byte limit is derived from the
 * data buffer size, then the rings/buffers are SMMU-mapped if needed,
 * the GSI channel is configured and the uC is told to set the channel up.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ipa3_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out)
{
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	struct ipa_ep_cfg ep_cfg;
	enum ipa_client_type tx_client;
	bool is_smmu_enabled;
	struct ipa_wigig_conn_tx_in_params_smmu *input_smmu = NULL;
	struct ipa_wigig_conn_tx_in_params *input = NULL;
	const struct ipa_gsi_ep_config *ep_gsi;
	u32 aggr_byte_limit;
	int result;
	void *pipe_info;
	void *buff;
	phys_addr_t desc_ring_HWHEAD_pa;
	u8 wifi_ch;

	IPADBG("\n");

	/* parameter validation happens before any EP state is touched */
	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
	if (is_smmu_enabled) {
		input_smmu = (struct ipa_wigig_conn_tx_in_params_smmu *)in;
		IPADBG(
			"desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
			(unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
			input_smmu->pipe_smmu.desc_ring_size,
			(unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
			input_smmu->pipe_smmu.status_ring_size);
		IPADBG("num buffers %d, data buffer size %d\n",
			input_smmu->dbuff_smmu.num_buffers,
			input_smmu->dbuff_smmu.data_buffer_size);

		if (ipa3_wigig_tx_bit_to_ep(input_smmu->int_gen_tx_bit_num,
			&tx_client)) {
			return -EINVAL;
		}
		/* buffer size must fit the 16-bit scratch field */
		if (input_smmu->dbuff_smmu.data_buffer_size >> 16) {
			IPAERR(
				"data_buffer_size is over the 16 bit limit (0x%X)\n"
				, input_smmu->dbuff_smmu.data_buffer_size);
			return -EFAULT;
		}

		/*
		 * HWHEAD and HWTAIL share a single 8-bit MSB field in the
		 * channel scratch, so both registers must reside in the
		 * same 4GB+8bit region
		 */
		if (IPA_WIGIG_8_MSB(
			input_smmu->pipe_smmu.status_ring_HWHEAD_pa)
			!= IPA_WIGIG_8_MSB(
				input_smmu->pipe_smmu.status_ring_HWTAIL_pa)) {
			IPAERR(
				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
				, input_smmu->pipe_smmu.status_ring_HWHEAD_pa,
				input_smmu->pipe_smmu.status_ring_HWTAIL_pa);
			return -EFAULT;
		}

		wifi_ch = input_smmu->int_gen_tx_bit_num;

		/* convert to kBytes */
		aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
			input_smmu->dbuff_smmu.data_buffer_size);
	} else {
		input = (struct ipa_wigig_conn_tx_in_params *)in;
		IPADBG(
			"desc_ring_base_pa %pa desc_ring_size %d status_ring_base_pa %pa status_ring_size %d",
			&input->pipe.desc_ring_base_pa,
			input->pipe.desc_ring_size,
			&input->pipe.status_ring_base_pa,
			input->pipe.status_ring_size);
		IPADBG("data_buffer_size %d", input->dbuff.data_buffer_size);

		if (ipa3_wigig_tx_bit_to_ep(input->int_gen_tx_bit_num,
			&tx_client)) {
			return -EINVAL;
		}

		/* buffer size must fit the 16-bit scratch field */
		if (input->dbuff.data_buffer_size >> 16) {
			IPAERR(
				"data_buffer_size is over the 16 bit limit (0x%X)\n"
				, input->dbuff.data_buffer_size);
			return -EFAULT;
		}

		/* HWHEAD/HWTAIL share one 8-bit MSB scratch field */
		if (IPA_WIGIG_8_MSB(
			input->pipe.status_ring_HWHEAD_pa)
			!= IPA_WIGIG_8_MSB(
				input->pipe.status_ring_HWTAIL_pa)) {
			IPAERR(
				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
				, input->pipe.status_ring_HWHEAD_pa,
				input->pipe.status_ring_HWTAIL_pa);
			return -EFAULT;
		}

		wifi_ch = input->int_gen_tx_bit_num;

		/* convert to kBytes */
		aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
			input->dbuff.data_buffer_size);
	}
	IPADBG("client type is %d\n", tx_client);

	ipa_ep_idx = ipa_get_ep_mapping(tx_client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
		IPAERR("fail to get ep (%d) %d.\n",
			tx_client, ipa_ep_idx);
		return -EFAULT;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid) {
		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
		return -EFAULT;
	}

	if (ep->gsi_offload_state) {
		IPAERR("WIGIG channel bad state 0x%X\n",
			ep->gsi_offload_state);
		return -EFAULT;
	}

	ep_gsi = ipa3_get_gsi_ep_info(tx_client);
	if (!ep_gsi) {
		IPAERR("Failed getting GSI EP info for client=%d\n",
			tx_client);
		return -EFAULT;
	}

	/* keep ep->sys intact, clear everything before it */
	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* setup tx ep cfg */
	ep->valid = 1;
	ep->client = tx_client;
	result = ipa3_disable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
			ipa_ep_idx);
		goto fail;
	}

	ep->client_notify = NULL;
	ep->priv = NULL;

	memset(&ep_cfg, 0, sizeof(ep_cfg));
	ep_cfg.nat.nat_en = IPA_DST_NAT;
	ep_cfg.hdr.hdr_len = ETH_HLEN;
	ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
	ep_cfg.hdr.hdr_ofst_pkt_size = 0;
	ep_cfg.hdr.hdr_additional_const_len = 0;
	ep_cfg.hdr_ext.hdr_little_endian = true;
	ep_cfg.mode.mode = IPA_BASIC;

	/* config hard byte limit, max is the buffer size (in kB)*/
	ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
	ep_cfg.aggr.aggr = IPA_GENERIC;
	ep_cfg.aggr.aggr_pkt_limit = 1;
	ep_cfg.aggr.aggr_byte_limit = aggr_byte_limit;
	ep_cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;

	if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
		IPAERR("fail to setup rx pipe cfg\n");
		result = -EFAULT;
		goto fail;
	}

	if (is_smmu_enabled) {
		result = ipa3_wigig_smmu_map_channel(false,
			&input_smmu->pipe_smmu,
			&input_smmu->dbuff_smmu,
			true);
		if (result) {
			IPAERR(
				"failed to setup tx pipe smmu map client %d (ep %d)\n"
				, tx_client, ipa_ep_idx);
			result = -EFAULT;
			goto fail;
		}

		pipe_info = &input_smmu->pipe_smmu;
		buff = &input_smmu->dbuff_smmu;
		desc_ring_HWHEAD_pa =
			input_smmu->pipe_smmu.desc_ring_HWHEAD_pa;
	} else {
		pipe_info = &input->pipe;
		buff = &input->dbuff;
		desc_ring_HWHEAD_pa =
			input->pipe.desc_ring_HWHEAD_pa;
	}

	result = ipa3_wigig_config_gsi(false,
		is_smmu_enabled,
		pipe_info,
		buff,
		ep_gsi, ep);
	if (result)
		goto fail_gsi;

	/* Tx uC setup reports the desc ring HWHEAD, not the status ring */
	result = ipa3_wigig_config_uc(
		true, false, wifi_ch,
		ep_gsi->ipa_gsi_chan_num,
		desc_ring_HWHEAD_pa);
	if (result)
		goto fail_uc_config;

	out->client = tx_client;
	ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG("wigig client %d (ep %d) connected successfully\n", tx_client,
		ipa_ep_idx);
	return 0;

fail_uc_config:
	/* Release channel and evt*/
	ipa3_release_gsi_channel(ipa_ep_idx);
fail_gsi:
	if (input_smmu)
		ipa3_wigig_smmu_map_channel(false, &input_smmu->pipe_smmu,
			&input_smmu->dbuff_smmu, false);
fail:
	/* undo the allocation so a later connect attempt can succeed */
	ep->valid = 0;
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
/*
 * ipa3_disconn_wigig_pipe_i() - disconnect a previously connected WIGIG
 * pipe (either direction).
 * @client: IPA client type of the pipe to disconnect
 * @pipe_smmu: SMMU ring info used at connect time; must be non-NULL iff
 *	the 11ad SMMU context bank is S1-enabled
 * @dbuff: data-buffer info used at connect time (Rx or Tx flavor); same
 *	NULL-ness requirement as @pipe_smmu
 *
 * Teardown order: release the GSI channel and event ring, tell the uC to
 * tear the channel down, then undo the SMMU mappings. Finally the whole
 * EP context is cleared.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
	void *dbuff)
{
	bool is_smmu_enabled;
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	const struct ipa_gsi_ep_config *ep_gsi;
	int result;
	bool rx = false;

	IPADBG("\n");

	ipa_ep_idx = ipa_get_ep_mapping(client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
		IPAERR("fail to get ep (%d) %d.\n",
			client, ipa_ep_idx);
		return -EFAULT;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (!ep->valid) {
		IPAERR("Invalid EP\n");
		return -EFAULT;
	}

	ep_gsi = ipa3_get_gsi_ep_info(client);
	if (!ep_gsi) {
		IPAERR("Failed getting GSI EP info for client=%d\n",
			client);
		return -EFAULT;
	}

	/* pipe must be connected but not enabled to be disconnected */
	if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
		IPAERR("client in bad state(client %d) 0x%X\n",
			client, ep->gsi_offload_state);
		return -EFAULT;
	}

	if (client == IPA_CLIENT_WIGIG_PROD)
		rx = true;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* Release channel and evt*/
	result = ipa3_release_gsi_channel(ipa_ep_idx);
	if (result) {
		IPAERR("failed to deallocate channel\n");
		goto fail;
	}

	/* only gsi ch number and dir are necessary */
	result = ipa3_wigig_config_uc(
		false, rx, 0,
		ep_gsi->ipa_gsi_chan_num, 0);
	if (result) {
		/* channel is already gone; continue teardown regardless */
		IPAERR("failed uC channel teardown %d\n", result);
		WARN_ON(1);
	}

	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
	if (is_smmu_enabled) {
		if (!pipe_smmu || !dbuff) {
			IPAERR("smmu input is null %pK %pK\n",
				pipe_smmu, dbuff);
			WARN_ON(1);
		} else {
			result = ipa3_wigig_smmu_map_channel(rx,
				pipe_smmu,
				dbuff,
				false);
			if (result) {
				IPAERR(
					"failed to unmap pipe smmu %d (ep %d)\n"
					, client, ipa_ep_idx);
				result = -EFAULT;
				goto fail;
			}
		}

		/*
		 * after the last (Rx) pipe is unmapped the bookkeeping
		 * lists must be empty; leftovers indicate a map/unmap
		 * imbalance
		 */
		if (rx) {
			if (!list_empty(&smmu_reg_addr_list)) {
				IPAERR("smmu_reg_addr_list not empty\n");
				WARN_ON(1);
			}

			if (!list_empty(&smmu_ring_addr_list)) {
				IPAERR("smmu_ring_addr_list not empty\n");
				WARN_ON(1);
			}
		}
	} else if (pipe_smmu || dbuff) {
		IPAERR("smmu input is not null %pK %pK\n",
			pipe_smmu, dbuff);
		WARN_ON(1);
	}

	/* clears ep->valid as well, freeing the EP for reuse */
	memset(ep, 0, sizeof(struct ipa3_ep_context));

	ep->gsi_offload_state = 0;

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG("client (ep: %d) disconnected\n", ipa_ep_idx);
	IPADBG("exit\n");
	return 0;

fail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
/*
 * ipa3_wigig_uc_msi_init() - init/deinit the uC MSI path towards the
 * WIGIG peripheral.
 * @init: true to initialize, false to deinitialize
 * @periph_baddr_pa: WIGIG peripheral base address (sent to the uC)
 * @pseudo_cause_pa: WIGIG pseudo-cause register physical address
 * @int_gen_tx_pa: WIGIG Tx interrupt-generation register physical address
 * @int_gen_rx_pa: WIGIG Rx interrupt-generation register physical address
 * @dma_ep_misc_pa: WIGIG DMA EP misc register physical address
 *
 * When the uC SMMU context bank is S1-enabled, the four peripheral
 * registers are first mapped (on init) or unmapped (on deinit). Then a
 * PERIPHERAL_INIT/PERIPHERAL_DEINIT command carrying the peripheral base
 * address is sent to the uC.
 *
 * NOTE(review): the error ladder deliberately calls
 * ipa3_smmu_map_peer_reg(..., !map, ...) to ROLL BACK the earlier
 * map/unmap: on a failed init (map == true) the registers are unmapped
 * again; on a failed deinit (map == false) they are re-mapped. When the
 * uC SMMU CB is bypassed, fail_command still walks this ladder with
 * !map == true — presumably ipa3_smmu_map_peer_reg() is a no-op in
 * bypass mode; verify before changing this flow.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ipa3_wigig_uc_msi_init(bool init,
	phys_addr_t periph_baddr_pa,
	phys_addr_t pseudo_cause_pa,
	phys_addr_t int_gen_tx_pa,
	phys_addr_t int_gen_rx_pa,
	phys_addr_t dma_ep_misc_pa)
{
	int result;
	struct ipa_mem_buffer cmd;
	enum ipa_cpu_2_hw_offload_commands command;
	bool map = false;

	IPADBG("params: %s, %pa, %pa, %pa, %pa, %pa\n",
		init ? "init" : "deInit",
		&periph_baddr_pa,
		&pseudo_cause_pa,
		&int_gen_tx_pa,
		&int_gen_rx_pa,
		&dma_ep_misc_pa);

	/* first make sure registers are SMMU mapped if necessary*/
	if ((!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC])) {
		if (init)
			map = true;

		IPADBG("SMMU enabled, map %d\n", map);

		/* registers are mapped page-aligned */
		result = ipa3_smmu_map_peer_reg(
			rounddown(pseudo_cause_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s pseudo_cause reg %d\n",
				map ? "map" : "unmap",
				result);
			goto fail;
		}

		result = ipa3_smmu_map_peer_reg(
			rounddown(int_gen_tx_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s int_gen_tx reg %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_gen_tx;
		}

		result = ipa3_smmu_map_peer_reg(
			rounddown(int_gen_rx_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s int_gen_rx reg %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_gen_rx;
		}

		result = ipa3_smmu_map_peer_reg(
			rounddown(dma_ep_misc_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s dma_ep_misc reg %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_dma_ep_misc;
		}
	}

	/* now send the wigig hw base address to uC*/
	if (init) {
		struct IpaHwPeripheralInitCmdData_t *cmd_data;

		cmd.size = sizeof(*cmd_data);
		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
			&cmd.phys_base, GFP_KERNEL);
		if (cmd.base == NULL) {
			IPAERR("fail to get DMA memory.\n");
			result = -ENOMEM;
			/* only roll back the mappings if we made them */
			if (map)
				goto fail_alloc;
			return result;
		}

		cmd_data = (struct IpaHwPeripheralInitCmdData_t *)cmd.base;
		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
		cmd_data->Init_params.W11AdInit_params.periph_baddr_msb =
			IPA_WIGIG_MSB(periph_baddr_pa);
		cmd_data->Init_params.W11AdInit_params.periph_baddr_lsb =
			IPA_WIGIG_LSB(periph_baddr_pa);
		command = IPA_CPU_2_HW_CMD_PERIPHERAL_INIT;
	} else {
		struct IpaHwPeripheralDeinitCmdData_t *cmd_data;

		cmd.size = sizeof(*cmd_data);
		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
			&cmd.phys_base, GFP_KERNEL);
		if (cmd.base == NULL) {
			IPAERR("fail to get DMA memory.\n");
			result = -ENOMEM;
			/* only roll back the mappings if we touched them */
			if (map)
				goto fail_alloc;
			return result;
		}

		cmd_data = (struct IpaHwPeripheralDeinitCmdData_t *)cmd.base;
		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
		command = IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT;
	}

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
		command,
		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
		false, 10 * HZ);
	if (result) {
		IPAERR("fail to %s uc MSI config\n", init ? "init" : "deinit");
		goto fail_command;
	}

	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size,
		cmd.base, cmd.phys_base);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG("exit\n");
	return 0;

fail_command:
	dma_free_coherent(ipa3_ctx->uc_pdev,
		cmd.size,
		cmd.base, cmd.phys_base);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail_alloc:
	/* !map inverts the earlier operation on each register (rollback) */
	ipa3_smmu_map_peer_reg(
		rounddown(dma_ep_misc_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
fail_dma_ep_misc:
	ipa3_smmu_map_peer_reg(
		rounddown(int_gen_rx_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
fail_gen_rx:
	ipa3_smmu_map_peer_reg(
		rounddown(int_gen_tx_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
fail_gen_tx:
	ipa3_smmu_map_peer_reg(
		rounddown(pseudo_cause_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
fail:
	return result;
}
  1428. int ipa3_enable_wigig_pipe_i(enum ipa_client_type client)
  1429. {
  1430. int ipa_ep_idx, res;
  1431. struct ipa3_ep_context *ep;
  1432. struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
  1433. int retry_cnt = 0;
  1434. uint64_t val;
  1435. IPADBG("\n");
  1436. ipa_ep_idx = ipa_get_ep_mapping(client);
  1437. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
  1438. ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1439. IPAERR("fail to get ep (%d) %d.\n",
  1440. client, ipa_ep_idx);
  1441. return -EFAULT;
  1442. }
  1443. ep = &ipa3_ctx->ep[ipa_ep_idx];
  1444. if (!ep->valid) {
  1445. IPAERR("Invalid EP\n");
  1446. return -EFAULT;
  1447. }
  1448. if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
  1449. IPAERR("WIGIG channel bad state 0x%X\n",
  1450. ep->gsi_offload_state);
  1451. return -EFAULT;
  1452. }
  1453. IPA_ACTIVE_CLIENTS_INC_EP(client);
  1454. res = ipa3_enable_data_path(ipa_ep_idx);
  1455. if (res)
  1456. goto fail_enable_datapath;
  1457. memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
  1458. ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
  1459. /* ring the event db (outside the ring boundary)*/
  1460. val = ep->gsi_mem_info.evt_ring_base_addr +
  1461. ep->gsi_mem_info.evt_ring_len;
  1462. res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl, val);
  1463. if (res) {
  1464. IPAERR(
  1465. "fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n"
  1466. , res, ep->gsi_evt_ring_hdl,
  1467. (unsigned long long)val);
  1468. res = -EFAULT;
  1469. goto fail_ring_evt;
  1470. }
  1471. IPADBG("start channel\n");
  1472. res = gsi_start_channel(ep->gsi_chan_hdl);
  1473. if (res != GSI_STATUS_SUCCESS) {
  1474. IPAERR("gsi_start_channel failed %d\n", res);
  1475. WARN_ON(1);
  1476. res = -EFAULT;
  1477. goto fail_gsi_start;
  1478. }
  1479. /* for TX we have to ring the channel db (last desc in the ring) */
  1480. if (client != IPA_CLIENT_WIGIG_PROD) {
  1481. uint64_t val;
  1482. val = ep->gsi_mem_info.chan_ring_base_addr +
  1483. ep->gsi_mem_info.chan_ring_len -
  1484. IPA_WIGIG_DESC_RING_EL_SIZE;
  1485. IPADBG("ring ch doorbell (0x%llX) TX %ld\n", val,
  1486. ep->gsi_chan_hdl);
  1487. res = gsi_ring_ch_ring_db(ep->gsi_chan_hdl, val);
  1488. if (res) {
  1489. IPAERR(
  1490. "fail to ring channel db %d. hdl=%lu wp=0x%llx\n"
  1491. , res, ep->gsi_chan_hdl,
  1492. (unsigned long long)val);
  1493. res = -EFAULT;
  1494. goto fail_ring_ch;
  1495. }
  1496. }
  1497. ep->gsi_offload_state |= IPA_WIGIG_ENABLED;
  1498. IPADBG("exit\n");
  1499. return 0;
  1500. fail_ring_ch:
  1501. res = ipa3_stop_gsi_channel(ipa_ep_idx);
  1502. if (res != 0 && res != -GSI_STATUS_AGAIN &&
  1503. res != -GSI_STATUS_TIMED_OUT) {
  1504. IPAERR("failed to stop channel res = %d\n", res);
  1505. } else if (res == -GSI_STATUS_AGAIN) {
  1506. IPADBG("GSI stop channel failed retry cnt = %d\n",
  1507. retry_cnt);
  1508. retry_cnt++;
  1509. if (retry_cnt < GSI_STOP_MAX_RETRY_CNT)
  1510. goto fail_ring_ch;
  1511. } else {
  1512. IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl);
  1513. }
  1514. res = -EFAULT;
  1515. fail_gsi_start:
  1516. fail_ring_evt:
  1517. ipa3_disable_data_path(ipa_ep_idx);
  1518. fail_enable_datapath:
  1519. IPA_ACTIVE_CLIENTS_DEC_EP(client);
  1520. return res;
  1521. }
  1522. int ipa3_disable_wigig_pipe_i(enum ipa_client_type client)
  1523. {
  1524. int ipa_ep_idx, res;
  1525. struct ipa3_ep_context *ep;
  1526. struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
  1527. struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
  1528. bool disable_force_clear = false;
  1529. u32 source_pipe_bitmask = 0;
  1530. int retry_cnt = 0;
  1531. IPADBG("\n");
  1532. ipa_ep_idx = ipa_get_ep_mapping(client);
  1533. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
  1534. ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1535. IPAERR("fail to get ep (%d) %d.\n",
  1536. client, ipa_ep_idx);
  1537. return -EFAULT;
  1538. }
  1539. if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1540. IPAERR("ep %d out of range.\n", ipa_ep_idx);
  1541. return -EFAULT;
  1542. }
  1543. ep = &ipa3_ctx->ep[ipa_ep_idx];
  1544. if (!ep->valid) {
  1545. IPAERR("Invalid EP\n");
  1546. return -EFAULT;
  1547. }
  1548. if (ep->gsi_offload_state !=
  1549. (IPA_WIGIG_CONNECTED | IPA_WIGIG_ENABLED)) {
  1550. IPAERR("WIGIG channel bad state 0x%X\n",
  1551. ep->gsi_offload_state);
  1552. return -EFAULT;
  1553. }
  1554. IPADBG("pipe %d\n", ipa_ep_idx);
  1555. source_pipe_bitmask = 1 << ipa_ep_idx;
  1556. res = ipa3_enable_force_clear(ipa_ep_idx,
  1557. false, source_pipe_bitmask);
  1558. if (res) {
  1559. /*
  1560. * assuming here modem SSR, AP can remove
  1561. * the delay in this case
  1562. */
  1563. IPAERR("failed to force clear %d\n", res);
  1564. IPAERR("remove delay from SCND reg\n");
  1565. ep_ctrl_scnd.endp_delay = false;
  1566. ipahal_write_reg_n_fields(
  1567. IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx,
  1568. &ep_ctrl_scnd);
  1569. } else {
  1570. disable_force_clear = true;
  1571. }
  1572. retry_gsi_stop:
  1573. res = ipa3_stop_gsi_channel(ipa_ep_idx);
  1574. if (res != 0 && res != -GSI_STATUS_AGAIN &&
  1575. res != -GSI_STATUS_TIMED_OUT) {
  1576. IPAERR("failed to stop channel res = %d\n", res);
  1577. goto fail_stop_channel;
  1578. } else if (res == -GSI_STATUS_AGAIN) {
  1579. IPADBG("GSI stop channel failed retry cnt = %d\n",
  1580. retry_cnt);
  1581. retry_cnt++;
  1582. if (retry_cnt >= GSI_STOP_MAX_RETRY_CNT)
  1583. goto fail_stop_channel;
  1584. goto retry_gsi_stop;
  1585. } else {
  1586. IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl);
  1587. }
  1588. res = ipa3_reset_gsi_channel(ipa_ep_idx);
  1589. if (res != GSI_STATUS_SUCCESS) {
  1590. IPAERR("Failed to reset chan: %d.\n", res);
  1591. goto fail_stop_channel;
  1592. }
  1593. if (disable_force_clear)
  1594. ipa3_disable_force_clear(ipa_ep_idx);
  1595. res = ipa3_disable_data_path(ipa_ep_idx);
  1596. if (res) {
  1597. WARN_ON(1);
  1598. return res;
  1599. }
  1600. /* Set the delay after disabling IPA Producer pipe */
  1601. if (IPA_CLIENT_IS_PROD(ep->client)) {
  1602. memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
  1603. ep_cfg_ctrl.ipa_ep_delay = true;
  1604. ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
  1605. }
  1606. ep->gsi_offload_state &= ~IPA_WIGIG_ENABLED;
  1607. IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx));
  1608. IPADBG("exit\n");
  1609. return 0;
  1610. fail_stop_channel:
  1611. ipa_assert();
  1612. return res;
  1613. }