ipa_wigig_i.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include "ipa_i.h"
  6. #include <linux/if_ether.h>
  7. #include <linux/log2.h>
  8. #include <linux/debugfs.h>
  9. #include <linux/ipa_wigig.h>
/* 11ad HW ring element sizes, in bytes */
#define IPA_WIGIG_DESC_RING_EL_SIZE 32
#define IPA_WIGIG_STATUS_RING_EL_SIZE 16
/* NOTE(review): not referenced in this chunk — presumably used by channel
 * stop logic elsewhere in the file; confirm before removing
 */
#define GSI_STOP_MAX_RETRY_CNT 10
/* pipe state flags */
#define IPA_WIGIG_CONNECTED BIT(0)
#define IPA_WIGIG_ENABLED BIT(1)
/* split a 64-bit address into 32-bit halves for GSI scratch programming */
#define IPA_WIGIG_MSB_MASK 0xFFFFFFFF00000000
#define IPA_WIGIG_LSB_MASK 0x00000000FFFFFFFF
#define IPA_WIGIG_MSB(num) ((u32)((num & IPA_WIGIG_MSB_MASK) >> 32))
#define IPA_WIGIG_LSB(num) ((u32)(num & IPA_WIGIG_LSB_MASK))
/* extract PCIE addresses [0:39] relevant msb */
#define IPA_WIGIG_8_MSB_MASK 0xFF00000000
#define IPA_WIGIG_8_MSB(num) ((u32)((num & IPA_WIGIG_8_MSB_MASK) >> 32))
#define W11AD_RX 0
#define W11AD_TX 1
/* uC mailbox (m, n) indices used as the 11ad-to-GSI doorbell register */
#define W11AD_TO_GSI_DB_m 1
#define W11AD_TO_GSI_DB_n 1
/* refcounted bookkeeping of peer register pages / rings already mapped
 * into IPA SMMU context banks (see ipa3_wigig_smmu_map_reg/_ring)
 */
static LIST_HEAD(smmu_reg_addr_list);
static LIST_HEAD(smmu_ring_addr_list);
/* protects both lists above */
static DEFINE_MUTEX(smmu_lock);
/* debugfs directory for wigig entries */
struct dentry *wigig_dent;
/*
 * Refcounted record of a peer register page mapped to an IPA SMMU
 * context bank; keyed by (phys_addr, cb_type).
 */
struct ipa_wigig_smmu_reg_addr {
	struct list_head link;		/* entry in smmu_reg_addr_list */
	phys_addr_t phys_addr;		/* page-aligned register address */
	enum ipa_smmu_cb_type cb_type;	/* which context bank it is mapped to */
	u8 count;			/* outstanding map requests */
};
/*
 * Refcounted record of a ring buffer mapped to an IPA SMMU context
 * bank; keyed by (iova, cb_type).
 */
struct ipa_wigig_smmu_ring_addr {
	struct list_head link;		/* entry in smmu_ring_addr_list */
	u64 iova;			/* ring base IO virtual address */
	enum ipa_smmu_cb_type cb_type;	/* which context bank it is mapped to */
	u8 count;			/* outstanding map requests */
};
  42. static int ipa3_wigig_uc_loaded_handler(struct notifier_block *self,
  43. unsigned long val, void *data)
  44. {
  45. IPADBG("val %ld\n", val);
  46. if (!ipa3_ctx) {
  47. IPAERR("IPA ctx is null\n");
  48. return -EINVAL;
  49. }
  50. WARN_ON(data != ipa3_ctx);
  51. if (ipa3_ctx->uc_wigig_ctx.uc_ready_cb) {
  52. ipa3_ctx->uc_wigig_ctx.uc_ready_cb(
  53. ipa3_ctx->uc_wigig_ctx.priv);
  54. ipa3_ctx->uc_wigig_ctx.uc_ready_cb =
  55. NULL;
  56. ipa3_ctx->uc_wigig_ctx.priv = NULL;
  57. }
  58. IPADBG("exit\n");
  59. return 0;
  60. }
  61. static struct notifier_block uc_loaded_notifier = {
  62. .notifier_call = ipa3_wigig_uc_loaded_handler,
  63. };
  64. int ipa3_wigig_init_i(void)
  65. {
  66. IPADBG("\n");
  67. ipa3_uc_register_ready_cb(&uc_loaded_notifier);
  68. IPADBG("exit\n");
  69. return 0;
  70. }
  71. int ipa3_wigig_internal_init(
  72. struct ipa_wdi_uc_ready_params *inout,
  73. ipa_wigig_misc_int_cb int_notify,
  74. phys_addr_t *uc_db_pa)
  75. {
  76. int result = 0;
  77. IPADBG("\n");
  78. if (inout == NULL) {
  79. IPAERR("inout is NULL");
  80. return -EINVAL;
  81. }
  82. if (int_notify == NULL) {
  83. IPAERR("int_notify is NULL");
  84. return -EINVAL;
  85. }
  86. result = ipa3_uc_state_check();
  87. if (result) {
  88. inout->is_uC_ready = false;
  89. ipa3_ctx->uc_wigig_ctx.uc_ready_cb = inout->notify;
  90. } else {
  91. inout->is_uC_ready = true;
  92. }
  93. ipa3_ctx->uc_wigig_ctx.priv = inout->priv;
  94. ipa3_ctx->uc_wigig_ctx.misc_notify_cb = int_notify;
  95. *uc_db_pa = ipa3_ctx->ipa_wrapper_base +
  96. ipahal_get_reg_base() +
  97. ipahal_get_reg_mn_ofst(
  98. IPA_UC_MAILBOX_m_n,
  99. W11AD_TO_GSI_DB_m,
  100. W11AD_TO_GSI_DB_n);
  101. IPADBG("exit\n");
  102. return 0;
  103. }
  104. static int ipa3_wigig_tx_bit_to_ep(
  105. const u8 tx_bit_num,
  106. enum ipa_client_type *type)
  107. {
  108. IPADBG("tx_bit_num %d\n", tx_bit_num);
  109. switch (tx_bit_num) {
  110. case 2:
  111. *type = IPA_CLIENT_WIGIG1_CONS;
  112. break;
  113. case 3:
  114. *type = IPA_CLIENT_WIGIG2_CONS;
  115. break;
  116. case 4:
  117. *type = IPA_CLIENT_WIGIG3_CONS;
  118. break;
  119. case 5:
  120. *type = IPA_CLIENT_WIGIG4_CONS;
  121. break;
  122. default:
  123. IPAERR("invalid tx_bit_num %d\n", tx_bit_num);
  124. return -EINVAL;
  125. }
  126. IPADBG("exit\n");
  127. return 0;
  128. }
  129. static int ipa3_wigig_smmu_map_buffers(bool Rx,
  130. struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
  131. void *buff,
  132. bool map)
  133. {
  134. int result;
  135. /* data buffers */
  136. if (Rx) {
  137. struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu =
  138. (struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
  139. int num_elem =
  140. pipe_smmu->desc_ring_size /
  141. IPA_WIGIG_DESC_RING_EL_SIZE;
  142. result = ipa3_smmu_map_peer_buff(
  143. dbuff_smmu->data_buffer_base_iova,
  144. dbuff_smmu->data_buffer_size * num_elem,
  145. map,
  146. &dbuff_smmu->data_buffer_base,
  147. IPA_SMMU_CB_11AD);
  148. if (result) {
  149. IPAERR(
  150. "failed to %s rx data_buffer %d, num elem %d\n"
  151. , map ? "map" : "unmap",
  152. result, num_elem);
  153. goto fail_map_buff;
  154. }
  155. } else {
  156. int i;
  157. struct ipa_wigig_tx_pipe_data_buffer_info_smmu *dbuff_smmu =
  158. (struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;
  159. for (i = 0; i < dbuff_smmu->num_buffers; i++) {
  160. result = ipa3_smmu_map_peer_buff(
  161. *(dbuff_smmu->data_buffer_base_iova + i),
  162. dbuff_smmu->data_buffer_size,
  163. map,
  164. (dbuff_smmu->data_buffer_base + i),
  165. IPA_SMMU_CB_11AD);
  166. if (result) {
  167. IPAERR(
  168. "%d: failed to %s tx data buffer %d\n"
  169. , i, map ? "map" : "unmap",
  170. result);
  171. for (i--; i >= 0; i--) {
  172. result = ipa3_smmu_map_peer_buff(
  173. *(dbuff_smmu->data_buffer_base_iova +
  174. i),
  175. dbuff_smmu->data_buffer_size,
  176. !map,
  177. (dbuff_smmu->data_buffer_base +
  178. i),
  179. IPA_SMMU_CB_11AD);
  180. }
  181. goto fail_map_buff;
  182. }
  183. }
  184. }
  185. IPADBG("exit\n");
  186. return 0;
  187. fail_map_buff:
  188. return result;
  189. }
  190. static int ipa3_wigig_smmu_map_reg(phys_addr_t phys_addr, bool map,
  191. enum ipa_smmu_cb_type cb_type)
  192. {
  193. struct ipa_wigig_smmu_reg_addr *entry;
  194. struct ipa_wigig_smmu_reg_addr *next;
  195. int result = 0;
  196. IPADBG("addr %pa, %s\n", &phys_addr, map ? "map" : "unmap");
  197. mutex_lock(&smmu_lock);
  198. list_for_each_entry_safe(entry, next, &smmu_reg_addr_list, link) {
  199. if ((entry->phys_addr == phys_addr) &&
  200. (entry->cb_type == cb_type)) {
  201. IPADBG("cb %d, page %pa already mapped, ", cb_type,
  202. &phys_addr);
  203. if (map) {
  204. entry->count++;
  205. IPADBG("inc to %d\n", (entry->count));
  206. } else {
  207. --entry->count;
  208. IPADBG("dec to %d\n", entry->count);
  209. if (!(entry->count)) {
  210. IPADBG("unmap and delete\n");
  211. result = ipa3_smmu_map_peer_reg(
  212. phys_addr, map, cb_type);
  213. if (result) {
  214. IPAERR("failed to unmap %pa\n",
  215. &phys_addr);
  216. goto finish;
  217. }
  218. list_del(&entry->link);
  219. kfree(entry);
  220. }
  221. }
  222. goto finish;
  223. }
  224. }
  225. IPADBG("new page found %pa, map and add to list CB %d\n", &phys_addr,
  226. cb_type);
  227. result = ipa3_smmu_map_peer_reg(phys_addr, map, cb_type);
  228. if (result) {
  229. IPAERR("failed to map %pa\n", &phys_addr);
  230. goto finish;
  231. }
  232. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  233. if (entry == NULL) {
  234. IPAERR("couldn't allocate for %pa\n", &phys_addr);
  235. ipa3_smmu_map_peer_reg(phys_addr, !map, cb_type);
  236. result = -ENOMEM;
  237. goto finish;
  238. }
  239. INIT_LIST_HEAD(&entry->link);
  240. entry->phys_addr = phys_addr;
  241. entry->cb_type = cb_type;
  242. entry->count = 1;
  243. list_add(&entry->link, &smmu_reg_addr_list);
  244. finish:
  245. mutex_unlock(&smmu_lock);
  246. IPADBG("exit\n");
  247. return result;
  248. }
  249. static int ipa3_wigig_smmu_map_ring(u64 iova, u32 size, bool map,
  250. struct sg_table *sgt, enum ipa_smmu_cb_type cb_type)
  251. {
  252. struct ipa_wigig_smmu_ring_addr *entry;
  253. struct ipa_wigig_smmu_ring_addr *next;
  254. int result = 0;
  255. IPADBG("iova %llX, %s\n", iova, map ? "map" : "unmap");
  256. mutex_lock(&smmu_lock);
  257. list_for_each_entry_safe(entry, next, &smmu_ring_addr_list, link) {
  258. if ((entry->iova == iova) &&
  259. (entry->cb_type == cb_type)) {
  260. IPADBG("cb %d, page 0x%llX already mapped, ", cb_type,
  261. iova);
  262. if (map) {
  263. entry->count++;
  264. IPADBG("inc to %d\n", (entry->count));
  265. } else {
  266. --entry->count;
  267. IPADBG("dec to %d\n", entry->count);
  268. if (!(entry->count)) {
  269. IPADBG("unmap and delete\n");
  270. result = ipa3_smmu_map_peer_buff(
  271. iova, size, map, sgt, cb_type);
  272. if (result) {
  273. IPAERR(
  274. "failed to unmap 0x%llX\n",
  275. iova);
  276. goto finish;
  277. }
  278. list_del(&entry->link);
  279. kfree(entry);
  280. }
  281. }
  282. goto finish;
  283. }
  284. }
  285. IPADBG("new page found 0x%llX, map and add to list\n", iova);
  286. result = ipa3_smmu_map_peer_buff(iova, size, map, sgt, cb_type);
  287. if (result) {
  288. IPAERR("failed to map 0x%llX\n", iova);
  289. goto finish;
  290. }
  291. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  292. if (entry == NULL) {
  293. IPAERR("couldn't allocate for 0x%llX\n", iova);
  294. ipa3_smmu_map_peer_buff(iova, size, !map, sgt, cb_type);
  295. result = -ENOMEM;
  296. goto finish;
  297. }
  298. INIT_LIST_HEAD(&entry->link);
  299. entry->iova = iova;
  300. entry->cb_type = cb_type;
  301. entry->count = 1;
  302. list_add(&entry->link, &smmu_ring_addr_list);
  303. finish:
  304. mutex_unlock(&smmu_lock);
  305. IPADBG("exit\n");
  306. return result;
  307. }
/*
 * Map (or unmap, per 'map') everything a WIGIG channel needs across the
 * three SMMU context banks, per the table below. On a failure mid-way,
 * the goto ladder rolls back, in reverse order, only what was already
 * done (each rollback call uses !map).
 */
static int ipa3_wigig_smmu_map_channel(bool Rx,
	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
	void *buff,
	bool map)
{
	int result = 0;
	struct ipa_smmu_cb_ctx *smmu_ctx = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);

	IPADBG("\n");
	/*
	 * --------------------------------------------------------------------
	 * entity |HWHEAD|HWTAIL|HWHEAD|HWTAIL| misc | buffers| rings|
	 * |Sring |Sring |Dring |Dring | regs | | |
	 * --------------------------------------------------------------------
	 * GSI (apps CB) | TX |RX, TX| |RX, TX| | |Rx, TX|
	 * --------------------------------------------------------------------
	 * IPA (11AD CB) | | | | | | RX, TX | |
	 * --------------------------------------------------------------------
	 * uc (uC CB) | RX | | TX | |always| | |
	 * --------------------------------------------------------------------
	 *
	 * buffers are mapped to 11AD CB. in case this context bank is shared,
	 * mapping is done by 11ad driver only and applies to both 11ad and
	 * IPA HWs (page tables are shared). Otherwise, mapping is done here.
	 */
	if (!smmu_ctx) {
		IPAERR("11AD SMMU ctx is null\n");
		return -EINVAL;
	}
	if (Rx) {
		/* RX: status ring HWHEAD register goes to the uC CB */
		IPADBG("RX %s status_ring_HWHEAD_pa %pa uC CB\n",
			map ? "map" : "unmap",
			&pipe_smmu->status_ring_HWHEAD_pa);
		result = ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s status_ring_HWAHEAD %d\n",
				map ? "map" : "unmap",
				result);
			goto fail;
		}
	} else {
		/* TX: status ring HWHEAD to AP CB, desc ring HWHEAD to uC CB */
		IPADBG("TX %s status_ring_HWHEAD_pa %pa AP CB\n",
			map ? "map" : "unmap",
			&pipe_smmu->status_ring_HWHEAD_pa);
		result = ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa,
				PAGE_SIZE),
			map,
			IPA_SMMU_CB_AP);
		if (result) {
			IPAERR(
				"failed to %s status_ring_HWAHEAD %d\n",
				map ? "map" : "unmap",
				result);
			goto fail;
		}
		IPADBG("TX %s desc_ring_HWHEAD_pa %pa uC CB\n",
			map ? "map" : "unmap",
			&pipe_smmu->desc_ring_HWHEAD_pa);
		result = ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->desc_ring_HWHEAD_pa,
				PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR("failed to %s desc_ring_HWHEAD %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_desc_HWHEAD;
		}
	}
	/* both directions: HWTAIL registers go to the AP CB */
	IPADBG("%s status_ring_HWTAIL_pa %pa AP CB\n",
		map ? "map" : "unmap",
		&pipe_smmu->status_ring_HWTAIL_pa);
	result = ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
		map,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR(
			"failed to %s status_ring_HWTAIL %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_status_HWTAIL;
	}
	IPADBG("%s desc_ring_HWTAIL_pa %pa AP CB\n",
		map ? "map" : "unmap",
		&pipe_smmu->desc_ring_HWTAIL_pa);
	result = ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
		map,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR("failed to %s desc_ring_HWTAIL %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_desc_HWTAIL;
	}
	/* rings */
	IPADBG("%s desc_ring_base_iova %llX AP CB\n",
		map ? "map" : "unmap",
		pipe_smmu->desc_ring_base_iova);
	result = ipa3_wigig_smmu_map_ring(
		pipe_smmu->desc_ring_base_iova,
		pipe_smmu->desc_ring_size,
		map,
		&pipe_smmu->desc_ring_base,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR("failed to %s desc_ring_base %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_desc_ring;
	}
	IPADBG("%s status_ring_base_iova %llX AP CB\n",
		map ? "map" : "unmap",
		pipe_smmu->status_ring_base_iova);
	result = ipa3_wigig_smmu_map_ring(
		pipe_smmu->status_ring_base_iova,
		pipe_smmu->status_ring_size,
		map,
		&pipe_smmu->status_ring_base,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR("failed to %s status_ring_base %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_status_ring;
	}
	/* shared CB: 11ad driver already mapped buffers (shared tables) */
	if (!smmu_ctx->shared) {
		IPADBG("CB not shared - map buffers\n");
		result = ipa3_wigig_smmu_map_buffers(Rx, pipe_smmu, buff, map);
		if (result) {
			IPAERR("failed to %s buffers %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_buffers;
		}
	}
	IPADBG("exit\n");
	return 0;
	/* rollback ladder: undo (with !map) everything done above, in reverse */
fail_buffers:
	ipa3_wigig_smmu_map_ring(
		pipe_smmu->status_ring_base_iova, pipe_smmu->status_ring_size,
		!map, &pipe_smmu->status_ring_base, IPA_SMMU_CB_AP);
fail_status_ring:
	ipa3_wigig_smmu_map_ring(
		pipe_smmu->desc_ring_base_iova, pipe_smmu->desc_ring_size,
		!map, &pipe_smmu->desc_ring_base, IPA_SMMU_CB_AP);
fail_desc_ring:
	ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
		!map, IPA_SMMU_CB_AP);
fail_desc_HWTAIL:
	ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
		!map, IPA_SMMU_CB_AP);
fail_status_HWTAIL:
	if (Rx)
		ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
			!map, IPA_SMMU_CB_UC);
	else
		ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->desc_ring_HWHEAD_pa, PAGE_SIZE),
			!map, IPA_SMMU_CB_UC);
fail_desc_HWHEAD:
	if (!Rx)
		ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
			!map, IPA_SMMU_CB_AP);
fail:
	return result;
}
/*
 * GSI channel error callback: log the specific channel error event and
 * assert — none of these errors are recoverable at this layer.
 */
static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_CHAN_INVALID_TRE_ERR:
		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
		break;
	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_CHAN_HWO_1_ERR:
		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
	/* unconditional: any channel error is fatal here */
	ipa_assert();
}
/*
 * GSI event ring error callback: log the specific event ring error and
 * assert — none of these errors are recoverable at this layer.
 */
static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_EVT_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_EVT_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_EVT_EVT_RING_EMPTY_ERR:
		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
	/* unconditional: any event ring error is fatal here */
	ipa_assert();
}
/* event ring interrupt moderation: timer and counter fed into
 * gsi_evt_ring_props (int_modt / int_modc) in ipa3_wigig_config_gsi()
 */
static uint16_t int_modt = 15;
static uint8_t int_modc = 200;
/* update_status_hwtail_mod_threshold values programmed into the TX
 * channel scratch and RX event ring scratch respectively
 */
static uint8_t tx_hwtail_mod_threshold = 200;
static uint8_t rx_hwtail_mod_threshold = 200;
/*
 * Allocate and program the GSI event ring and channel for one WIGIG
 * pipe.
 *
 * @Rx:        true for the (single) Rx pipe, false for a Tx pipe.
 * @smmu_en:   selects the SMMU (iova) or non-SMMU (pa) flavor of
 *             pipe_info/buff.
 * @pipe_info: ipa_wigig_pipe_setup_info[_smmu], per smmu_en.
 * @buff:      rx/tx data buffer info struct, per Rx and smmu_en.
 * @ep_gsi:    static GSI config for this endpoint (channel number,
 *             prefetch settings).
 * @ep:        endpoint context; receives ring handles and mem info.
 *
 * The desc ring doubles as the event ring (MSI written to the desc ring
 * HWTAIL register); the status ring backs the channel. Returns 0 on
 * success, -EFAULT on any GSI failure (rings deallocated on rollback).
 */
static int ipa3_wigig_config_gsi(bool Rx,
	bool smmu_en,
	void *pipe_info,
	void *buff,
	const struct ipa_gsi_ep_config *ep_gsi,
	struct ipa3_ep_context *ep)
{
	struct gsi_evt_ring_props evt_props;
	struct gsi_chan_props channel_props;
	union __packed gsi_channel_scratch gsi_scratch;
	int gsi_res;
	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu;
	struct ipa_wigig_pipe_setup_info *pipe;
	struct ipa_wigig_rx_pipe_data_buffer_info *rx_dbuff;
	struct ipa_wigig_rx_pipe_data_buffer_info_smmu *rx_dbuff_smmu;
	struct ipa_wigig_tx_pipe_data_buffer_info *tx_dbuff;
	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *tx_dbuff_smmu;

	IPADBG("%s, %s\n", Rx ? "Rx" : "Tx", smmu_en ? "smmu en" : "smmu dis");

	/* alloc event ring */
	memset(&evt_props, 0, sizeof(evt_props));
	evt_props.intf = GSI_EVT_CHTYPE_11AD_EV;
	evt_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
	evt_props.intr = GSI_INTR_MSI;
	evt_props.intvec = 0;
	evt_props.exclusive = true;
	evt_props.err_cb = ipa_gsi_evt_ring_err_cb;
	evt_props.user_data = NULL;
	evt_props.int_modc = int_modc;
	evt_props.int_modt = int_modt;
	evt_props.ring_base_vaddr = NULL;

	/* event ring backed by the desc ring; MSI targets desc HWTAIL */
	if (smmu_en) {
		pipe_smmu = (struct ipa_wigig_pipe_setup_info_smmu *)pipe_info;
		evt_props.ring_base_addr =
			pipe_smmu->desc_ring_base_iova;
		evt_props.ring_len = pipe_smmu->desc_ring_size;
		evt_props.msi_addr = pipe_smmu->desc_ring_HWTAIL_pa;
	} else {
		pipe = (struct ipa_wigig_pipe_setup_info *)pipe_info;
		evt_props.ring_base_addr = pipe->desc_ring_base_pa;
		evt_props.ring_len = pipe->desc_ring_size;
		evt_props.msi_addr = pipe->desc_ring_HWTAIL_pa;
	}

	gsi_res = gsi_alloc_evt_ring(&evt_props,
		ipa3_ctx->gsi_dev_hdl,
		&ep->gsi_evt_ring_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error allocating event ring: %d\n", gsi_res);
		return -EFAULT;
	}

	/* event scratch not configured by SW for TX channels */
	if (Rx) {
		union __packed gsi_evt_scratch evt_scratch;

		memset(&evt_scratch, 0, sizeof(evt_scratch));
		evt_scratch.w11ad.update_status_hwtail_mod_threshold =
			rx_hwtail_mod_threshold;
		gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
			evt_scratch);
		if (gsi_res != GSI_STATUS_SUCCESS) {
			IPAERR("Error writing WIGIG event ring scratch: %d\n",
				gsi_res);
			goto fail_write_evt_scratch;
		}
	}

	ep->gsi_mem_info.evt_ring_len = evt_props.ring_len;
	ep->gsi_mem_info.evt_ring_base_addr = evt_props.ring_base_addr;
	ep->gsi_mem_info.evt_ring_base_vaddr = evt_props.ring_base_vaddr;

	/* alloc channel ring */
	memset(&channel_props, 0, sizeof(channel_props));
	memset(&gsi_scratch, 0, sizeof(gsi_scratch));
	if (Rx)
		channel_props.dir = GSI_CHAN_DIR_TO_GSI;
	else
		channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
	channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
	channel_props.prot = GSI_CHAN_PROT_11AD;
	channel_props.ch_id = ep_gsi->ipa_gsi_chan_num;
	channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
	channel_props.xfer_cb = NULL;
	channel_props.db_in_bytes = 0;
	channel_props.use_db_eng = GSI_CHAN_DB_MODE;
	channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
	channel_props.prefetch_mode = ep_gsi->prefetch_mode;
	channel_props.empty_lvl_threshold = ep_gsi->prefetch_threshold;
	channel_props.low_weight = 1;
	channel_props.err_cb = ipa_gsi_chan_err_cb;
	channel_props.ring_base_vaddr = NULL;

	if (Rx) {
		/* RX channel rides the status ring; scratch carries the
		 * status HWTAIL address, buffer base and buffer size
		 */
		if (smmu_en) {
			rx_dbuff_smmu =
			(struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
			channel_props.ring_base_addr =
				pipe_smmu->status_ring_base_iova;
			channel_props.ring_len =
				pipe_smmu->status_ring_size;
			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(
					pipe_smmu->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
				IPA_WIGIG_MSB(
					pipe_smmu->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
				IPA_WIGIG_LSB(
					rx_dbuff_smmu->data_buffer_base_iova);
			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
				IPA_WIGIG_MSB(
					rx_dbuff_smmu->data_buffer_base_iova);
			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(rx_dbuff_smmu->data_buffer_size);
		} else {
			rx_dbuff =
			(struct ipa_wigig_rx_pipe_data_buffer_info *)buff;
			channel_props.ring_base_addr =
				pipe->status_ring_base_pa;
			channel_props.ring_len = pipe->status_ring_size;
			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(pipe->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
				IPA_WIGIG_MSB(pipe->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
				IPA_WIGIG_LSB(rx_dbuff->data_buffer_base_pa);
			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
				IPA_WIGIG_MSB(rx_dbuff->data_buffer_base_pa);
			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(rx_dbuff->data_buffer_size);
		}
		IPADBG("rx scratch: status_ring_hwtail_address_lsb 0x%X\n",
			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb);
		IPADBG("rx scratch: status_ring_hwtail_address_msb 0x%X\n",
			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb);
		IPADBG("rx scratch: data_buffers_base_address_lsb 0x%X\n",
			gsi_scratch.rx_11ad.data_buffers_base_address_lsb);
		IPADBG("rx scratch: data_buffers_base_address_msb 0x%X\n",
			gsi_scratch.rx_11ad.data_buffers_base_address_msb);
		IPADBG("rx scratch: fixed_data_buffer_size_pow_2 %d\n",
			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2);
		IPADBG("rx scratch 0x[%X][%X][%X][%X]\n",
			gsi_scratch.data.word1,
			gsi_scratch.data.word2,
			gsi_scratch.data.word3,
			gsi_scratch.data.word4);
	} else {
		/* TX channel rides the desc ring; scratch carries the status
		 * ring HWTAIL/HWHEAD addresses, element count, buffer size
		 */
		if (smmu_en) {
			tx_dbuff_smmu =
			(struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;
			channel_props.ring_base_addr =
				pipe_smmu->desc_ring_base_iova;
			channel_props.ring_len =
				pipe_smmu->desc_ring_size;
			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(
					pipe_smmu->status_ring_HWTAIL_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb =
				IPA_WIGIG_LSB(
					pipe_smmu->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb =
				IPA_WIGIG_8_MSB(
					pipe_smmu->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(tx_dbuff_smmu->data_buffer_size);
			gsi_scratch.tx_11ad.status_ring_num_elem =
				pipe_smmu->status_ring_size /
				IPA_WIGIG_STATUS_RING_EL_SIZE;
		} else {
			tx_dbuff =
			(struct ipa_wigig_tx_pipe_data_buffer_info *)buff;
			channel_props.ring_base_addr = pipe->desc_ring_base_pa;
			channel_props.ring_len = pipe->desc_ring_size;
			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(
					pipe->status_ring_HWTAIL_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb =
				IPA_WIGIG_LSB(
					pipe->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb =
				IPA_WIGIG_8_MSB(pipe->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.status_ring_num_elem =
				pipe->status_ring_size /
				IPA_WIGIG_STATUS_RING_EL_SIZE;
			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(tx_dbuff->data_buffer_size);
		}
		gsi_scratch.tx_11ad.update_status_hwtail_mod_threshold =
			tx_hwtail_mod_threshold;
		IPADBG("tx scratch: status_ring_hwtail_address_lsb 0x%X\n",
			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb);
		IPADBG("tx scratch: status_ring_hwhead_address_lsb 0x%X\n",
			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb);
		IPADBG("tx scratch: status_ring_hwhead_hwtail_8_msb 0x%X\n",
			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb);
		IPADBG("tx scratch:status_ring_num_elem %d\n",
			gsi_scratch.tx_11ad.status_ring_num_elem);
		IPADBG("tx scratch:fixed_data_buffer_size_pow_2 %d\n",
			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2);
		IPADBG("tx scratch 0x[%X][%X][%X][%X]\n",
			gsi_scratch.data.word1,
			gsi_scratch.data.word2,
			gsi_scratch.data.word3,
			gsi_scratch.data.word4);
	}
	IPADBG("ch_id: %d\n", channel_props.ch_id);
	IPADBG("evt_ring_hdl: %ld\n", channel_props.evt_ring_hdl);
	IPADBG("re_size: %d\n", channel_props.re_size);
	IPADBG("GSI channel ring len: %d\n", channel_props.ring_len);
	IPADBG("channel ring base addr = 0x%llX\n",
		(unsigned long long)channel_props.ring_base_addr);
	IPADBG("Allocating GSI channel\n");
	gsi_res = gsi_alloc_channel(&channel_props,
		ipa3_ctx->gsi_dev_hdl,
		&ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("gsi_alloc_channel failed %d\n", gsi_res);
		goto fail_alloc_channel;
	}
	IPADBG("Writing Channel scratch\n");
	ep->gsi_mem_info.chan_ring_len = channel_props.ring_len;
	ep->gsi_mem_info.chan_ring_base_addr = channel_props.ring_base_addr;
	ep->gsi_mem_info.chan_ring_base_vaddr =
		channel_props.ring_base_vaddr;
	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
		gsi_scratch);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("gsi_write_channel_scratch failed %d\n",
			gsi_res);
		goto fail_write_channel_scratch;
	}
	IPADBG("exit\n");
	return 0;

fail_write_channel_scratch:
	gsi_dealloc_channel(ep->gsi_chan_hdl);
fail_alloc_channel:
fail_write_evt_scratch:
	/* both rollback paths fall through to free the event ring */
	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
	return -EFAULT;
}
  769. static int ipa3_wigig_config_uc(bool init,
  770. bool Rx,
  771. u8 wifi_ch,
  772. u8 gsi_ch,
  773. phys_addr_t HWHEAD)
  774. {
  775. struct ipa_mem_buffer cmd;
  776. enum ipa_cpu_2_hw_offload_commands command;
  777. int result;
  778. IPADBG("%s\n", init ? "init" : "Deinit");
  779. if (init) {
  780. struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data;
  781. cmd.size = sizeof(*cmd_data);
  782. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  783. &cmd.phys_base, GFP_KERNEL);
  784. if (cmd.base == NULL) {
  785. IPAERR("fail to get DMA memory.\n");
  786. return -ENOMEM;
  787. }
  788. cmd_data =
  789. (struct IpaHwOffloadSetUpCmdData_t_v4_0 *)cmd.base;
  790. cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
  791. cmd_data->SetupCh_params.W11AdSetupCh_params.dir =
  792. Rx ? W11AD_RX : W11AD_TX;
  793. cmd_data->SetupCh_params.W11AdSetupCh_params.gsi_ch = gsi_ch;
  794. cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_ch = wifi_ch;
  795. cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_msb =
  796. IPA_WIGIG_MSB(HWHEAD);
  797. cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_lsb =
  798. IPA_WIGIG_LSB(HWHEAD);
  799. command = IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP;
  800. } else {
  801. struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data;
  802. cmd.size = sizeof(*cmd_data);
  803. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  804. &cmd.phys_base, GFP_KERNEL);
  805. if (cmd.base == NULL) {
  806. IPAERR("fail to get DMA memory.\n");
  807. return -ENOMEM;
  808. }
  809. cmd_data =
  810. (struct IpaHwOffloadCommonChCmdData_t_v4_0 *)cmd.base;
  811. cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
  812. cmd_data->CommonCh_params.W11AdCommonCh_params.gsi_ch = gsi_ch;
  813. command = IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN;
  814. }
  815. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  816. result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
  817. command,
  818. IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
  819. false, 10 * HZ);
  820. if (result) {
  821. IPAERR("fail to %s uc for %s gsi channel %d\n",
  822. init ? "init" : "deinit",
  823. Rx ? "Rx" : "Tx", gsi_ch);
  824. }
  825. dma_free_coherent(ipa3_ctx->uc_pdev,
  826. cmd.size, cmd.base, cmd.phys_base);
  827. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  828. IPADBG("exit\n");
  829. return result;
  830. }
  831. int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out,
  832. struct dentry **parent)
  833. {
  834. int ipa_ep_idx;
  835. struct ipa3_ep_context *ep;
  836. struct ipa_ep_cfg ep_cfg;
  837. enum ipa_client_type rx_client = IPA_CLIENT_WIGIG_PROD;
  838. bool is_smmu_enabled;
  839. struct ipa_wigig_conn_rx_in_params_smmu *input_smmu = NULL;
  840. struct ipa_wigig_conn_rx_in_params *input = NULL;
  841. const struct ipa_gsi_ep_config *ep_gsi;
  842. void *pipe_info;
  843. void *buff;
  844. phys_addr_t status_ring_HWHEAD_pa;
  845. int result;
  846. IPADBG("\n");
  847. *parent = wigig_dent;
  848. ipa_ep_idx = ipa_get_ep_mapping(rx_client);
  849. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
  850. ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  851. IPAERR("fail to get ep (IPA_CLIENT_WIGIG_PROD) %d.\n",
  852. ipa_ep_idx);
  853. return -EFAULT;
  854. }
  855. ep = &ipa3_ctx->ep[ipa_ep_idx];
  856. if (ep->valid) {
  857. IPAERR("EP %d already allocated.\n", ipa_ep_idx);
  858. return -EFAULT;
  859. }
  860. if (ep->gsi_offload_state) {
  861. IPAERR("WIGIG channel bad state 0x%X\n",
  862. ep->gsi_offload_state);
  863. return -EFAULT;
  864. }
  865. ep_gsi = ipa3_get_gsi_ep_info(rx_client);
  866. if (!ep_gsi) {
  867. IPAERR("Failed getting GSI EP info for client=%d\n",
  868. rx_client);
  869. return -EPERM;
  870. }
  871. memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
  872. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  873. /* setup rx ep cfg */
  874. ep->valid = 1;
  875. ep->client = rx_client;
  876. result = ipa3_disable_data_path(ipa_ep_idx);
  877. if (result) {
  878. IPAERR("disable data path failed res=%d clnt=%d.\n", result,
  879. ipa_ep_idx);
  880. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  881. return -EFAULT;
  882. }
  883. is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
  884. if (is_smmu_enabled) {
  885. struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu;
  886. input_smmu = (struct ipa_wigig_conn_rx_in_params_smmu *)in;
  887. dbuff_smmu = &input_smmu->dbuff_smmu;
  888. ep->client_notify = input_smmu->notify;
  889. ep->priv = input_smmu->priv;
  890. IPADBG(
  891. "desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
  892. (unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
  893. input_smmu->pipe_smmu.desc_ring_size,
  894. (unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
  895. input_smmu->pipe_smmu.status_ring_size);
  896. IPADBG("data_buffer_base_iova 0x%llX data_buffer_size %d",
  897. (unsigned long long)dbuff_smmu->data_buffer_base_iova,
  898. input_smmu->dbuff_smmu.data_buffer_size);
  899. if (IPA_WIGIG_MSB(
  900. dbuff_smmu->data_buffer_base_iova) &
  901. 0xFFFFFF00) {
  902. IPAERR(
  903. "data_buffers_base_address_msb is over the 8 bit limit (0x%llX)\n",
  904. (unsigned long long)dbuff_smmu->data_buffer_base_iova);
  905. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  906. return -EFAULT;
  907. }
  908. if (dbuff_smmu->data_buffer_size >> 16) {
  909. IPAERR(
  910. "data_buffer_size is over the 16 bit limit (%d)\n"
  911. , dbuff_smmu->data_buffer_size);
  912. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  913. return -EFAULT;
  914. }
  915. } else {
  916. input = (struct ipa_wigig_conn_rx_in_params *)in;
  917. ep->client_notify = input->notify;
  918. ep->priv = input->priv;
  919. IPADBG(
  920. "desc_ring_base_pa %pa desc_ring_size %d status_ring_base_pa %pa status_ring_size %d",
  921. &input->pipe.desc_ring_base_pa,
  922. input->pipe.desc_ring_size,
  923. &input->pipe.status_ring_base_pa,
  924. input->pipe.status_ring_size);
  925. IPADBG("data_buffer_base_pa %pa data_buffer_size %d",
  926. &input->dbuff.data_buffer_base_pa,
  927. input->dbuff.data_buffer_size);
  928. if (
  929. IPA_WIGIG_MSB(input->dbuff.data_buffer_base_pa) & 0xFFFFFF00) {
  930. IPAERR(
  931. "data_buffers_base_address_msb is over the 8 bit limit (0x%pa)\n"
  932. , &input->dbuff.data_buffer_base_pa);
  933. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  934. return -EFAULT;
  935. }
  936. if (input->dbuff.data_buffer_size >> 16) {
  937. IPAERR(
  938. "data_buffer_size is over the 16 bit limit (0x%X)\n"
  939. , input->dbuff.data_buffer_size);
  940. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  941. return -EFAULT;
  942. }
  943. }
  944. memset(&ep_cfg, 0, sizeof(ep_cfg));
  945. ep_cfg.nat.nat_en = IPA_SRC_NAT;
  946. ep_cfg.hdr.hdr_len = ETH_HLEN;
  947. ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
  948. ep_cfg.hdr.hdr_ofst_pkt_size = 0;
  949. ep_cfg.hdr.hdr_additional_const_len = 0;
  950. ep_cfg.hdr_ext.hdr_little_endian = true;
  951. ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
  952. ep_cfg.hdr.hdr_metadata_reg_valid = 1;
  953. ep_cfg.mode.mode = IPA_BASIC;
  954. if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
  955. IPAERR("fail to setup rx pipe cfg\n");
  956. result = -EFAULT;
  957. goto fail;
  958. }
  959. if (is_smmu_enabled) {
  960. result = ipa3_wigig_smmu_map_channel(true,
  961. &input_smmu->pipe_smmu,
  962. &input_smmu->dbuff_smmu,
  963. true);
  964. if (result) {
  965. IPAERR("failed to setup rx pipe smmu map\n");
  966. result = -EFAULT;
  967. goto fail;
  968. }
  969. pipe_info = &input_smmu->pipe_smmu;
  970. buff = &input_smmu->dbuff_smmu;
  971. status_ring_HWHEAD_pa =
  972. input_smmu->pipe_smmu.status_ring_HWHEAD_pa;
  973. } else {
  974. pipe_info = &input->pipe;
  975. buff = &input->dbuff;
  976. status_ring_HWHEAD_pa =
  977. input->pipe.status_ring_HWHEAD_pa;
  978. }
  979. result = ipa3_wigig_config_gsi(true,
  980. is_smmu_enabled,
  981. pipe_info,
  982. buff,
  983. ep_gsi, ep);
  984. if (result)
  985. goto fail_gsi;
  986. result = ipa3_wigig_config_uc(
  987. true, true, 0,
  988. ep_gsi->ipa_gsi_chan_num,
  989. status_ring_HWHEAD_pa);
  990. if (result)
  991. goto fail_uc_config;
  992. ipa3_install_dflt_flt_rules(ipa_ep_idx);
  993. out->client = IPA_CLIENT_WIGIG_PROD;
  994. ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;
  995. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  996. IPADBG("wigig rx pipe connected successfully\n");
  997. IPADBG("exit\n");
  998. return 0;
  999. fail_uc_config:
  1000. /* Release channel and evt*/
  1001. ipa3_release_gsi_channel(ipa_ep_idx);
  1002. fail_gsi:
  1003. if (input_smmu)
  1004. ipa3_wigig_smmu_map_channel(true, &input_smmu->pipe_smmu,
  1005. &input_smmu->dbuff_smmu, false);
  1006. fail:
  1007. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1008. return result;
  1009. }
  1010. int ipa3_conn_wigig_client_i(void *in,
  1011. struct ipa_wigig_conn_out_params *out,
  1012. ipa_notify_cb tx_notify,
  1013. void *priv)
  1014. {
  1015. int ipa_ep_idx;
  1016. struct ipa3_ep_context *ep;
  1017. struct ipa_ep_cfg ep_cfg;
  1018. enum ipa_client_type tx_client;
  1019. bool is_smmu_enabled;
  1020. struct ipa_wigig_conn_tx_in_params_smmu *input_smmu = NULL;
  1021. struct ipa_wigig_conn_tx_in_params *input = NULL;
  1022. const struct ipa_gsi_ep_config *ep_gsi;
  1023. u32 aggr_byte_limit;
  1024. int result;
  1025. void *pipe_info;
  1026. void *buff;
  1027. phys_addr_t desc_ring_HWHEAD_pa;
  1028. u8 wifi_ch;
  1029. IPADBG("\n");
  1030. is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
  1031. if (is_smmu_enabled) {
  1032. input_smmu = (struct ipa_wigig_conn_tx_in_params_smmu *)in;
  1033. IPADBG(
  1034. "desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
  1035. (unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
  1036. input_smmu->pipe_smmu.desc_ring_size,
  1037. (unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
  1038. input_smmu->pipe_smmu.status_ring_size);
  1039. IPADBG("num buffers %d, data buffer size %d\n",
  1040. input_smmu->dbuff_smmu.num_buffers,
  1041. input_smmu->dbuff_smmu.data_buffer_size);
  1042. if (ipa3_wigig_tx_bit_to_ep(input_smmu->int_gen_tx_bit_num,
  1043. &tx_client)) {
  1044. return -EINVAL;
  1045. }
  1046. if (input_smmu->dbuff_smmu.data_buffer_size >> 16) {
  1047. IPAERR(
  1048. "data_buffer_size is over the 16 bit limit (0x%X)\n"
  1049. , input_smmu->dbuff_smmu.data_buffer_size);
  1050. return -EFAULT;
  1051. }
  1052. if (IPA_WIGIG_8_MSB(
  1053. input_smmu->pipe_smmu.status_ring_HWHEAD_pa)
  1054. != IPA_WIGIG_8_MSB(
  1055. input_smmu->pipe_smmu.status_ring_HWTAIL_pa)) {
  1056. IPAERR(
  1057. "status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
  1058. , input_smmu->pipe_smmu.status_ring_HWHEAD_pa,
  1059. input_smmu->pipe_smmu.status_ring_HWTAIL_pa);
  1060. return -EFAULT;
  1061. }
  1062. wifi_ch = input_smmu->int_gen_tx_bit_num;
  1063. /* convert to kBytes */
  1064. aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
  1065. input_smmu->dbuff_smmu.data_buffer_size);
  1066. } else {
  1067. input = (struct ipa_wigig_conn_tx_in_params *)in;
  1068. IPADBG(
  1069. "desc_ring_base_pa %pa desc_ring_size %d status_ring_base_pa %pa status_ring_size %d",
  1070. &input->pipe.desc_ring_base_pa,
  1071. input->pipe.desc_ring_size,
  1072. &input->pipe.status_ring_base_pa,
  1073. input->pipe.status_ring_size);
  1074. IPADBG("data_buffer_size %d", input->dbuff.data_buffer_size);
  1075. if (ipa3_wigig_tx_bit_to_ep(input->int_gen_tx_bit_num,
  1076. &tx_client)) {
  1077. return -EINVAL;
  1078. }
  1079. if (input->dbuff.data_buffer_size >> 16) {
  1080. IPAERR(
  1081. "data_buffer_size is over the 16 bit limit (0x%X)\n"
  1082. , input->dbuff.data_buffer_size);
  1083. return -EFAULT;
  1084. }
  1085. if (IPA_WIGIG_8_MSB(
  1086. input->pipe.status_ring_HWHEAD_pa)
  1087. != IPA_WIGIG_8_MSB(
  1088. input->pipe.status_ring_HWTAIL_pa)) {
  1089. IPAERR(
  1090. "status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
  1091. , input->pipe.status_ring_HWHEAD_pa,
  1092. input->pipe.status_ring_HWTAIL_pa);
  1093. return -EFAULT;
  1094. }
  1095. wifi_ch = input->int_gen_tx_bit_num;
  1096. /* convert to kBytes */
  1097. aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
  1098. input->dbuff.data_buffer_size);
  1099. }
  1100. IPADBG("client type is %d\n", tx_client);
  1101. ipa_ep_idx = ipa_get_ep_mapping(tx_client);
  1102. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
  1103. ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1104. IPAERR("fail to get ep (%d) %d.\n",
  1105. tx_client, ipa_ep_idx);
  1106. return -EFAULT;
  1107. }
  1108. ep = &ipa3_ctx->ep[ipa_ep_idx];
  1109. if (ep->valid) {
  1110. IPAERR("EP %d already allocated.\n", ipa_ep_idx);
  1111. return -EFAULT;
  1112. }
  1113. if (ep->gsi_offload_state) {
  1114. IPAERR("WIGIG channel bad state 0x%X\n",
  1115. ep->gsi_offload_state);
  1116. return -EFAULT;
  1117. }
  1118. ep_gsi = ipa3_get_gsi_ep_info(tx_client);
  1119. if (!ep_gsi) {
  1120. IPAERR("Failed getting GSI EP info for client=%d\n",
  1121. tx_client);
  1122. return -EFAULT;
  1123. }
  1124. memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
  1125. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  1126. /* setup tx ep cfg */
  1127. ep->valid = 1;
  1128. ep->client = tx_client;
  1129. result = ipa3_disable_data_path(ipa_ep_idx);
  1130. if (result) {
  1131. IPAERR("disable data path failed res=%d clnt=%d.\n", result,
  1132. ipa_ep_idx);
  1133. goto fail;
  1134. }
  1135. ep->client_notify = tx_notify;
  1136. ep->priv = priv;
  1137. memset(&ep_cfg, 0, sizeof(ep_cfg));
  1138. ep_cfg.nat.nat_en = IPA_DST_NAT;
  1139. ep_cfg.hdr.hdr_len = ETH_HLEN;
  1140. ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
  1141. ep_cfg.hdr.hdr_ofst_pkt_size = 0;
  1142. ep_cfg.hdr.hdr_additional_const_len = 0;
  1143. ep_cfg.hdr_ext.hdr_little_endian = true;
  1144. ep_cfg.mode.mode = IPA_BASIC;
  1145. /* config hard byte limit, max is the buffer size (in kB)*/
  1146. ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
  1147. ep_cfg.aggr.aggr = IPA_GENERIC;
  1148. ep_cfg.aggr.aggr_pkt_limit = 1;
  1149. ep_cfg.aggr.aggr_byte_limit = aggr_byte_limit;
  1150. ep_cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;
  1151. if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
  1152. IPAERR("fail to setup rx pipe cfg\n");
  1153. result = -EFAULT;
  1154. goto fail;
  1155. }
  1156. if (is_smmu_enabled) {
  1157. result = ipa3_wigig_smmu_map_channel(false,
  1158. &input_smmu->pipe_smmu,
  1159. &input_smmu->dbuff_smmu,
  1160. true);
  1161. if (result) {
  1162. IPAERR(
  1163. "failed to setup tx pipe smmu map client %d (ep %d)\n"
  1164. , tx_client, ipa_ep_idx);
  1165. result = -EFAULT;
  1166. goto fail;
  1167. }
  1168. pipe_info = &input_smmu->pipe_smmu;
  1169. buff = &input_smmu->dbuff_smmu;
  1170. desc_ring_HWHEAD_pa =
  1171. input_smmu->pipe_smmu.desc_ring_HWHEAD_pa;
  1172. } else {
  1173. pipe_info = &input->pipe;
  1174. buff = &input->dbuff;
  1175. desc_ring_HWHEAD_pa =
  1176. input->pipe.desc_ring_HWHEAD_pa;
  1177. }
  1178. result = ipa3_wigig_config_gsi(false,
  1179. is_smmu_enabled,
  1180. pipe_info,
  1181. buff,
  1182. ep_gsi, ep);
  1183. if (result)
  1184. goto fail_gsi;
  1185. result = ipa3_wigig_config_uc(
  1186. true, false, wifi_ch,
  1187. ep_gsi->ipa_gsi_chan_num,
  1188. desc_ring_HWHEAD_pa);
  1189. if (result)
  1190. goto fail_uc_config;
  1191. out->client = tx_client;
  1192. ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;
  1193. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1194. IPADBG("wigig client %d (ep %d) connected successfully\n", tx_client,
  1195. ipa_ep_idx);
  1196. return 0;
  1197. fail_uc_config:
  1198. /* Release channel and evt*/
  1199. ipa3_release_gsi_channel(ipa_ep_idx);
  1200. fail_gsi:
  1201. if (input_smmu)
  1202. ipa3_wigig_smmu_map_channel(false, &input_smmu->pipe_smmu,
  1203. &input_smmu->dbuff_smmu, false);
  1204. fail:
  1205. ep->valid = 0;
  1206. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1207. return result;
  1208. }
  1209. int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
  1210. struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
  1211. void *dbuff)
  1212. {
  1213. bool is_smmu_enabled;
  1214. int ipa_ep_idx;
  1215. struct ipa3_ep_context *ep;
  1216. const struct ipa_gsi_ep_config *ep_gsi;
  1217. int result;
  1218. bool rx = false;
  1219. IPADBG("\n");
  1220. ipa_ep_idx = ipa_get_ep_mapping(client);
  1221. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
  1222. ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1223. IPAERR("fail to get ep (%d) %d.\n",
  1224. client, ipa_ep_idx);
  1225. return -EFAULT;
  1226. }
  1227. ep = &ipa3_ctx->ep[ipa_ep_idx];
  1228. if (!ep->valid) {
  1229. IPAERR("Invalid EP\n");
  1230. return -EFAULT;
  1231. }
  1232. ep_gsi = ipa3_get_gsi_ep_info(client);
  1233. if (!ep_gsi) {
  1234. IPAERR("Failed getting GSI EP info for client=%d\n",
  1235. client);
  1236. return -EFAULT;
  1237. }
  1238. if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
  1239. IPAERR("client in bad state(client %d) 0x%X\n",
  1240. client, ep->gsi_offload_state);
  1241. return -EFAULT;
  1242. }
  1243. if (client == IPA_CLIENT_WIGIG_PROD)
  1244. rx = true;
  1245. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  1246. /* Release channel and evt*/
  1247. result = ipa3_release_gsi_channel(ipa_ep_idx);
  1248. if (result) {
  1249. IPAERR("failed to deallocate channel\n");
  1250. goto fail;
  1251. }
  1252. /* only gsi ch number and dir are necessary */
  1253. result = ipa3_wigig_config_uc(
  1254. false, rx, 0,
  1255. ep_gsi->ipa_gsi_chan_num, 0);
  1256. if (result) {
  1257. IPAERR("failed uC channel teardown %d\n", result);
  1258. WARN_ON(1);
  1259. }
  1260. is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
  1261. if (is_smmu_enabled) {
  1262. if (!pipe_smmu || !dbuff) {
  1263. IPAERR("smmu input is null %pK %pK\n",
  1264. pipe_smmu, dbuff);
  1265. WARN_ON(1);
  1266. } else {
  1267. result = ipa3_wigig_smmu_map_channel(rx,
  1268. pipe_smmu,
  1269. dbuff,
  1270. false);
  1271. if (result) {
  1272. IPAERR(
  1273. "failed to unmap pipe smmu %d (ep %d)\n"
  1274. , client, ipa_ep_idx);
  1275. result = -EFAULT;
  1276. goto fail;
  1277. }
  1278. }
  1279. if (rx) {
  1280. if (!list_empty(&smmu_reg_addr_list)) {
  1281. IPAERR("smmu_reg_addr_list not empty\n");
  1282. WARN_ON(1);
  1283. }
  1284. if (!list_empty(&smmu_ring_addr_list)) {
  1285. IPAERR("smmu_ring_addr_list not empty\n");
  1286. WARN_ON(1);
  1287. }
  1288. }
  1289. } else if (pipe_smmu || dbuff) {
  1290. IPAERR("smmu input is not null %pK %pK\n",
  1291. pipe_smmu, dbuff);
  1292. WARN_ON(1);
  1293. }
  1294. memset(ep, 0, sizeof(struct ipa3_ep_context));
  1295. ep->gsi_offload_state = 0;
  1296. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1297. IPADBG("client (ep: %d) disconnected\n", ipa_ep_idx);
  1298. IPADBG("exit\n");
  1299. return 0;
  1300. fail:
  1301. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1302. return result;
  1303. }
  1304. int ipa3_wigig_uc_msi_init(bool init,
  1305. phys_addr_t periph_baddr_pa,
  1306. phys_addr_t pseudo_cause_pa,
  1307. phys_addr_t int_gen_tx_pa,
  1308. phys_addr_t int_gen_rx_pa,
  1309. phys_addr_t dma_ep_misc_pa)
  1310. {
  1311. int result;
  1312. struct ipa_mem_buffer cmd;
  1313. enum ipa_cpu_2_hw_offload_commands command;
  1314. bool map = false;
  1315. IPADBG("params: %s, %pa, %pa, %pa, %pa, %pa\n",
  1316. init ? "init" : "deInit",
  1317. &periph_baddr_pa,
  1318. &pseudo_cause_pa,
  1319. &int_gen_tx_pa,
  1320. &int_gen_rx_pa,
  1321. &dma_ep_misc_pa);
  1322. /* first make sure registers are SMMU mapped if necessary*/
  1323. if ((!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC])) {
  1324. if (init)
  1325. map = true;
  1326. IPADBG("SMMU enabled, map %d\n", map);
  1327. result = ipa3_smmu_map_peer_reg(
  1328. rounddown(pseudo_cause_pa, PAGE_SIZE),
  1329. map,
  1330. IPA_SMMU_CB_UC);
  1331. if (result) {
  1332. IPAERR(
  1333. "failed to %s pseudo_cause reg %d\n",
  1334. map ? "map" : "unmap",
  1335. result);
  1336. goto fail;
  1337. }
  1338. result = ipa3_smmu_map_peer_reg(
  1339. rounddown(int_gen_tx_pa, PAGE_SIZE),
  1340. map,
  1341. IPA_SMMU_CB_UC);
  1342. if (result) {
  1343. IPAERR(
  1344. "failed to %s int_gen_tx reg %d\n",
  1345. map ? "map" : "unmap",
  1346. result);
  1347. goto fail_gen_tx;
  1348. }
  1349. result = ipa3_smmu_map_peer_reg(
  1350. rounddown(int_gen_rx_pa, PAGE_SIZE),
  1351. map,
  1352. IPA_SMMU_CB_UC);
  1353. if (result) {
  1354. IPAERR(
  1355. "failed to %s int_gen_rx reg %d\n",
  1356. map ? "map" : "unmap",
  1357. result);
  1358. goto fail_gen_rx;
  1359. }
  1360. result = ipa3_smmu_map_peer_reg(
  1361. rounddown(dma_ep_misc_pa, PAGE_SIZE),
  1362. map,
  1363. IPA_SMMU_CB_UC);
  1364. if (result) {
  1365. IPAERR(
  1366. "failed to %s dma_ep_misc reg %d\n",
  1367. map ? "map" : "unmap",
  1368. result);
  1369. goto fail_dma_ep_misc;
  1370. }
  1371. }
  1372. /* now send the wigig hw base address to uC*/
  1373. if (init) {
  1374. struct IpaHwPeripheralInitCmdData_t *cmd_data;
  1375. cmd.size = sizeof(*cmd_data);
  1376. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  1377. &cmd.phys_base, GFP_KERNEL);
  1378. if (cmd.base == NULL) {
  1379. IPAERR("fail to get DMA memory.\n");
  1380. result = -ENOMEM;
  1381. if (map)
  1382. goto fail_alloc;
  1383. return result;
  1384. }
  1385. cmd_data = (struct IpaHwPeripheralInitCmdData_t *)cmd.base;
  1386. cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
  1387. cmd_data->Init_params.W11AdInit_params.periph_baddr_msb =
  1388. IPA_WIGIG_MSB(periph_baddr_pa);
  1389. cmd_data->Init_params.W11AdInit_params.periph_baddr_lsb =
  1390. IPA_WIGIG_LSB(periph_baddr_pa);
  1391. command = IPA_CPU_2_HW_CMD_PERIPHERAL_INIT;
  1392. } else {
  1393. struct IpaHwPeripheralDeinitCmdData_t *cmd_data;
  1394. cmd.size = sizeof(*cmd_data);
  1395. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  1396. &cmd.phys_base, GFP_KERNEL);
  1397. if (cmd.base == NULL) {
  1398. IPAERR("fail to get DMA memory.\n");
  1399. result = -ENOMEM;
  1400. if (map)
  1401. goto fail_alloc;
  1402. return result;
  1403. }
  1404. cmd_data = (struct IpaHwPeripheralDeinitCmdData_t *)cmd.base;
  1405. cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
  1406. command = IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT;
  1407. }
  1408. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  1409. result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
  1410. command,
  1411. IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
  1412. false, 10 * HZ);
  1413. if (result) {
  1414. IPAERR("fail to %s uc MSI config\n", init ? "init" : "deinit");
  1415. goto fail_command;
  1416. }
  1417. dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size,
  1418. cmd.base, cmd.phys_base);
  1419. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1420. IPADBG("exit\n");
  1421. return 0;
  1422. fail_command:
  1423. dma_free_coherent(ipa3_ctx->uc_pdev,
  1424. cmd.size,
  1425. cmd.base, cmd.phys_base);
  1426. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1427. fail_alloc:
  1428. ipa3_smmu_map_peer_reg(
  1429. rounddown(dma_ep_misc_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
  1430. fail_dma_ep_misc:
  1431. ipa3_smmu_map_peer_reg(
  1432. rounddown(int_gen_rx_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
  1433. fail_gen_rx:
  1434. ipa3_smmu_map_peer_reg(
  1435. rounddown(int_gen_tx_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
  1436. fail_gen_tx:
  1437. ipa3_smmu_map_peer_reg(
  1438. rounddown(pseudo_cause_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
  1439. fail:
  1440. return result;
  1441. }
  1442. int ipa3_enable_wigig_pipe_i(enum ipa_client_type client)
  1443. {
  1444. int ipa_ep_idx, res;
  1445. struct ipa3_ep_context *ep;
  1446. struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
  1447. int retry_cnt = 0;
  1448. uint64_t val;
  1449. IPADBG("\n");
  1450. ipa_ep_idx = ipa_get_ep_mapping(client);
  1451. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
  1452. ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1453. IPAERR("fail to get ep (%d) %d.\n",
  1454. client, ipa_ep_idx);
  1455. return -EFAULT;
  1456. }
  1457. ep = &ipa3_ctx->ep[ipa_ep_idx];
  1458. if (!ep->valid) {
  1459. IPAERR("Invalid EP\n");
  1460. return -EFAULT;
  1461. }
  1462. if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
  1463. IPAERR("WIGIG channel bad state 0x%X\n",
  1464. ep->gsi_offload_state);
  1465. return -EFAULT;
  1466. }
  1467. IPA_ACTIVE_CLIENTS_INC_EP(client);
  1468. res = ipa3_enable_data_path(ipa_ep_idx);
  1469. if (res)
  1470. goto fail_enable_datapath;
  1471. memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
  1472. ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
  1473. /* ring the event db (outside the ring boundary)*/
  1474. val = ep->gsi_mem_info.evt_ring_base_addr +
  1475. ep->gsi_mem_info.evt_ring_len;
  1476. res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl, val);
  1477. if (res) {
  1478. IPAERR(
  1479. "fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n"
  1480. , res, ep->gsi_evt_ring_hdl,
  1481. (unsigned long long)val);
  1482. res = -EFAULT;
  1483. goto fail_ring_evt;
  1484. }
  1485. IPADBG("start channel\n");
  1486. res = gsi_start_channel(ep->gsi_chan_hdl);
  1487. if (res != GSI_STATUS_SUCCESS) {
  1488. IPAERR("gsi_start_channel failed %d\n", res);
  1489. WARN_ON(1);
  1490. res = -EFAULT;
  1491. goto fail_gsi_start;
  1492. }
  1493. /* for TX we have to ring the channel db (last desc in the ring) */
  1494. if (client != IPA_CLIENT_WIGIG_PROD) {
  1495. uint64_t val;
  1496. val = ep->gsi_mem_info.chan_ring_base_addr +
  1497. ep->gsi_mem_info.chan_ring_len -
  1498. IPA_WIGIG_DESC_RING_EL_SIZE;
  1499. IPADBG("ring ch doorbell (0x%llX) TX %ld\n", val,
  1500. ep->gsi_chan_hdl);
  1501. res = gsi_ring_ch_ring_db(ep->gsi_chan_hdl, val);
  1502. if (res) {
  1503. IPAERR(
  1504. "fail to ring channel db %d. hdl=%lu wp=0x%llx\n"
  1505. , res, ep->gsi_chan_hdl,
  1506. (unsigned long long)val);
  1507. res = -EFAULT;
  1508. goto fail_ring_ch;
  1509. }
  1510. }
  1511. ep->gsi_offload_state |= IPA_WIGIG_ENABLED;
  1512. IPADBG("exit\n");
  1513. return 0;
  1514. fail_ring_ch:
  1515. res = ipa3_stop_gsi_channel(ipa_ep_idx);
  1516. if (res != 0 && res != -GSI_STATUS_AGAIN &&
  1517. res != -GSI_STATUS_TIMED_OUT) {
  1518. IPAERR("failed to stop channel res = %d\n", res);
  1519. } else if (res == -GSI_STATUS_AGAIN) {
  1520. IPADBG("GSI stop channel failed retry cnt = %d\n",
  1521. retry_cnt);
  1522. retry_cnt++;
  1523. if (retry_cnt < GSI_STOP_MAX_RETRY_CNT)
  1524. goto fail_ring_ch;
  1525. } else {
  1526. IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl);
  1527. }
  1528. res = -EFAULT;
  1529. fail_gsi_start:
  1530. fail_ring_evt:
  1531. ipa3_disable_data_path(ipa_ep_idx);
  1532. fail_enable_datapath:
  1533. IPA_ACTIVE_CLIENTS_DEC_EP(client);
  1534. return res;
  1535. }
  1536. int ipa3_disable_wigig_pipe_i(enum ipa_client_type client)
  1537. {
  1538. int ipa_ep_idx, res;
  1539. struct ipa3_ep_context *ep;
  1540. struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
  1541. struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
  1542. bool disable_force_clear = false;
  1543. u32 source_pipe_bitmask = 0;
  1544. int retry_cnt = 0;
  1545. IPADBG("\n");
  1546. ipa_ep_idx = ipa_get_ep_mapping(client);
  1547. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
  1548. ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1549. IPAERR("fail to get ep (%d) %d.\n",
  1550. client, ipa_ep_idx);
  1551. return -EFAULT;
  1552. }
  1553. if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
  1554. IPAERR("ep %d out of range.\n", ipa_ep_idx);
  1555. return -EFAULT;
  1556. }
  1557. ep = &ipa3_ctx->ep[ipa_ep_idx];
  1558. if (!ep->valid) {
  1559. IPAERR("Invalid EP\n");
  1560. return -EFAULT;
  1561. }
  1562. if (ep->gsi_offload_state !=
  1563. (IPA_WIGIG_CONNECTED | IPA_WIGIG_ENABLED)) {
  1564. IPAERR("WIGIG channel bad state 0x%X\n",
  1565. ep->gsi_offload_state);
  1566. return -EFAULT;
  1567. }
  1568. IPADBG("pipe %d\n", ipa_ep_idx);
  1569. source_pipe_bitmask = 1 << ipa_ep_idx;
  1570. res = ipa3_enable_force_clear(ipa_ep_idx,
  1571. false, source_pipe_bitmask);
  1572. if (res) {
  1573. /*
  1574. * assuming here modem SSR, AP can remove
  1575. * the delay in this case
  1576. */
  1577. IPAERR("failed to force clear %d\n", res);
  1578. IPAERR("remove delay from SCND reg\n");
  1579. ep_ctrl_scnd.endp_delay = false;
  1580. ipahal_write_reg_n_fields(
  1581. IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx,
  1582. &ep_ctrl_scnd);
  1583. } else {
  1584. disable_force_clear = true;
  1585. }
  1586. retry_gsi_stop:
  1587. res = ipa3_stop_gsi_channel(ipa_ep_idx);
  1588. if (res != 0 && res != -GSI_STATUS_AGAIN &&
  1589. res != -GSI_STATUS_TIMED_OUT) {
  1590. IPAERR("failed to stop channel res = %d\n", res);
  1591. goto fail_stop_channel;
  1592. } else if (res == -GSI_STATUS_AGAIN) {
  1593. IPADBG("GSI stop channel failed retry cnt = %d\n",
  1594. retry_cnt);
  1595. retry_cnt++;
  1596. if (retry_cnt >= GSI_STOP_MAX_RETRY_CNT)
  1597. goto fail_stop_channel;
  1598. goto retry_gsi_stop;
  1599. } else {
  1600. IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl);
  1601. }
  1602. res = ipa3_reset_gsi_channel(ipa_ep_idx);
  1603. if (res != GSI_STATUS_SUCCESS) {
  1604. IPAERR("Failed to reset chan: %d.\n", res);
  1605. goto fail_stop_channel;
  1606. }
  1607. if (disable_force_clear)
  1608. ipa3_disable_force_clear(ipa_ep_idx);
  1609. res = ipa3_disable_data_path(ipa_ep_idx);
  1610. if (res) {
  1611. WARN_ON(1);
  1612. return res;
  1613. }
  1614. /* Set the delay after disabling IPA Producer pipe */
  1615. if (IPA_CLIENT_IS_PROD(ep->client)) {
  1616. memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
  1617. ep_cfg_ctrl.ipa_ep_delay = true;
  1618. ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
  1619. }
  1620. ep->gsi_offload_state &= ~IPA_WIGIG_ENABLED;
  1621. IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx));
  1622. IPADBG("exit\n");
  1623. return 0;
  1624. fail_stop_channel:
  1625. ipa_assert();
  1626. return res;
  1627. }
  1628. #ifndef CONFIG_DEBUG_FS
  1629. int ipa3_wigig_init_debugfs_i(struct dentry *parent) { return 0; }
  1630. #else
  1631. int ipa3_wigig_init_debugfs_i(struct dentry *parent)
  1632. {
  1633. const mode_t read_write_mode = 0664;
  1634. struct dentry *file = NULL;
  1635. struct dentry *dent;
  1636. dent = debugfs_create_dir("ipa_wigig", parent);
  1637. if (IS_ERR_OR_NULL(dent)) {
  1638. IPAERR("fail to create folder in debug_fs\n");
  1639. return -EFAULT;
  1640. }
  1641. wigig_dent = dent;
  1642. file = debugfs_create_u8("modc", read_write_mode, dent,
  1643. &int_modc);
  1644. if (IS_ERR_OR_NULL(file)) {
  1645. IPAERR("fail to create file modc\n");
  1646. goto fail;
  1647. }
  1648. file = debugfs_create_u16("modt", read_write_mode, dent,
  1649. &int_modt);
  1650. if (IS_ERR_OR_NULL(file)) {
  1651. IPAERR("fail to create file modt\n");
  1652. goto fail;
  1653. }
  1654. file = debugfs_create_u8("rx_mod_th", read_write_mode, dent,
  1655. &rx_hwtail_mod_threshold);
  1656. if (IS_ERR_OR_NULL(file)) {
  1657. IPAERR("fail to create file rx_mod_th\n");
  1658. goto fail;
  1659. }
  1660. file = debugfs_create_u8("tx_mod_th", read_write_mode, dent,
  1661. &tx_hwtail_mod_threshold);
  1662. if (IS_ERR_OR_NULL(file)) {
  1663. IPAERR("fail to create file tx_mod_th\n");
  1664. goto fail;
  1665. }
  1666. return 0;
  1667. fail:
  1668. debugfs_remove_recursive(dent);
  1669. wigig_dent = NULL;
  1670. return -EFAULT;
  1671. }
  1672. #endif