ipa_wigig_i.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include "ipa_i.h"
#include <linux/if_ether.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/ipa_wigig.h>

#define IPA_WIGIG_DESC_RING_EL_SIZE	32
#define IPA_WIGIG_STATUS_RING_EL_SIZE	16

#define GSI_STOP_MAX_RETRY_CNT	10

#define IPA_WIGIG_CONNECTED	BIT(0)
#define IPA_WIGIG_ENABLED	BIT(1)

#define IPA_WIGIG_MSB_MASK	0xFFFFFFFF00000000
#define IPA_WIGIG_LSB_MASK	0x00000000FFFFFFFF
#define IPA_WIGIG_MSB(num)	((u32)((num & IPA_WIGIG_MSB_MASK) >> 32))
#define IPA_WIGIG_LSB(num)	((u32)(num & IPA_WIGIG_LSB_MASK))

/* extract PCIE addresses [0:39] relevant msb */
#define IPA_WIGIG_8_MSB_MASK	0xFF00000000
#define IPA_WIGIG_8_MSB(num)	((u32)((num & IPA_WIGIG_8_MSB_MASK) >> 32))

#define W11AD_RX	0
#define W11AD_TX	1

#define W11AD_TO_GSI_DB_m	1
#define W11AD_TO_GSI_DB_n	1

static LIST_HEAD(smmu_reg_addr_list);
static LIST_HEAD(smmu_ring_addr_list);
static DEFINE_MUTEX(smmu_lock);
struct dentry *wigig_dent;
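
/*
 * SMMU mappings of peer register pages and ring buffers may be shared
 * between several WIGIG pipes. The two lists below reference-count each
 * mapped page/IOVA (under smmu_lock), so a region is mapped once on first
 * use and unmapped only when the last pipe using it goes away.
 */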
struct ipa_wigig_smmu_reg_addr {
	struct list_head link;
	phys_addr_t phys_addr;
	enum ipa_smmu_cb_type cb_type;
	u8 count;
};

struct ipa_wigig_smmu_ring_addr {
	struct list_head link;
	u64 iova;
	enum ipa_smmu_cb_type cb_type;
	u8 count;
};

static int ipa3_wigig_uc_loaded_handler(struct notifier_block *self,
	unsigned long val, void *data)
{
	IPADBG("val %ld\n", val);

	if (!ipa3_ctx) {
		IPAERR("IPA ctx is null\n");
		return -EINVAL;
	}

	WARN_ON(data != ipa3_ctx);

	if (ipa3_ctx->uc_wigig_ctx.uc_ready_cb) {
		ipa3_ctx->uc_wigig_ctx.uc_ready_cb(
			ipa3_ctx->uc_wigig_ctx.priv);

		ipa3_ctx->uc_wigig_ctx.uc_ready_cb = NULL;
		ipa3_ctx->uc_wigig_ctx.priv = NULL;
	}

	IPADBG("exit\n");
	return 0;
}
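
/*
 * Notifier invoked once the uC firmware is loaded; it fires the ready
 * callback registered via ipa3_wigig_internal_init() (if any) exactly
 * once, then clears it.
 */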
static struct notifier_block uc_loaded_notifier = {
	.notifier_call = ipa3_wigig_uc_loaded_handler,
};

int ipa3_wigig_init_i(void)
{
	IPADBG("\n");

	ipa3_uc_register_ready_cb(&uc_loaded_notifier);

	IPADBG("exit\n");
	return 0;
}

int ipa3_wigig_internal_init(
	struct ipa_wdi_uc_ready_params *inout,
	ipa_wigig_misc_int_cb int_notify,
	phys_addr_t *uc_db_pa)
{
	int result = 0;

	IPADBG("\n");

	if (inout == NULL) {
		IPAERR("inout is NULL");
		return -EINVAL;
	}

	if (int_notify == NULL) {
		IPAERR("int_notify is NULL");
		return -EINVAL;
	}

	result = ipa3_uc_state_check();
	if (result) {
		inout->is_uC_ready = false;
		ipa3_ctx->uc_wigig_ctx.uc_ready_cb = inout->notify;
	} else {
		inout->is_uC_ready = true;
	}

	ipa3_ctx->uc_wigig_ctx.priv = inout->priv;
	ipa3_ctx->uc_wigig_ctx.misc_notify_cb = int_notify;

	*uc_db_pa = ipa3_ctx->ipa_wrapper_base +
		ipahal_get_reg_base() +
		ipahal_get_reg_mn_ofst(
			IPA_UC_MAILBOX_m_n,
			W11AD_TO_GSI_DB_m,
			W11AD_TO_GSI_DB_n);

	IPADBG("exit\n");
	return 0;
}
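
/*
 * Map the 11ad interrupt-generation TX bit number to the corresponding
 * IPA WIGIG consumer client; only bits 2..5 are valid, anything else is
 * rejected.
 */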
static int ipa3_wigig_tx_bit_to_ep(
	const u8 tx_bit_num,
	enum ipa_client_type *type)
{
	IPADBG("tx_bit_num %d\n", tx_bit_num);

	switch (tx_bit_num) {
	case 2:
		*type = IPA_CLIENT_WIGIG1_CONS;
		break;
	case 3:
		*type = IPA_CLIENT_WIGIG2_CONS;
		break;
	case 4:
		*type = IPA_CLIENT_WIGIG3_CONS;
		break;
	case 5:
		*type = IPA_CLIENT_WIGIG4_CONS;
		break;
	default:
		IPAERR("invalid tx_bit_num %d\n", tx_bit_num);
		return -EINVAL;
	}

	IPADBG("exit\n");
	return 0;
}

static int ipa3_wigig_smmu_map_buffers(bool Rx,
	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
	void *buff,
	bool map)
{
	int result;

	/* data buffers */
	if (Rx) {
		struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu =
			(struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
		int num_elem =
			pipe_smmu->desc_ring_size /
			IPA_WIGIG_DESC_RING_EL_SIZE;

		result = ipa3_smmu_map_peer_buff(
			dbuff_smmu->data_buffer_base_iova,
			dbuff_smmu->data_buffer_size * num_elem,
			map,
			&dbuff_smmu->data_buffer_base,
			IPA_SMMU_CB_11AD);
		if (result) {
			IPAERR(
				"failed to %s rx data_buffer %d, num elem %d\n",
				map ? "map" : "unmap",
				result, num_elem);
			goto fail_map_buff;
		}
	} else {
		int i;
		struct ipa_wigig_tx_pipe_data_buffer_info_smmu *dbuff_smmu =
			(struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;

		for (i = 0; i < dbuff_smmu->num_buffers; i++) {
			result = ipa3_smmu_map_peer_buff(
				*(dbuff_smmu->data_buffer_base_iova + i),
				dbuff_smmu->data_buffer_size,
				map,
				(dbuff_smmu->data_buffer_base + i),
				IPA_SMMU_CB_11AD);
			if (result) {
				IPAERR(
					"%d: failed to %s tx data buffer %d\n",
					i, map ? "map" : "unmap",
					result);
				for (i--; i >= 0; i--) {
					result = ipa3_smmu_map_peer_buff(
						*(dbuff_smmu->data_buffer_base_iova + i),
						dbuff_smmu->data_buffer_size,
						!map,
						(dbuff_smmu->data_buffer_base + i),
						IPA_SMMU_CB_11AD);
				}
				goto fail_map_buff;
			}
		}
	}

	IPADBG("exit\n");
	return 0;

fail_map_buff:
	return result;
}
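
/*
 * Map/unmap a peer register page to the given SMMU context bank.
 * Mappings are reference-counted via smmu_reg_addr_list, so the same
 * page may safely be requested by multiple pipes.
 */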
static int ipa3_wigig_smmu_map_reg(phys_addr_t phys_addr, bool map,
	enum ipa_smmu_cb_type cb_type)
{
	struct ipa_wigig_smmu_reg_addr *entry;
	struct ipa_wigig_smmu_reg_addr *next;
	int result = 0;

	IPADBG("addr %pa, %s\n", &phys_addr, map ? "map" : "unmap");
	mutex_lock(&smmu_lock);
	list_for_each_entry_safe(entry, next, &smmu_reg_addr_list, link) {
		if ((entry->phys_addr == phys_addr) &&
			(entry->cb_type == cb_type)) {
			IPADBG("cb %d, page %pa already mapped, ", cb_type,
				&phys_addr);
			if (map) {
				entry->count++;
				IPADBG("inc to %d\n", (entry->count));
			} else {
				--entry->count;
				IPADBG("dec to %d\n", entry->count);
				if (!(entry->count)) {
					IPADBG("unmap and delete\n");
					result = ipa3_smmu_map_peer_reg(
						phys_addr, map, cb_type);
					if (result) {
						IPAERR("failed to unmap %pa\n",
							&phys_addr);
						goto finish;
					}
					list_del(&entry->link);
					kfree(entry);
				}
			}
			goto finish;
		}
	}

	IPADBG("new page found %pa, map and add to list CB %d\n", &phys_addr,
		cb_type);
	result = ipa3_smmu_map_peer_reg(phys_addr, map, cb_type);
	if (result) {
		IPAERR("failed to map %pa\n", &phys_addr);
		goto finish;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry == NULL) {
		IPAERR("couldn't allocate for %pa\n", &phys_addr);
		ipa3_smmu_map_peer_reg(phys_addr, !map, cb_type);
		result = -ENOMEM;
		goto finish;
	}
	INIT_LIST_HEAD(&entry->link);
	entry->phys_addr = phys_addr;
	entry->cb_type = cb_type;
	entry->count = 1;
	list_add(&entry->link, &smmu_reg_addr_list);

finish:
	mutex_unlock(&smmu_lock);
	IPADBG("exit\n");
	return result;
}
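
/*
 * Map/unmap a peer ring to the given SMMU context bank, reference-counted
 * via smmu_ring_addr_list (same scheme as register pages above, keyed by
 * IOVA and CB type).
 */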
static int ipa3_wigig_smmu_map_ring(u64 iova, u32 size, bool map,
	struct sg_table *sgt, enum ipa_smmu_cb_type cb_type)
{
	struct ipa_wigig_smmu_ring_addr *entry;
	struct ipa_wigig_smmu_ring_addr *next;
	int result = 0;

	IPADBG("iova %llX, %s\n", iova, map ? "map" : "unmap");
	mutex_lock(&smmu_lock);
	list_for_each_entry_safe(entry, next, &smmu_ring_addr_list, link) {
		if ((entry->iova == iova) &&
			(entry->cb_type == cb_type)) {
			IPADBG("cb %d, page 0x%llX already mapped, ", cb_type,
				iova);
			if (map) {
				entry->count++;
				IPADBG("inc to %d\n", (entry->count));
			} else {
				--entry->count;
				IPADBG("dec to %d\n", entry->count);
				if (!(entry->count)) {
					IPADBG("unmap and delete\n");
					result = ipa3_smmu_map_peer_buff(
						iova, size, map, sgt, cb_type);
					if (result) {
						IPAERR(
							"failed to unmap 0x%llX\n",
							iova);
						goto finish;
					}
					list_del(&entry->link);
					kfree(entry);
				}
			}
			goto finish;
		}
	}

	IPADBG("new page found 0x%llX, map and add to list\n", iova);
	result = ipa3_smmu_map_peer_buff(iova, size, map, sgt, cb_type);
	if (result) {
		IPAERR("failed to map 0x%llX\n", iova);
		goto finish;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry == NULL) {
		IPAERR("couldn't allocate for 0x%llX\n", iova);
		ipa3_smmu_map_peer_buff(iova, size, !map, sgt, cb_type);
		result = -ENOMEM;
		goto finish;
	}
	INIT_LIST_HEAD(&entry->link);
	entry->iova = iova;
	entry->cb_type = cb_type;
	entry->count = 1;
	list_add(&entry->link, &smmu_ring_addr_list);

finish:
	mutex_unlock(&smmu_lock);
	IPADBG("exit\n");
	return result;
}
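
/*
 * Map/unmap everything a WIGIG channel needs: the HWHEAD/HWTAIL register
 * pages, the descriptor and status rings, and (when the 11AD context bank
 * is not shared with the WLAN driver) the data buffers. See the CB
 * ownership table inside.
 */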
static int ipa3_wigig_smmu_map_channel(bool Rx,
	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
	void *buff,
	bool map)
{
	int result = 0;
	struct ipa_smmu_cb_ctx *smmu_ctx = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);

	IPADBG("\n");

	/*
	 * -------------------------------------------------------------------
	 *  entity        |HWHEAD|HWTAIL|HWHEAD|HWTAIL| misc | buffers| rings |
	 *                |Sring |Sring |Dring |Dring | regs |        |       |
	 * -------------------------------------------------------------------
	 *  GSI (apps CB) |  TX  |RX, TX|      |RX, TX|      |        |RX, TX |
	 * -------------------------------------------------------------------
	 *  IPA (11AD CB) |      |      |      |      |      | RX, TX |       |
	 * -------------------------------------------------------------------
	 *  uc (uC CB)    |  RX  |      |  TX  |      |always|        |       |
	 * -------------------------------------------------------------------
	 *
	 * buffers are mapped to 11AD CB. in case this context bank is shared,
	 * mapping is done by 11ad driver only and applies to both 11ad and
	 * IPA HWs (page tables are shared). Otherwise, mapping is done here.
	 */

	if (!smmu_ctx) {
		IPAERR("11AD SMMU ctx is null\n");
		return -EINVAL;
	}

	if (Rx) {
		IPADBG("RX %s status_ring_HWHEAD_pa %pa uC CB\n",
			map ? "map" : "unmap",
			&pipe_smmu->status_ring_HWHEAD_pa);
		result = ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s status_ring_HWHEAD %d\n",
				map ? "map" : "unmap",
				result);
			goto fail;
		}
	} else {
		IPADBG("TX %s status_ring_HWHEAD_pa %pa AP CB\n",
			map ? "map" : "unmap",
			&pipe_smmu->status_ring_HWHEAD_pa);
		result = ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa,
				PAGE_SIZE),
			map,
			IPA_SMMU_CB_AP);
		if (result) {
			IPAERR(
				"failed to %s status_ring_HWHEAD %d\n",
				map ? "map" : "unmap",
				result);
			goto fail;
		}

		IPADBG("TX %s desc_ring_HWHEAD_pa %pa uC CB\n",
			map ? "map" : "unmap",
			&pipe_smmu->desc_ring_HWHEAD_pa);
		result = ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->desc_ring_HWHEAD_pa,
				PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR("failed to %s desc_ring_HWHEAD %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_desc_HWHEAD;
		}
	}

	IPADBG("%s status_ring_HWTAIL_pa %pa AP CB\n",
		map ? "map" : "unmap",
		&pipe_smmu->status_ring_HWTAIL_pa);
	result = ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
		map,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR(
			"failed to %s status_ring_HWTAIL %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_status_HWTAIL;
	}

	IPADBG("%s desc_ring_HWTAIL_pa %pa AP CB\n",
		map ? "map" : "unmap",
		&pipe_smmu->desc_ring_HWTAIL_pa);
	result = ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
		map,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR("failed to %s desc_ring_HWTAIL %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_desc_HWTAIL;
	}

	/* rings */
	IPADBG("%s desc_ring_base_iova %llX AP CB\n",
		map ? "map" : "unmap",
		pipe_smmu->desc_ring_base_iova);
	result = ipa3_wigig_smmu_map_ring(
		pipe_smmu->desc_ring_base_iova,
		pipe_smmu->desc_ring_size,
		map,
		&pipe_smmu->desc_ring_base,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR("failed to %s desc_ring_base %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_desc_ring;
	}

	IPADBG("%s status_ring_base_iova %llX AP CB\n",
		map ? "map" : "unmap",
		pipe_smmu->status_ring_base_iova);
	result = ipa3_wigig_smmu_map_ring(
		pipe_smmu->status_ring_base_iova,
		pipe_smmu->status_ring_size,
		map,
		&pipe_smmu->status_ring_base,
		IPA_SMMU_CB_AP);
	if (result) {
		IPAERR("failed to %s status_ring_base %d\n",
			map ? "map" : "unmap",
			result);
		goto fail_status_ring;
	}

	if (!smmu_ctx->shared) {
		IPADBG("CB not shared - map buffers\n");
		result = ipa3_wigig_smmu_map_buffers(Rx, pipe_smmu, buff, map);
		if (result) {
			IPAERR("failed to %s buffers %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_buffers;
		}
	}

	IPADBG("exit\n");
	return 0;

fail_buffers:
	ipa3_wigig_smmu_map_ring(
		pipe_smmu->status_ring_base_iova, pipe_smmu->status_ring_size,
		!map, &pipe_smmu->status_ring_base, IPA_SMMU_CB_AP);
fail_status_ring:
	ipa3_wigig_smmu_map_ring(
		pipe_smmu->desc_ring_base_iova, pipe_smmu->desc_ring_size,
		!map, &pipe_smmu->desc_ring_base, IPA_SMMU_CB_AP);
fail_desc_ring:
	ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
		!map, IPA_SMMU_CB_AP);
fail_desc_HWTAIL:
	ipa3_wigig_smmu_map_reg(
		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
		!map, IPA_SMMU_CB_AP);
fail_status_HWTAIL:
	if (Rx)
		ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
			!map, IPA_SMMU_CB_UC);
	else
		ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->desc_ring_HWHEAD_pa, PAGE_SIZE),
			!map, IPA_SMMU_CB_UC);
fail_desc_HWHEAD:
	if (!Rx)
		ipa3_wigig_smmu_map_reg(
			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
			!map, IPA_SMMU_CB_AP);
fail:
	return result;
}

static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_CHAN_INVALID_TRE_ERR:
		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
		break;
	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_CHAN_HWO_1_ERR:
		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
	ipa_assert();
}

static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_EVT_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_EVT_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_EVT_EVT_RING_EMPTY_ERR:
		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
	ipa_assert();
}
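
/*
 * Default event-ring interrupt moderation (timer/counter) values and the
 * thresholds that pace how often HW updates the rings' HWTAIL status;
 * these feed the event-ring and channel scratch areas below.
 */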
static uint16_t int_modt = 15;
static uint8_t int_modc = 200;
static uint8_t tx_hwtail_mod_threshold = 200;
static uint8_t rx_hwtail_mod_threshold = 200;
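
/*
 * Allocate and configure the GSI event ring and channel for a WIGIG pipe.
 * The event ring is laid over the 11ad descriptor ring with its MSI
 * pointing at desc_ring_HWTAIL; the channel ring is the status ring for
 * RX and the descriptor ring for TX. The channel scratch is filled with
 * the HWHEAD/HWTAIL addresses and fixed buffer size the GSI needs.
 */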
static int ipa3_wigig_config_gsi(bool Rx,
	bool smmu_en,
	void *pipe_info,
	void *buff,
	const struct ipa_gsi_ep_config *ep_gsi,
	struct ipa3_ep_context *ep)
{
	struct gsi_evt_ring_props evt_props;
	struct gsi_chan_props channel_props;
	union __packed gsi_channel_scratch gsi_scratch;
	int gsi_res;
	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu;
	struct ipa_wigig_pipe_setup_info *pipe;
	struct ipa_wigig_rx_pipe_data_buffer_info *rx_dbuff;
	struct ipa_wigig_rx_pipe_data_buffer_info_smmu *rx_dbuff_smmu;
	struct ipa_wigig_tx_pipe_data_buffer_info *tx_dbuff;
	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *tx_dbuff_smmu;

	IPADBG("%s, %s\n", Rx ? "Rx" : "Tx", smmu_en ? "smmu en" : "smmu dis");

	/* alloc event ring */
	memset(&evt_props, 0, sizeof(evt_props));
	evt_props.intf = GSI_EVT_CHTYPE_11AD_EV;
	evt_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
	evt_props.intr = GSI_INTR_MSI;
	evt_props.intvec = 0;
	evt_props.exclusive = true;
	evt_props.err_cb = ipa_gsi_evt_ring_err_cb;
	evt_props.user_data = NULL;
	evt_props.int_modc = int_modc;
	evt_props.int_modt = int_modt;
	evt_props.ring_base_vaddr = NULL;

	if (smmu_en) {
		pipe_smmu = (struct ipa_wigig_pipe_setup_info_smmu *)pipe_info;
		evt_props.ring_base_addr =
			pipe_smmu->desc_ring_base_iova;
		evt_props.ring_len = pipe_smmu->desc_ring_size;
		evt_props.msi_addr = pipe_smmu->desc_ring_HWTAIL_pa;
	} else {
		pipe = (struct ipa_wigig_pipe_setup_info *)pipe_info;
		evt_props.ring_base_addr = pipe->desc_ring_base_pa;
		evt_props.ring_len = pipe->desc_ring_size;
		evt_props.msi_addr = pipe->desc_ring_HWTAIL_pa;
	}

	gsi_res = gsi_alloc_evt_ring(&evt_props,
		ipa3_ctx->gsi_dev_hdl,
		&ep->gsi_evt_ring_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("Error allocating event ring: %d\n", gsi_res);
		return -EFAULT;
	}

	/* event scratch not configured by SW for TX channels */
	if (Rx) {
		union __packed gsi_evt_scratch evt_scratch;

		memset(&evt_scratch, 0, sizeof(evt_scratch));
		evt_scratch.w11ad.update_status_hwtail_mod_threshold =
			rx_hwtail_mod_threshold;
		gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
			evt_scratch);
		if (gsi_res != GSI_STATUS_SUCCESS) {
			IPAERR("Error writing WIGIG event ring scratch: %d\n",
				gsi_res);
			goto fail_write_evt_scratch;
		}
	}

	ep->gsi_mem_info.evt_ring_len = evt_props.ring_len;
	ep->gsi_mem_info.evt_ring_base_addr = evt_props.ring_base_addr;
	ep->gsi_mem_info.evt_ring_base_vaddr = evt_props.ring_base_vaddr;

	/* alloc channel ring */
	memset(&channel_props, 0, sizeof(channel_props));
	memset(&gsi_scratch, 0, sizeof(gsi_scratch));

	if (Rx)
		channel_props.dir = GSI_CHAN_DIR_TO_GSI;
	else
		channel_props.dir = GSI_CHAN_DIR_FROM_GSI;

	channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
	channel_props.prot = GSI_CHAN_PROT_11AD;
	channel_props.ch_id = ep_gsi->ipa_gsi_chan_num;
	channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
	channel_props.xfer_cb = NULL;

	channel_props.use_db_eng = GSI_CHAN_DB_MODE;
	channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
	channel_props.prefetch_mode = ep_gsi->prefetch_mode;
	channel_props.empty_lvl_threshold = ep_gsi->prefetch_threshold;
	channel_props.low_weight = 1;
	channel_props.err_cb = ipa_gsi_chan_err_cb;
	channel_props.ring_base_vaddr = NULL;

	if (Rx) {
		if (smmu_en) {
			rx_dbuff_smmu =
			(struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
			channel_props.ring_base_addr =
				pipe_smmu->status_ring_base_iova;
			channel_props.ring_len =
				pipe_smmu->status_ring_size;

			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(
					pipe_smmu->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
				IPA_WIGIG_MSB(
					pipe_smmu->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
				IPA_WIGIG_LSB(
					rx_dbuff_smmu->data_buffer_base_iova);
			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
				IPA_WIGIG_MSB(
					rx_dbuff_smmu->data_buffer_base_iova);
			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(rx_dbuff_smmu->data_buffer_size);
		} else {
			rx_dbuff =
			(struct ipa_wigig_rx_pipe_data_buffer_info *)buff;
			channel_props.ring_base_addr =
				pipe->status_ring_base_pa;
			channel_props.ring_len = pipe->status_ring_size;

			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(pipe->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
				IPA_WIGIG_MSB(pipe->status_ring_HWTAIL_pa);
			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
				IPA_WIGIG_LSB(rx_dbuff->data_buffer_base_pa);
			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
				IPA_WIGIG_MSB(rx_dbuff->data_buffer_base_pa);
			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(rx_dbuff->data_buffer_size);
		}
		IPADBG("rx scratch: status_ring_hwtail_address_lsb 0x%X\n",
			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb);
		IPADBG("rx scratch: status_ring_hwtail_address_msb 0x%X\n",
			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb);
		IPADBG("rx scratch: data_buffers_base_address_lsb 0x%X\n",
			gsi_scratch.rx_11ad.data_buffers_base_address_lsb);
		IPADBG("rx scratch: data_buffers_base_address_msb 0x%X\n",
			gsi_scratch.rx_11ad.data_buffers_base_address_msb);
		IPADBG("rx scratch: fixed_data_buffer_size_pow_2 %d\n",
			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2);
		IPADBG("rx scratch 0x[%X][%X][%X][%X]\n",
			gsi_scratch.data.word1,
			gsi_scratch.data.word2,
			gsi_scratch.data.word3,
			gsi_scratch.data.word4);
	} else {
		if (smmu_en) {
			tx_dbuff_smmu =
			(struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;
			channel_props.ring_base_addr =
				pipe_smmu->desc_ring_base_iova;
			channel_props.ring_len =
				pipe_smmu->desc_ring_size;

			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(
					pipe_smmu->status_ring_HWTAIL_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb =
				IPA_WIGIG_LSB(
					pipe_smmu->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb =
				IPA_WIGIG_8_MSB(
					pipe_smmu->status_ring_HWHEAD_pa);

			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(tx_dbuff_smmu->data_buffer_size);
			gsi_scratch.tx_11ad.status_ring_num_elem =
				pipe_smmu->status_ring_size /
				IPA_WIGIG_STATUS_RING_EL_SIZE;
		} else {
			tx_dbuff =
			(struct ipa_wigig_tx_pipe_data_buffer_info *)buff;
			channel_props.ring_base_addr = pipe->desc_ring_base_pa;
			channel_props.ring_len = pipe->desc_ring_size;

			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
				IPA_WIGIG_LSB(
					pipe->status_ring_HWTAIL_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb =
				IPA_WIGIG_LSB(
					pipe->status_ring_HWHEAD_pa);
			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb =
				IPA_WIGIG_8_MSB(pipe->status_ring_HWHEAD_pa);

			gsi_scratch.tx_11ad.status_ring_num_elem =
				pipe->status_ring_size /
				IPA_WIGIG_STATUS_RING_EL_SIZE;
			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
				ilog2(tx_dbuff->data_buffer_size);
		}
		gsi_scratch.tx_11ad.update_status_hwtail_mod_threshold =
			tx_hwtail_mod_threshold;
		IPADBG("tx scratch: status_ring_hwtail_address_lsb 0x%X\n",
			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb);
		IPADBG("tx scratch: status_ring_hwhead_address_lsb 0x%X\n",
			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb);
		IPADBG("tx scratch: status_ring_hwhead_hwtail_8_msb 0x%X\n",
			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb);
		IPADBG("tx scratch: status_ring_num_elem %d\n",
			gsi_scratch.tx_11ad.status_ring_num_elem);
		IPADBG("tx scratch: fixed_data_buffer_size_pow_2 %d\n",
			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2);
		IPADBG("tx scratch 0x[%X][%X][%X][%X]\n",
			gsi_scratch.data.word1,
			gsi_scratch.data.word2,
			gsi_scratch.data.word3,
			gsi_scratch.data.word4);
	}

	IPADBG("ch_id: %d\n", channel_props.ch_id);
	IPADBG("evt_ring_hdl: %ld\n", channel_props.evt_ring_hdl);
	IPADBG("re_size: %d\n", channel_props.re_size);
	IPADBG("GSI channel ring len: %d\n", channel_props.ring_len);
	IPADBG("channel ring base addr = 0x%llX\n",
		(unsigned long long)channel_props.ring_base_addr);

	IPADBG("Allocating GSI channel\n");
	gsi_res = gsi_alloc_channel(&channel_props,
		ipa3_ctx->gsi_dev_hdl,
		&ep->gsi_chan_hdl);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("gsi_alloc_channel failed %d\n", gsi_res);
		goto fail_alloc_channel;
	}

	IPADBG("Writing Channel scratch\n");
	ep->gsi_mem_info.chan_ring_len = channel_props.ring_len;
	ep->gsi_mem_info.chan_ring_base_addr = channel_props.ring_base_addr;
	ep->gsi_mem_info.chan_ring_base_vaddr =
		channel_props.ring_base_vaddr;

	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
		gsi_scratch);
	if (gsi_res != GSI_STATUS_SUCCESS) {
		IPAERR("gsi_write_channel_scratch failed %d\n",
			gsi_res);
		goto fail_write_channel_scratch;
	}

	IPADBG("exit\n");
	return 0;

fail_write_channel_scratch:
	gsi_dealloc_channel(ep->gsi_chan_hdl);
fail_alloc_channel:
fail_write_evt_scratch:
	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
	return -EFAULT;
}
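
/*
 * Ask the uC to set up or tear down an offload channel. On init the 11ad
 * head-pointer (doorbell) address plus the wifi/GSI channel numbers are
 * sent via IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP; on deinit only the
 * GSI channel number is needed for the TEAR_DOWN command.
 */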
static int ipa3_wigig_config_uc(bool init,
	bool Rx,
	u8 wifi_ch,
	u8 gsi_ch,
	phys_addr_t HWHEAD)
{
	struct ipa_mem_buffer cmd;
	enum ipa_cpu_2_hw_offload_commands command;
	int result;

	IPADBG("%s\n", init ? "init" : "Deinit");
	if (init) {
		struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data;

		cmd.size = sizeof(*cmd_data);
		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
			&cmd.phys_base, GFP_KERNEL);
		if (cmd.base == NULL) {
			IPAERR("fail to get DMA memory.\n");
			return -ENOMEM;
		}

		cmd_data =
			(struct IpaHwOffloadSetUpCmdData_t_v4_0 *)cmd.base;

		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
		cmd_data->SetupCh_params.W11AdSetupCh_params.dir =
			Rx ? W11AD_RX : W11AD_TX;
		cmd_data->SetupCh_params.W11AdSetupCh_params.gsi_ch = gsi_ch;
		cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_ch = wifi_ch;
		cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_msb =
			IPA_WIGIG_MSB(HWHEAD);
		cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_lsb =
			IPA_WIGIG_LSB(HWHEAD);
		command = IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP;
	} else {
		struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data;

		cmd.size = sizeof(*cmd_data);
		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
			&cmd.phys_base, GFP_KERNEL);
		if (cmd.base == NULL) {
			IPAERR("fail to get DMA memory.\n");
			return -ENOMEM;
		}

		cmd_data =
			(struct IpaHwOffloadCommonChCmdData_t_v4_0 *)cmd.base;

		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
		cmd_data->CommonCh_params.W11AdCommonCh_params.gsi_ch = gsi_ch;
		command = IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN;
	}

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
		command,
		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
		false, 10 * HZ);
	if (result) {
		IPAERR("fail to %s uc for %s gsi channel %d\n",
			init ? "init" : "deinit",
			Rx ? "Rx" : "Tx", gsi_ch);
	}

	dma_free_coherent(ipa3_ctx->uc_pdev,
		cmd.size, cmd.base, cmd.phys_base);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG("exit\n");
	return result;
}
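
/*
 * Connect the single WIGIG RX (producer) pipe. A rough call flow for this
 * driver, as implied by the gsi_offload_state tracking (an informal
 * sketch, not a documented contract):
 *
 *	ipa3_wigig_internal_init()        - register uC ready cb, get DB
 *	ipa3_wigig_uc_msi_init(true, ..)  - MSI plumbing towards the uC
 *	ipa3_conn_wigig_rx_pipe_i()       - connect RX  (state: CONNECTED)
 *	ipa3_conn_wigig_client_i()        - connect TX clients
 *	ipa3_enable_wigig_pipe_i()        - per pipe    (state: |= ENABLED)
 *	...
 *	ipa3_disconn_wigig_pipe_i()       - per pipe, once no longer enabled
 */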
int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out,
	struct dentry **parent)
{
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	struct ipa_ep_cfg ep_cfg;
	enum ipa_client_type rx_client = IPA_CLIENT_WIGIG_PROD;
	bool is_smmu_enabled;
	struct ipa_wigig_conn_rx_in_params_smmu *input_smmu = NULL;
	struct ipa_wigig_conn_rx_in_params *input = NULL;
	const struct ipa_gsi_ep_config *ep_gsi;
	void *pipe_info;
	void *buff;
	phys_addr_t status_ring_HWHEAD_pa;
	int result;

	IPADBG("\n");

	*parent = wigig_dent;

	ipa_ep_idx = ipa_get_ep_mapping(rx_client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
		IPAERR("fail to get ep (IPA_CLIENT_WIGIG_PROD) %d.\n",
			ipa_ep_idx);
		return -EFAULT;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid) {
		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
		return -EFAULT;
	}

	if (ep->gsi_offload_state) {
		IPAERR("WIGIG channel bad state 0x%X\n",
			ep->gsi_offload_state);
		return -EFAULT;
	}

	ep_gsi = ipa3_get_gsi_ep_info(rx_client);
	if (!ep_gsi) {
		IPAERR("Failed getting GSI EP info for client=%d\n",
			rx_client);
		return -EPERM;
	}

	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* setup rx ep cfg */
	ep->valid = 1;
	ep->client = rx_client;
	result = ipa3_disable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
			ipa_ep_idx);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return -EFAULT;
	}

	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
	if (is_smmu_enabled) {
		struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu;

		input_smmu = (struct ipa_wigig_conn_rx_in_params_smmu *)in;
		dbuff_smmu = &input_smmu->dbuff_smmu;
		ep->client_notify = input_smmu->notify;
		ep->priv = input_smmu->priv;

		IPADBG(
		"desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
		(unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
		input_smmu->pipe_smmu.desc_ring_size,
		(unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
		input_smmu->pipe_smmu.status_ring_size);
		IPADBG("data_buffer_base_iova 0x%llX data_buffer_size %d",
			(unsigned long long)dbuff_smmu->data_buffer_base_iova,
			input_smmu->dbuff_smmu.data_buffer_size);

		if (IPA_WIGIG_MSB(
			dbuff_smmu->data_buffer_base_iova) &
			0xFFFFFF00) {
			IPAERR(
			"data_buffers_base_address_msb is over the 8 bit limit (0x%llX)\n",
			(unsigned long long)dbuff_smmu->data_buffer_base_iova);
			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
			return -EFAULT;
		}
		if (dbuff_smmu->data_buffer_size >> 16) {
			IPAERR(
			"data_buffer_size is over the 16 bit limit (%d)\n",
			dbuff_smmu->data_buffer_size);
			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
			return -EFAULT;
		}
	} else {
		input = (struct ipa_wigig_conn_rx_in_params *)in;
		ep->client_notify = input->notify;
		ep->priv = input->priv;

		IPADBG(
		"desc_ring_base_pa %pa desc_ring_size %d status_ring_base_pa %pa status_ring_size %d",
			&input->pipe.desc_ring_base_pa,
			input->pipe.desc_ring_size,
			&input->pipe.status_ring_base_pa,
			input->pipe.status_ring_size);
		IPADBG("data_buffer_base_pa %pa data_buffer_size %d",
			&input->dbuff.data_buffer_base_pa,
			input->dbuff.data_buffer_size);

		if (IPA_WIGIG_MSB(input->dbuff.data_buffer_base_pa) &
			0xFFFFFF00) {
			IPAERR(
			"data_buffers_base_address_msb is over the 8 bit limit (0x%pa)\n",
			&input->dbuff.data_buffer_base_pa);
			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
			return -EFAULT;
		}
		if (input->dbuff.data_buffer_size >> 16) {
			IPAERR(
			"data_buffer_size is over the 16 bit limit (0x%X)\n",
			input->dbuff.data_buffer_size);
			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
			return -EFAULT;
		}
	}

	memset(&ep_cfg, 0, sizeof(ep_cfg));
	ep_cfg.nat.nat_en = IPA_SRC_NAT;
	ep_cfg.hdr.hdr_len = ETH_HLEN;
	ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
	ep_cfg.hdr.hdr_ofst_pkt_size = 0;
	ep_cfg.hdr.hdr_additional_const_len = 0;
	ep_cfg.hdr_ext.hdr_little_endian = true;
	ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
	ep_cfg.hdr.hdr_metadata_reg_valid = 1;
	ep_cfg.mode.mode = IPA_BASIC;

	if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
		IPAERR("fail to setup rx pipe cfg\n");
		result = -EFAULT;
		goto fail;
	}

	if (is_smmu_enabled) {
		result = ipa3_wigig_smmu_map_channel(true,
			&input_smmu->pipe_smmu,
			&input_smmu->dbuff_smmu,
			true);
		if (result) {
			IPAERR("failed to setup rx pipe smmu map\n");
			result = -EFAULT;
			goto fail;
		}

		pipe_info = &input_smmu->pipe_smmu;
		buff = &input_smmu->dbuff_smmu;
		status_ring_HWHEAD_pa =
			input_smmu->pipe_smmu.status_ring_HWHEAD_pa;
	} else {
		pipe_info = &input->pipe;
		buff = &input->dbuff;
		status_ring_HWHEAD_pa =
			input->pipe.status_ring_HWHEAD_pa;
	}

	result = ipa3_wigig_config_gsi(true,
		is_smmu_enabled,
		pipe_info,
		buff,
		ep_gsi, ep);
	if (result)
		goto fail_gsi;

	result = ipa3_wigig_config_uc(
		true, true, 0,
		ep_gsi->ipa_gsi_chan_num,
		status_ring_HWHEAD_pa);
	if (result)
		goto fail_uc_config;

	ipa3_install_dflt_flt_rules(ipa_ep_idx);

	out->client = IPA_CLIENT_WIGIG_PROD;
	ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG("wigig rx pipe connected successfully\n");
	IPADBG("exit\n");
	return 0;

fail_uc_config:
	/* Release channel and evt */
	ipa3_release_gsi_channel(ipa_ep_idx);
fail_gsi:
	if (input_smmu)
		ipa3_wigig_smmu_map_channel(true, &input_smmu->pipe_smmu,
			&input_smmu->dbuff_smmu, false);
fail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
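
/*
 * Connect one of the WIGIG TX (consumer) pipes. The client is derived
 * from the interrupt-generation TX bit number, and aggregation is
 * configured with a single-packet limit whose hard byte limit matches
 * the peer's fixed data buffer size.
 */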
int ipa3_conn_wigig_client_i(void *in,
	struct ipa_wigig_conn_out_params *out,
	ipa_notify_cb tx_notify,
	void *priv)
{
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	struct ipa_ep_cfg ep_cfg;
	enum ipa_client_type tx_client;
	bool is_smmu_enabled;
	struct ipa_wigig_conn_tx_in_params_smmu *input_smmu = NULL;
	struct ipa_wigig_conn_tx_in_params *input = NULL;
	const struct ipa_gsi_ep_config *ep_gsi;
	u32 aggr_byte_limit;
	int result;
	void *pipe_info;
	void *buff;
	phys_addr_t desc_ring_HWHEAD_pa;
	u8 wifi_ch;

	IPADBG("\n");

	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
	if (is_smmu_enabled) {
		input_smmu = (struct ipa_wigig_conn_tx_in_params_smmu *)in;

		IPADBG(
		"desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
		(unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
		input_smmu->pipe_smmu.desc_ring_size,
		(unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
		input_smmu->pipe_smmu.status_ring_size);
		IPADBG("num buffers %d, data buffer size %d\n",
			input_smmu->dbuff_smmu.num_buffers,
			input_smmu->dbuff_smmu.data_buffer_size);

		if (ipa3_wigig_tx_bit_to_ep(input_smmu->int_gen_tx_bit_num,
			&tx_client)) {
			return -EINVAL;
		}
		if (input_smmu->dbuff_smmu.data_buffer_size >> 16) {
			IPAERR(
			"data_buffer_size is over the 16 bit limit (0x%X)\n",
			input_smmu->dbuff_smmu.data_buffer_size);
			return -EFAULT;
		}

		if (IPA_WIGIG_8_MSB(
			input_smmu->pipe_smmu.status_ring_HWHEAD_pa)
			!= IPA_WIGIG_8_MSB(
			input_smmu->pipe_smmu.status_ring_HWTAIL_pa)) {
			IPAERR(
			"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n",
			input_smmu->pipe_smmu.status_ring_HWHEAD_pa,
			input_smmu->pipe_smmu.status_ring_HWTAIL_pa);
			return -EFAULT;
		}

		wifi_ch = input_smmu->int_gen_tx_bit_num;

		/* convert to kBytes */
		aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
			input_smmu->dbuff_smmu.data_buffer_size);
	} else {
		input = (struct ipa_wigig_conn_tx_in_params *)in;

		IPADBG(
		"desc_ring_base_pa %pa desc_ring_size %d status_ring_base_pa %pa status_ring_size %d",
			&input->pipe.desc_ring_base_pa,
			input->pipe.desc_ring_size,
			&input->pipe.status_ring_base_pa,
			input->pipe.status_ring_size);
		IPADBG("data_buffer_size %d", input->dbuff.data_buffer_size);

		if (ipa3_wigig_tx_bit_to_ep(input->int_gen_tx_bit_num,
			&tx_client)) {
			return -EINVAL;
		}

		if (input->dbuff.data_buffer_size >> 16) {
			IPAERR(
			"data_buffer_size is over the 16 bit limit (0x%X)\n",
			input->dbuff.data_buffer_size);
			return -EFAULT;
		}

		if (IPA_WIGIG_8_MSB(
			input->pipe.status_ring_HWHEAD_pa)
			!= IPA_WIGIG_8_MSB(
			input->pipe.status_ring_HWTAIL_pa)) {
			IPAERR(
			"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n",
			input->pipe.status_ring_HWHEAD_pa,
			input->pipe.status_ring_HWTAIL_pa);
			return -EFAULT;
		}

		wifi_ch = input->int_gen_tx_bit_num;

		/* convert to kBytes */
		aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
			input->dbuff.data_buffer_size);
	}
	IPADBG("client type is %d\n", tx_client);

	ipa_ep_idx = ipa_get_ep_mapping(tx_client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
		IPAERR("fail to get ep (%d) %d.\n",
			tx_client, ipa_ep_idx);
		return -EFAULT;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid) {
		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
		return -EFAULT;
	}

	if (ep->gsi_offload_state) {
		IPAERR("WIGIG channel bad state 0x%X\n",
			ep->gsi_offload_state);
		return -EFAULT;
	}

	ep_gsi = ipa3_get_gsi_ep_info(tx_client);
	if (!ep_gsi) {
		IPAERR("Failed getting GSI EP info for client=%d\n",
			tx_client);
		return -EFAULT;
	}

	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* setup tx ep cfg */
	ep->valid = 1;
	ep->client = tx_client;
	result = ipa3_disable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
			ipa_ep_idx);
		goto fail;
	}

	ep->client_notify = tx_notify;
	ep->priv = priv;

	memset(&ep_cfg, 0, sizeof(ep_cfg));
	ep_cfg.nat.nat_en = IPA_DST_NAT;
	ep_cfg.hdr.hdr_len = ETH_HLEN;
	ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
	ep_cfg.hdr.hdr_ofst_pkt_size = 0;
	ep_cfg.hdr.hdr_additional_const_len = 0;
	ep_cfg.hdr_ext.hdr_little_endian = true;
	ep_cfg.mode.mode = IPA_BASIC;

	/* config hard byte limit, max is the buffer size (in kB) */
	ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
	ep_cfg.aggr.aggr = IPA_GENERIC;
	ep_cfg.aggr.aggr_pkt_limit = 1;
	ep_cfg.aggr.aggr_byte_limit = aggr_byte_limit;
	ep_cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;

	if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
		IPAERR("fail to setup tx pipe cfg\n");
		result = -EFAULT;
		goto fail;
	}

	if (is_smmu_enabled) {
		result = ipa3_wigig_smmu_map_channel(false,
			&input_smmu->pipe_smmu,
			&input_smmu->dbuff_smmu,
			true);
		if (result) {
			IPAERR(
			"failed to setup tx pipe smmu map client %d (ep %d)\n",
			tx_client, ipa_ep_idx);
			result = -EFAULT;
			goto fail;
		}

		pipe_info = &input_smmu->pipe_smmu;
		buff = &input_smmu->dbuff_smmu;
		desc_ring_HWHEAD_pa =
			input_smmu->pipe_smmu.desc_ring_HWHEAD_pa;
	} else {
		pipe_info = &input->pipe;
		buff = &input->dbuff;
		desc_ring_HWHEAD_pa =
			input->pipe.desc_ring_HWHEAD_pa;
	}

	result = ipa3_wigig_config_gsi(false,
		is_smmu_enabled,
		pipe_info,
		buff,
		ep_gsi, ep);
	if (result)
		goto fail_gsi;

	result = ipa3_wigig_config_uc(
		true, false, wifi_ch,
		ep_gsi->ipa_gsi_chan_num,
		desc_ring_HWHEAD_pa);
	if (result)
		goto fail_uc_config;

	out->client = tx_client;
	ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG("wigig client %d (ep %d) connected successfully\n", tx_client,
		ipa_ep_idx);
	return 0;

fail_uc_config:
	/* Release channel and evt */
	ipa3_release_gsi_channel(ipa_ep_idx);
fail_gsi:
	if (input_smmu)
		ipa3_wigig_smmu_map_channel(false, &input_smmu->pipe_smmu,
			&input_smmu->dbuff_smmu, false);
fail:
	ep->valid = 0;
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
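
/*
 * Disconnect a WIGIG pipe (RX or TX): release the GSI channel and event
 * ring, tear the channel down on the uC, and undo the SMMU mappings.
 * Expected only while the pipe is connected but not enabled.
 */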
int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
	void *dbuff)
{
	bool is_smmu_enabled;
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;
	const struct ipa_gsi_ep_config *ep_gsi;
	int result;
	bool rx = false;

	IPADBG("\n");

	ipa_ep_idx = ipa_get_ep_mapping(client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
		IPAERR("fail to get ep (%d) %d.\n",
			client, ipa_ep_idx);
		return -EFAULT;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (!ep->valid) {
		IPAERR("Invalid EP\n");
		return -EFAULT;
	}

	ep_gsi = ipa3_get_gsi_ep_info(client);
	if (!ep_gsi) {
		IPAERR("Failed getting GSI EP info for client=%d\n",
			client);
		return -EFAULT;
	}

	if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
		IPAERR("client in bad state(client %d) 0x%X\n",
			client, ep->gsi_offload_state);
		return -EFAULT;
	}

	if (client == IPA_CLIENT_WIGIG_PROD)
		rx = true;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	/* Release channel and evt */
	result = ipa3_release_gsi_channel(ipa_ep_idx);
	if (result) {
		IPAERR("failed to deallocate channel\n");
		goto fail;
	}

	/* only gsi ch number and dir are necessary */
	result = ipa3_wigig_config_uc(
		false, rx, 0,
		ep_gsi->ipa_gsi_chan_num, 0);
	if (result) {
		IPAERR("failed uC channel teardown %d\n", result);
		WARN_ON(1);
	}

	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
	if (is_smmu_enabled) {
		if (!pipe_smmu || !dbuff) {
			IPAERR("smmu input is null %pK %pK\n",
				pipe_smmu, dbuff);
			WARN_ON(1);
		} else {
			result = ipa3_wigig_smmu_map_channel(rx,
				pipe_smmu,
				dbuff,
				false);
			if (result) {
				IPAERR(
				"failed to unmap pipe smmu %d (ep %d)\n",
				client, ipa_ep_idx);
				result = -EFAULT;
				goto fail;
			}
		}

		if (rx) {
			if (!list_empty(&smmu_reg_addr_list)) {
				IPAERR("smmu_reg_addr_list not empty\n");
				WARN_ON(1);
			}

			if (!list_empty(&smmu_ring_addr_list)) {
				IPAERR("smmu_ring_addr_list not empty\n");
				WARN_ON(1);
			}
		}
	} else if (pipe_smmu || dbuff) {
		IPAERR("smmu input is not null %pK %pK\n",
			pipe_smmu, dbuff);
		WARN_ON(1);
	}

	memset(ep, 0, sizeof(struct ipa3_ep_context));

	ep->gsi_offload_state = 0;

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG("client (ep: %d) disconnected\n", ipa_ep_idx);
	IPADBG("exit\n");
	return 0;

fail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
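
/*
 * One-time MSI plumbing between the 11ad core and the uC: the relevant
 * 11ad register pages are mapped to the uC context bank (when SMMU is
 * active) and the peripheral base address is handed to the uC via
 * IPA_CPU_2_HW_CMD_PERIPHERAL_INIT / _DEINIT.
 */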
int ipa3_wigig_uc_msi_init(bool init,
	phys_addr_t periph_baddr_pa,
	phys_addr_t pseudo_cause_pa,
	phys_addr_t int_gen_tx_pa,
	phys_addr_t int_gen_rx_pa,
	phys_addr_t dma_ep_misc_pa)
{
	int result;
	struct ipa_mem_buffer cmd;
	enum ipa_cpu_2_hw_offload_commands command;
	bool map = false;

	IPADBG("params: %s, %pa, %pa, %pa, %pa, %pa\n",
		init ? "init" : "deInit",
		&periph_baddr_pa,
		&pseudo_cause_pa,
		&int_gen_tx_pa,
		&int_gen_rx_pa,
		&dma_ep_misc_pa);

	/* first make sure registers are SMMU mapped if necessary */
	if ((!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC])) {
		if (init)
			map = true;

		IPADBG("SMMU enabled, map %d\n", map);
		result = ipa3_smmu_map_peer_reg(
			rounddown(pseudo_cause_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s pseudo_cause reg %d\n",
				map ? "map" : "unmap",
				result);
			goto fail;
		}

		result = ipa3_smmu_map_peer_reg(
			rounddown(int_gen_tx_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s int_gen_tx reg %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_gen_tx;
		}

		result = ipa3_smmu_map_peer_reg(
			rounddown(int_gen_rx_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s int_gen_rx reg %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_gen_rx;
		}

		result = ipa3_smmu_map_peer_reg(
			rounddown(dma_ep_misc_pa, PAGE_SIZE),
			map,
			IPA_SMMU_CB_UC);
		if (result) {
			IPAERR(
				"failed to %s dma_ep_misc reg %d\n",
				map ? "map" : "unmap",
				result);
			goto fail_dma_ep_misc;
		}
	}

	/* now send the wigig hw base address to uC */
	if (init) {
		struct IpaHwPeripheralInitCmdData_t *cmd_data;

		cmd.size = sizeof(*cmd_data);
		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
			&cmd.phys_base, GFP_KERNEL);
		if (cmd.base == NULL) {
			IPAERR("fail to get DMA memory.\n");
			result = -ENOMEM;
			if (map)
				goto fail_alloc;
			return result;
		}

		cmd_data = (struct IpaHwPeripheralInitCmdData_t *)cmd.base;
		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
		cmd_data->Init_params.W11AdInit_params.periph_baddr_msb =
			IPA_WIGIG_MSB(periph_baddr_pa);
		cmd_data->Init_params.W11AdInit_params.periph_baddr_lsb =
			IPA_WIGIG_LSB(periph_baddr_pa);
		command = IPA_CPU_2_HW_CMD_PERIPHERAL_INIT;
	} else {
		struct IpaHwPeripheralDeinitCmdData_t *cmd_data;

		cmd.size = sizeof(*cmd_data);
		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
			&cmd.phys_base, GFP_KERNEL);
		if (cmd.base == NULL) {
			IPAERR("fail to get DMA memory.\n");
			result = -ENOMEM;
			if (map)
				goto fail_alloc;
			return result;
		}

		cmd_data = (struct IpaHwPeripheralDeinitCmdData_t *)cmd.base;
		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
		command = IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT;
	}

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
		command,
		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
		false, 10 * HZ);
	if (result) {
		IPAERR("fail to %s uc MSI config\n", init ? "init" : "deinit");
		goto fail_command;
	}

	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size,
		cmd.base, cmd.phys_base);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG("exit\n");
	return 0;

fail_command:
	dma_free_coherent(ipa3_ctx->uc_pdev,
		cmd.size,
		cmd.base, cmd.phys_base);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail_alloc:
	ipa3_smmu_map_peer_reg(
		rounddown(dma_ep_misc_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
fail_dma_ep_misc:
	ipa3_smmu_map_peer_reg(
		rounddown(int_gen_rx_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
fail_gen_rx:
	ipa3_smmu_map_peer_reg(
		rounddown(int_gen_tx_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
fail_gen_tx:
	ipa3_smmu_map_peer_reg(
		rounddown(pseudo_cause_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
fail:
	return result;
}

int ipa3_enable_wigig_pipe_i(enum ipa_client_type client)
{
	int ipa_ep_idx, res;
	struct ipa3_ep_context *ep;
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
	int retry_cnt = 0;
	uint64_t val;

	IPADBG("\n");

	ipa_ep_idx = ipa_get_ep_mapping(client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
	    ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
		IPAERR("failed to get ep (%d) %d\n", client, ipa_ep_idx);
		return -EFAULT;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (!ep->valid) {
		IPAERR("Invalid EP\n");
		return -EFAULT;
	}

	if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
		IPAERR("WIGIG channel bad state 0x%X\n",
			ep->gsi_offload_state);
		return -EFAULT;
	}

	IPA_ACTIVE_CLIENTS_INC_EP(client);

	res = ipa3_enable_data_path(ipa_ep_idx);
	if (res)
		goto fail_enable_datapath;
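
	/* a zeroed CTRL config clears any endpoint delay/suspend */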
	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
	ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);

	/* ring the event db (outside the ring boundary) */
	val = ep->gsi_mem_info.evt_ring_base_addr +
		ep->gsi_mem_info.evt_ring_len;
	res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl, val);
	if (res) {
		IPAERR("failed to ring evt ring db %d. hdl=%lu wp=0x%llx\n",
			res, ep->gsi_evt_ring_hdl,
			(unsigned long long)val);
		res = -EFAULT;
		goto fail_ring_evt;
	}

	IPADBG("start channel\n");
	res = gsi_start_channel(ep->gsi_chan_hdl);
	if (res != GSI_STATUS_SUCCESS) {
		IPAERR("gsi_start_channel failed %d\n", res);
		WARN_ON(1);
		res = -EFAULT;
		goto fail_gsi_start;
	}
	/* for TX we have to ring the channel db (last desc in the ring) */
	if (client != IPA_CLIENT_WIGIG_PROD) {
		val = ep->gsi_mem_info.chan_ring_base_addr +
			ep->gsi_mem_info.chan_ring_len -
			IPA_WIGIG_DESC_RING_EL_SIZE;

		IPADBG("ring ch doorbell (0x%llX) TX %lu\n", val,
			ep->gsi_chan_hdl);
		res = gsi_ring_ch_ring_db(ep->gsi_chan_hdl, val);
		if (res) {
			IPAERR("failed to ring channel db %d. hdl=%lu wp=0x%llx\n",
				res, ep->gsi_chan_hdl,
				(unsigned long long)val);
			res = -EFAULT;
			goto fail_ring_ch;
		}
	}
	ep->gsi_offload_state |= IPA_WIGIG_ENABLED;

	IPADBG("exit\n");
	return 0;
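
/*
 * fail_ring_ch doubles as a stop retry loop: a -GSI_STATUS_AGAIN result
 * re-enters the label until GSI_STOP_MAX_RETRY_CNT attempts are exhausted
 */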
fail_ring_ch:
	res = ipa3_stop_gsi_channel(ipa_ep_idx);
	if (res != 0 && res != -GSI_STATUS_AGAIN &&
	    res != -GSI_STATUS_TIMED_OUT) {
		IPAERR("failed to stop channel res = %d\n", res);
	} else if (res == -GSI_STATUS_AGAIN) {
		IPADBG("GSI stop channel failed retry cnt = %d\n",
			retry_cnt);
		retry_cnt++;
		if (retry_cnt < GSI_STOP_MAX_RETRY_CNT)
			goto fail_ring_ch;
	} else {
		IPADBG("GSI channel %lu STOP\n", ep->gsi_chan_hdl);
	}
	res = -EFAULT;
fail_gsi_start:
fail_ring_evt:
	ipa3_disable_data_path(ipa_ep_idx);
fail_enable_datapath:
	IPA_ACTIVE_CLIENTS_DEC_EP(client);
	return res;
}

int ipa3_disable_wigig_pipe_i(enum ipa_client_type client)
{
	int ipa_ep_idx, res;
	struct ipa3_ep_context *ep;
	struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
	bool disable_force_clear = false;
	u32 source_pipe_bitmask = 0;
	int retry_cnt = 0;

	IPADBG("\n");

	ipa_ep_idx = ipa_get_ep_mapping(client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
	    ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
		IPAERR("failed to get ep (%d) %d\n", client, ipa_ep_idx);
		return -EFAULT;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (!ep->valid) {
		IPAERR("Invalid EP\n");
		return -EFAULT;
	}

	if (ep->gsi_offload_state !=
	    (IPA_WIGIG_CONNECTED | IPA_WIGIG_ENABLED)) {
		IPAERR("WIGIG channel bad state 0x%X\n",
			ep->gsi_offload_state);
		return -EFAULT;
	}
	IPADBG("pipe %d\n", ipa_ep_idx);

	source_pipe_bitmask = 1 << ipa_ep_idx;
	res = ipa3_enable_force_clear(ipa_ep_idx,
		false, source_pipe_bitmask);
	if (res) {
		/*
		 * assuming modem SSR here, so the AP can remove
		 * the delay in this case
		 */
		IPAERR("failed to force clear %d\n", res);
		IPAERR("remove delay from SCND reg\n");
		ep_ctrl_scnd.endp_delay = false;
		ipahal_write_reg_n_fields(
			IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx,
			&ep_ctrl_scnd);
	} else {
		disable_force_clear = true;
	}
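
	/*
	 * GSI stop may transiently return -GSI_STATUS_AGAIN; retry up to
	 * GSI_STOP_MAX_RETRY_CNT times before asserting
	 */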
retry_gsi_stop:
	res = ipa3_stop_gsi_channel(ipa_ep_idx);
	if (res != 0 && res != -GSI_STATUS_AGAIN &&
	    res != -GSI_STATUS_TIMED_OUT) {
		IPAERR("failed to stop channel res = %d\n", res);
		goto fail_stop_channel;
	} else if (res == -GSI_STATUS_AGAIN) {
		IPADBG("GSI stop channel failed retry cnt = %d\n",
			retry_cnt);
		retry_cnt++;
		if (retry_cnt >= GSI_STOP_MAX_RETRY_CNT)
			goto fail_stop_channel;
		goto retry_gsi_stop;
	} else {
		IPADBG("GSI channel %lu STOP\n", ep->gsi_chan_hdl);
	}
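
	/* channel is stopped; reset it so it can be cleanly reused */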
	res = ipa3_reset_gsi_channel(ipa_ep_idx);
	if (res != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to reset chan: %d.\n", res);
		goto fail_stop_channel;
	}

	if (disable_force_clear)
		ipa3_disable_force_clear(ipa_ep_idx);

	res = ipa3_disable_data_path(ipa_ep_idx);
	if (res) {
		WARN_ON(1);
		return res;
	}

	/* Set the delay after disabling IPA Producer pipe */
	if (IPA_CLIENT_IS_PROD(ep->client)) {
		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
		ep_cfg_ctrl.ipa_ep_delay = true;
		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
	}

	ep->gsi_offload_state &= ~IPA_WIGIG_ENABLED;

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx));
	IPADBG("exit\n");
	return 0;

fail_stop_channel:
	ipa_assert();
	return res;
}
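
/* debugfs is optional; provide a no-op stub when CONFIG_DEBUG_FS is off */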
#ifndef CONFIG_DEBUG_FS
int ipa3_wigig_init_debugfs_i(struct dentry *parent) { return 0; }
#else
int ipa3_wigig_init_debugfs_i(struct dentry *parent)
{
	const mode_t read_write_mode = 0664;
	struct dentry *file = NULL;
	struct dentry *dent;

	dent = debugfs_create_dir("ipa_wigig", parent);
	if (IS_ERR_OR_NULL(dent)) {
		IPAERR("failed to create ipa_wigig dir in debugfs\n");
		return -EFAULT;
	}
	wigig_dent = dent;
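
	/*
	 * expose the interrupt moderation counter/timer (modc/modt) and the
	 * RX/TX hwtail moderation thresholds as writable debugfs knobs
	 */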
	file = debugfs_create_u8("modc", read_write_mode, dent,
		&int_modc);
	if (IS_ERR_OR_NULL(file)) {
		IPAERR("failed to create modc file\n");
		goto fail;
	}

	file = debugfs_create_u16("modt", read_write_mode, dent,
		&int_modt);
	if (IS_ERR_OR_NULL(file)) {
		IPAERR("failed to create modt file\n");
		goto fail;
	}

	file = debugfs_create_u8("rx_mod_th", read_write_mode, dent,
		&rx_hwtail_mod_threshold);
	if (IS_ERR_OR_NULL(file)) {
		IPAERR("failed to create rx_mod_th file\n");
		goto fail;
	}

	file = debugfs_create_u8("tx_mod_th", read_write_mode, dent,
		&tx_hwtail_mod_threshold);
	if (IS_ERR_OR_NULL(file)) {
		IPAERR("failed to create tx_mod_th file\n");
		goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dent);
	wigig_dent = NULL;
	return -EFAULT;
}
#endif