dp_ipa.c 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877
  1. /*
  2. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #ifdef IPA_OFFLOAD
  17. #include <qdf_ipa_wdi3.h>
  18. #include <qdf_types.h>
  19. #include <qdf_lock.h>
  20. #include <hal_hw_headers.h>
  21. #include <hal_api.h>
  22. #include <hif.h>
  23. #include <htt.h>
  24. #include <wdi_event.h>
  25. #include <queue.h>
  26. #include "dp_types.h"
  27. #include "dp_htt.h"
  28. #include "dp_tx.h"
  29. #include "dp_rx.h"
  30. #include "dp_ipa.h"
  31. /* Ring index for WBM2SW2 release ring */
  32. #define IPA_TX_COMP_RING_IDX HAL_IPA_TX_COMP_RING_IDX
  33. /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
  34. #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048)
  35. /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to
  36. * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
  37. * This causes back pressure, resulting in a FW crash.
  38. * By leaving some entries with no buffer attached, WBM will be able to write
  39. * to the ring, and from dumps we can figure out the buffer which is causing
  40. * this issue.
  41. */
  42. #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
/**
 * struct dp_ipa_reo_remap_record - history entry for DP IPA REO remaps
 * @timestamp: log timestamp (qdf_get_log_timestamp) when the remap was recorded
 * @ix0_reg: REO destination ring IX0 register value
 * @ix2_reg: REO destination ring IX2 register value
 * @ix3_reg: REO destination ring IX3 register value
 */
struct dp_ipa_reo_remap_record {
	uint64_t timestamp;
	uint32_t ix0_reg;
	uint32_t ix2_reg;
	uint32_t ix3_reg;
};
  55. #define REO_REMAP_HISTORY_SIZE 32
  56. struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];
  57. static qdf_atomic_t dp_ipa_reo_remap_history_index;
/**
 * dp_ipa_reo_remap_record_index_next() - advance the shared history index
 * @index: atomic write index for dp_ipa_reo_remap_history
 *
 * Atomically increments @index; when the post-increment value hits
 * REO_REMAP_HISTORY_SIZE the counter is pulled back down by the history
 * depth so it stays bounded. The returned slot is taken modulo the
 * history size, so a concurrent writer that observed a larger counter
 * value still maps into a valid slot.
 *
 * Return: slot in [0, REO_REMAP_HISTORY_SIZE) to record into
 */
static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
{
	int next = qdf_atomic_inc_return(index);

	if (next == REO_REMAP_HISTORY_SIZE)
		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);

	return next % REO_REMAP_HISTORY_SIZE;
}
  65. /**
  66. * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
  67. * @ix0_val: reo destination ring IX0 value
  68. * @ix2_val: reo destination ring IX2 value
  69. * @ix3_val: reo destination ring IX3 value
  70. *
  71. * Return: None
  72. */
  73. static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
  74. uint32_t ix3_val)
  75. {
  76. int idx = dp_ipa_reo_remap_record_index_next(
  77. &dp_ipa_reo_remap_history_index);
  78. struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx];
  79. record->timestamp = qdf_get_log_timestamp();
  80. record->ix0_reg = ix0_val;
  81. record->ix2_reg = ix2_val;
  82. record->ix3_reg = ix3_val;
  83. }
/**
 * __dp_ipa_handle_buf_smmu_mapping() - create/release IPA SMMU map for one nbuf
 * @soc: DP SoC handle (supplies osdev for building the map table)
 * @nbuf: network buffer; its frag-0 physical address is (un)mapped
 * @size: length of the mapping in bytes
 * @create: true to create the IPA SMMU mapping, false to release it
 *
 * Builds a single-entry memory map table from the nbuf's frag-0 paddr and
 * hands it to the IPA WDI SMMU map/release API. All failures are treated
 * as fatal via qdf_assert_always().
 *
 * Return: status of the IPA WDI create/release call
 */
static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
						   qdf_nbuf_t nbuf,
						   uint32_t size,
						   bool create)
{
	qdf_mem_info_t mem_map_table = {0};
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
				 qdf_nbuf_get_frag_paddr(nbuf, 0),
				 size);

	if (create) {
		/* Assert if PA is zero */
		qdf_assert_always(mem_map_table.pa);
		ret = qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
	} else {
		ret = qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);
	}
	qdf_assert_always(!ret);

	/* Return status of mapping/unmapping is stored in
	 * mem_map_table.result field, assert if the result
	 * is failure
	 */
	if (create)
		qdf_assert_always(!mem_map_table.result);
	else
		/* On release, result reports the unmapped length; it must
		 * cover at least the requested size.
		 */
		qdf_assert_always(mem_map_table.result >= mem_map_table.size);

	return ret;
}
/**
 * dp_ipa_handle_rx_buf_smmu_mapping() - IPA SMMU map/unmap for a single Rx nbuf
 * @soc: DP SoC handle
 * @nbuf: Rx network buffer to (un)map
 * @size: mapping length in bytes
 * @create: true to map, false to unmap
 *
 * Skips the operation entirely (returns success) when any pdev has monitor
 * mode configured, when IPA is disabled in cfg, or when SMMU S1 is not
 * enabled. Tracks per-nbuf map state to reject duplicate map/unmap requests.
 *
 * Return: QDF_STATUS_SUCCESS on map/unmap or benign skip,
 *         QDF_STATUS_E_INVAL on a duplicate map/unmap request
 */
QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
					     qdf_nbuf_t nbuf,
					     uint32_t size,
					     bool create)
{
	struct dp_pdev *pdev;
	int i;

	/* No IPA SMMU handling while any pdev is in monitor mode */
	for (i = 0; i < soc->pdev_count; i++) {
		pdev = soc->pdev_list[i];
		if (pdev && pdev->monitor_configured)
			return QDF_STATUS_SUCCESS;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
	    !qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	/*
	 * Even if ipa pipes is disabled, but if it's unmap
	 * operation and nbuf has done ipa smmu map before,
	 * do ipa smmu unmap as well.
	 */
	if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) {
		if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) {
			DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1);
		} else {
			return QDF_STATUS_SUCCESS;
		}
	}

	/* nbuf already in the requested state: count the duplicate and bail */
	if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
		if (create) {
			DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1);
		} else {
			DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1);
		}
		return QDF_STATUS_E_INVAL;
	}

	/* Record the new map state before performing the operation */
	qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create);
}
  150. static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
  151. struct dp_soc *soc,
  152. struct dp_pdev *pdev,
  153. bool create)
  154. {
  155. uint32_t index;
  156. QDF_STATUS ret = QDF_STATUS_SUCCESS;
  157. uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
  158. qdf_nbuf_t nbuf;
  159. uint32_t buf_len;
  160. if (!ipa_is_ready()) {
  161. dp_info("IPA is not READY");
  162. return 0;
  163. }
  164. for (index = 0; index < tx_buffer_cnt; index++) {
  165. nbuf = (qdf_nbuf_t)
  166. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index];
  167. if (!nbuf)
  168. continue;
  169. buf_len = qdf_nbuf_get_data_len(nbuf);
  170. return __dp_ipa_handle_buf_smmu_mapping(
  171. soc, nbuf, buf_len, create);
  172. }
  173. return ret;
  174. }
  175. #ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - IPA SMMU map/unmap Rx desc pool
 * @soc: DP SoC handle
 * @pdev: DP pdev whose Rx descriptor pool is walked
 * @create: true to map, false to unmap
 *
 * Multi-page descriptor pool variant. Walks every descriptor under the
 * pool lock; buffers not in use, already unmapped, or already in the
 * requested map state are skipped (duplicates are counted in stats).
 *
 * Return: QDF_STATUS_SUCCESS, or the status of the last mapping attempted
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	uint32_t num_desc, page_id, offset, i;
	uint16_t num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!qdf_ipa_is_ready())
		return ret;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return ret;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	qdf_spin_lock_bh(&rx_pool->lock);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		/* Translate flat index into (page, offset) within the pool */
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;

		/* Pool pages freed underneath us: stop walking */
		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
			break;

		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
			continue;
		nbuf = rx_desc->nbuf;

		/* Already in the requested state: count duplicate, skip */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		ret = __dp_ipa_handle_buf_smmu_mapping(
				soc, nbuf, rx_pool->buf_size, create);
	}
	qdf_spin_unlock_bh(&rx_pool->lock);

	return ret;
}
  225. #else
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - IPA SMMU map/unmap Rx desc pool
 * @soc: DP SoC handle
 * @pdev: DP pdev whose Rx descriptor pool is walked
 * @create: true to map, false to unmap
 *
 * Flat-array descriptor pool variant (RX_DESC_MULTI_PAGE_ALLOC disabled).
 * Walks every descriptor under the pool lock; buffers not in use, already
 * unmapped, or already in the requested map state are skipped (duplicates
 * are counted in stats). Per-buffer mapping failures assert inside
 * __dp_ipa_handle_buf_smmu_mapping(), so the return here is always success.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	qdf_nbuf_t nbuf;
	int i;

	if (!qdf_ipa_is_ready())
		return QDF_STATUS_SUCCESS;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	qdf_spin_lock_bh(&rx_pool->lock);
	for (i = 0; i < rx_pool->pool_size; i++) {
		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
		    rx_pool->array[i].rx_desc.unmapped)
			continue;

		nbuf = rx_pool->array[i].rx_desc.nbuf;

		/* Already in the requested state: count duplicate, skip */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						 rx_pool->buf_size, create);
	}
	qdf_spin_unlock_bh(&rx_pool->lock);

	return QDF_STATUS_SUCCESS;
}
  264. #endif /* RX_DESC_MULTI_PAGE_ALLOC */
  265. static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
  266. qdf_shared_mem_t *shared_mem,
  267. void *cpu_addr,
  268. qdf_dma_addr_t dma_addr,
  269. uint32_t size)
  270. {
  271. qdf_dma_addr_t paddr;
  272. int ret;
  273. shared_mem->vaddr = cpu_addr;
  274. qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
  275. *qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
  276. paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
  277. qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
  278. ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
  279. shared_mem->vaddr, dma_addr, size);
  280. if (ret) {
  281. dp_err("Unable to get DMA sgtable");
  282. return QDF_STATUS_E_NOMEM;
  283. }
  284. qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
  285. return QDF_STATUS_SUCCESS;
  286. }
  287. #ifdef IPA_WDI3_TX_TWO_PIPES
/**
 * dp_ipa_tx_alt_pool_detach() - tear down the alternative IPA Tx buffer pool
 * @soc: DP SoC handle
 * @pdev: DP pdev owning the IPA resources
 *
 * Unmaps and frees every buffer in the alternative Tx pool, releases the
 * pool pointer array, unmaps the alt comp doorbell (only when it was
 * ioremap'ed, i.e. not DDR mapped), and frees both alt ring sgtables.
 */
static void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res;
	qdf_nbuf_t nbuf;
	int idx;

	for (idx = 0; idx < soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; idx++) {
		nbuf = (qdf_nbuf_t)
			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx];
		if (!nbuf)
			continue;

		/* Undo the bidirectional DMA map done at pool attach */
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
		qdf_mem_dp_tx_skb_cnt_dec();
		qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
		qdf_nbuf_free(nbuf);
		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx] =
			(void *)NULL;
	}

	qdf_mem_free(soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;

	ipa_res = &pdev->ipa_resource;
	/* Doorbell was ioremap'ed in the non-DDR-mapped case only */
	if (!ipa_res->is_db_ddr_mapped)
		iounmap(ipa_res->tx_alt_comp_doorbell_vaddr);

	qdf_mem_free_sgtable(&ipa_res->tx_alt_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->tx_alt_comp_ring.sgtable);
}
/**
 * dp_ipa_tx_alt_pool_attach() - allocate and post the alternative Tx pool
 * @soc: DP SoC handle
 *
 * Allocates Tx buffers for the alternative IPA pipe, DMA-maps each one,
 * and posts them into the WBM2SW (alt comp) release ring, advancing HP so
 * the initial population looks like it came from hardware. Leaves
 * DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES ring slots empty as a WAR for
 * WBM back-pressure (see comment at top of file).
 *
 * Return: QDF_STATUS_SUCCESS (0) on success, -EINVAL for a bad ring size,
 *         -ENOMEM when no buffer could be allocated
 */
static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	/* Over-allocate so the buffer can be aligned to ring_base_align */
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc,
			    hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Reserve WAR slots so WBM can always write into the ring */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	/* Pointer array sized for the full ring, even though fewer are used */
	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	/* NOTE(review): bound is max_alloc_count - 1, i.e. one slot fewer
	 * than the WAR already reserves — confirm this extra slot is
	 * intentional.
	 */
	for (tx_buffer_count = 0;
	     tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(
				soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		qdf_mem_dp_tx_skb_cnt_inc();
		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));

		/* Split the 37-bit paddr into the lo/hi ring-entry fields */
		paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
		paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
		HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
		HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
		HAL_RXDMA_MANAGER_SET(ring_entry, HAL_WBM_SW4_BM_ID);

		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[
			tx_buffer_count] = (void *)nbuf;
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA TX buffer pool2: %d allocated", tx_buffer_count);
	} else {
		dp_err("Failed to allocate IPA TX buffer pool2");
		qdf_mem_free(
			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
/**
 * dp_ipa_tx_alt_ring_get_resource() - export alt Tx ring info to IPA resources
 * @pdev: DP pdev whose ipa_resource is populated
 *
 * Copies the alternative TCL data ring and WBM comp ring base
 * addresses/sizes (captured earlier at ring resource setup) into the
 * pdev's IPA resource block as shared-memory descriptors.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the comp ring
 *         has no DMA address
 */
static QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	ipa_res->tx_alt_ring_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt;

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->tx_alt_ring,
			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->tx_alt_comp_ring,
			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);

	if (!qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->tx_alt_comp_ring.mem_info))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_ipa_tx_alt_ring_resource_setup() - capture alt Tx ring HW parameters
 * @soc: DP SoC handle
 *
 * Snapshots the base paddr/vaddr and byte size of the alternative TCL data
 * ring (SW2TCL2) and its Tx completion ring (WBM2SW4), and computes the
 * physical addresses of the TCL head pointer and WBM tail pointer registers
 * by offsetting the register's CPU-mapped address against the device base.
 */
static void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	unsigned long addr_offset, dev_base_paddr;

	/* IPA TCL_DATA Alternative Ring - HAL_SRNG_SW2TCL2 */
	hal_srng = (struct hal_srng *)
			soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* entry_size is in 32-bit words; << 2 converts entries to bytes */
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Alt Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);

	/* IPA TX Alternative COMP Ring - HAL_SRNG_WBM2SW4_RELEASE */
	hal_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr =
		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				     hal_srng_to_hal_ring_handle(hal_srng));
	/* dev_base_paddr computed above is reused for the WBM TP address */
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX Alt COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
}
/**
 * dp_ipa_map_ring_doorbell_paddr() - map IPA doorbell registers for DP access
 * @pdev: DP pdev owning the IPA resources
 *
 * Makes the Tx completion and Rx ready doorbell registers CPU-accessible
 * (phys_to_virt when DDR mapped, ioremap otherwise) and, with SMMU S1
 * enabled, replaces the stored doorbell paddrs with their SMMU-mapped DMA
 * addresses via pld_smmu_map(). The alternative Tx pipe doorbell is handled
 * the same way when present. Map failures assert.
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	uint32_t tx_comp_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		/* From here on the stored "paddr" is the SMMU DMA address */
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);

		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}

	/* Setup for alternative TX pipe */
	if (!ipa_res->tx_alt_comp_doorbell_paddr)
		return;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_alt_comp_doorbell_vaddr =
			phys_to_virt(ipa_res->tx_alt_comp_doorbell_paddr);
	else
		ipa_res->tx_alt_comp_doorbell_vaddr =
			ioremap(ipa_res->tx_alt_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_alt_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->tx_alt_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}
}
/**
 * dp_ipa_unmap_ring_doorbell_paddr() - undo SMMU maps of doorbell registers
 * @pdev: DP pdev owning the IPA resources
 *
 * Releases the pld_smmu_map() mappings created by
 * dp_ipa_map_ring_doorbell_paddr(), in reverse order of creation
 * (alt Tx comp, Rx ready, Tx comp). No-op when SMMU S1 is disabled,
 * since nothing was mapped in that case. Unmap failures assert.
 */
static void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return;

	/* Unmap must be in reverse order of map */
	if (ipa_res->tx_alt_comp_doorbell_paddr) {
		ret = pld_smmu_unmap(soc->osdev->dev,
				     ipa_res->tx_alt_comp_doorbell_paddr,
				     sizeof(uint32_t));
		qdf_assert_always(!ret);
	}

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->rx_ready_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->tx_comp_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);
}
  559. static QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
  560. struct dp_pdev *pdev,
  561. bool create)
  562. {
  563. QDF_STATUS ret = QDF_STATUS_SUCCESS;
  564. struct ipa_dp_tx_rsc *rsc;
  565. uint32_t tx_buffer_cnt;
  566. uint32_t buf_len;
  567. qdf_nbuf_t nbuf;
  568. uint32_t index;
  569. if (!ipa_is_ready()) {
  570. dp_info("IPA is not READY");
  571. return QDF_STATUS_SUCCESS;
  572. }
  573. rsc = &soc->ipa_uc_tx_rsc_alt;
  574. tx_buffer_cnt = rsc->alloc_tx_buf_cnt;
  575. for (index = 0; index < tx_buffer_cnt; index++) {
  576. nbuf = (qdf_nbuf_t)rsc->tx_buf_pool_vaddr_unaligned[index];
  577. if (!nbuf)
  578. continue;
  579. buf_len = qdf_nbuf_get_data_len(nbuf);
  580. ret = __dp_ipa_handle_buf_smmu_mapping(
  581. soc, nbuf, buf_len, create);
  582. qdf_assert_always(!ret);
  583. }
  584. return ret;
  585. }
/**
 * dp_ipa_wdi_tx_alt_pipe_params() - Fill non-SMMU WDI setup info for the
 *	alternate TX pipe (IPA_CLIENT_WLAN2_CONS1).
 * @soc: data path SoC handle
 * @ipa_res: IPA resources of the pdev
 * @tx: WDI pipe setup info to populate
 *
 * The transfer ring is the alternate WBM TX completion ring and the event
 * ring is the alternate TCL data ring; both doorbells are PCIe addresses
 * of the corresponding HW head/tail pointer registers. A TCL descriptor
 * template is pre-programmed so IPA can build TX descriptors autonomously.
 */
static void dp_ipa_wdi_tx_alt_pipe_params(struct dp_soc *soc,
                                          struct dp_ipa_resources *ipa_res,
                                          qdf_ipa_wdi_pipe_setup_info_t *tx)
{
        struct tcl_data_cmd *tcl_desc_ptr;
        uint8_t *desc_addr;
        uint32_t desc_size;

        QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS1;

        /* Transfer ring: alternate WBM TX completion ring */
        QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
                qdf_mem_get_dma_addr(soc->osdev,
                                     &ipa_res->tx_alt_comp_ring.mem_info);
        QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
                qdf_mem_get_dma_size(soc->osdev,
                                     &ipa_res->tx_alt_comp_ring.mem_info);

        /* WBM Tail Pointer Address */
        QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
                soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
        QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

        /* Event ring: alternate TCL data ring */
        QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
                qdf_mem_get_dma_addr(soc->osdev,
                                     &ipa_res->tx_alt_ring.mem_info);
        QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
                qdf_mem_get_dma_size(soc->osdev,
                                     &ipa_res->tx_alt_ring.mem_info);

        /* TCL Head Pointer Address */
        QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
                soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
        QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

        QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
                ipa_res->tx_alt_ring_num_alloc_buffer;
        QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

        /* Preprogram TCL descriptor */
        desc_addr =
                (uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
        desc_size = sizeof(struct tcl_data_cmd);
        HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
        /* Template payload starts right after the TLV header word */
        tcl_desc_ptr = (struct tcl_data_cmd *)
                (QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
        tcl_desc_ptr->buf_addr_info.return_buffer_manager = HAL_WBM_SW4_BM_ID;
        tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
        tcl_desc_ptr->addry_en = 1; /* Address Y search enable in ASE */
        tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
        tcl_desc_ptr->packet_offset = 0; /* padding for alignment */
}
/**
 * dp_ipa_wdi_tx_alt_pipe_smmu_params() - Fill SMMU WDI setup info for the
 *	alternate TX pipe (IPA_CLIENT_WLAN2_CONS1).
 * @soc: data path SoC handle
 * @ipa_res: IPA resources of the pdev
 * @tx_smmu: WDI SMMU pipe setup info to populate
 *
 * SMMU variant of dp_ipa_wdi_tx_alt_pipe_params(): ring bases are handed
 * over as scatter-gather tables instead of flat DMA addresses.
 */
static void
dp_ipa_wdi_tx_alt_pipe_smmu_params(struct dp_soc *soc,
                                   struct dp_ipa_resources *ipa_res,
                                   qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
        struct tcl_data_cmd *tcl_desc_ptr;
        uint8_t *desc_addr;
        uint32_t desc_size;

        QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = IPA_CLIENT_WLAN2_CONS1;

        /* Transfer ring: alternate WBM TX completion ring (as sgtable) */
        qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
                     &ipa_res->tx_alt_comp_ring.sgtable,
                     sizeof(sgtable_t));
        QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
                qdf_mem_get_dma_size(soc->osdev,
                                     &ipa_res->tx_alt_comp_ring.mem_info);

        /* WBM Tail Pointer Address */
        QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
                soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
        QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

        /* Event ring: alternate TCL data ring (as sgtable) */
        qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
                     &ipa_res->tx_alt_ring.sgtable,
                     sizeof(sgtable_t));
        QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
                qdf_mem_get_dma_size(soc->osdev,
                                     &ipa_res->tx_alt_ring.mem_info);

        /* TCL Head Pointer Address */
        QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
                soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
        QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

        QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
                ipa_res->tx_alt_ring_num_alloc_buffer;
        QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

        /* Preprogram TCL descriptor */
        desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
                        tx_smmu);
        desc_size = sizeof(struct tcl_data_cmd);
        HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
        /* Template payload starts right after the TLV header word */
        tcl_desc_ptr = (struct tcl_data_cmd *)
                (QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
        tcl_desc_ptr->buf_addr_info.return_buffer_manager = HAL_WBM_SW4_BM_ID;
        tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
        tcl_desc_ptr->addry_en = 1; /* Address Y search enable in ASE */
        tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
        tcl_desc_ptr->packet_offset = 0; /* padding for alignment */
}
  675. static void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc,
  676. struct dp_ipa_resources *res,
  677. qdf_ipa_wdi_conn_in_params_t *in)
  678. {
  679. qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu = NULL;
  680. qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
  681. qdf_ipa_ep_cfg_t *tx_cfg;
  682. QDF_IPA_WDI_CONN_IN_PARAMS_IS_TX1_USED(in) = true;
  683. if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
  684. tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE_SMMU(in);
  685. tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
  686. dp_ipa_wdi_tx_alt_pipe_smmu_params(soc, res, tx_smmu);
  687. } else {
  688. tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE(in);
  689. tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx);
  690. dp_ipa_wdi_tx_alt_pipe_params(soc, res, tx);
  691. }
  692. QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
  693. QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  694. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
  695. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
  696. QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
  697. QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
  698. QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
  699. }
  700. static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
  701. qdf_ipa_wdi_conn_out_params_t *out)
  702. {
  703. res->tx_comp_doorbell_paddr =
  704. QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
  705. res->rx_ready_doorbell_paddr =
  706. QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
  707. res->tx_alt_comp_doorbell_paddr =
  708. QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_ALT_DB_PA(out);
  709. }
/**
 * dp_ipa_setup_iface_session_id() - Program interface metadata from the
 *	encoded session id.
 * @in: WDI interface registration IN parameters
 * @session_id: encoded session id; the low bit carries the 2G-interface
 *	flag and the remaining bits the vdev session id
 *	(NOTE(review): assumes IPA_SESSION_ID_SHIFT == 1 — confirm)
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
                                          uint8_t session_id)
{
        /* Extract the 2G-interface flag, then strip it from the id */
        bool is_2g_iface = session_id & IPA_SESSION_ID_SHIFT;

        session_id = session_id >> IPA_SESSION_ID_SHIFT;

        dp_debug("session_id %u is_2g_iface %d", session_id, is_2g_iface);

        /* Session id goes in the upper 16 bits of the metadata word */
        QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
        QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_TX1_USED(in) = is_2g_iface;
}
  719. static void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
  720. struct dp_ipa_resources *res)
  721. {
  722. struct hal_srng *wbm_srng;
  723. /* Init first TX comp ring */
  724. wbm_srng = (struct hal_srng *)
  725. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  726. hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
  727. res->tx_comp_doorbell_vaddr);
  728. /* Init the alternate TX comp ring */
  729. wbm_srng = (struct hal_srng *)
  730. soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
  731. hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
  732. res->tx_alt_comp_doorbell_vaddr);
  733. }
  734. static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
  735. struct dp_ipa_resources *ipa_res)
  736. {
  737. struct hal_srng *wbm_srng;
  738. wbm_srng = (struct hal_srng *)
  739. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  740. hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
  741. ipa_res->tx_comp_doorbell_paddr);
  742. dp_info("paddr %pK vaddr %pK",
  743. (void *)ipa_res->tx_comp_doorbell_paddr,
  744. (void *)ipa_res->tx_comp_doorbell_vaddr);
  745. /* Setup for alternative TX comp ring */
  746. wbm_srng = (struct hal_srng *)
  747. soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
  748. hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
  749. ipa_res->tx_alt_comp_doorbell_paddr);
  750. dp_info("paddr %pK vaddr %pK",
  751. (void *)ipa_res->tx_alt_comp_doorbell_paddr,
  752. (void *)ipa_res->tx_alt_comp_doorbell_vaddr);
  753. }
  754. #ifdef IPA_SET_RESET_TX_DB_PA
  755. static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
  756. struct dp_ipa_resources *ipa_res)
  757. {
  758. hal_ring_handle_t wbm_srng;
  759. qdf_dma_addr_t hp_addr;
  760. wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  761. if (!wbm_srng)
  762. return QDF_STATUS_E_FAILURE;
  763. hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
  764. hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
  765. dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
  766. /* Reset alternative TX comp ring */
  767. wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
  768. if (!wbm_srng)
  769. return QDF_STATUS_E_FAILURE;
  770. hp_addr = soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr;
  771. hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
  772. dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
  773. return QDF_STATUS_SUCCESS;
  774. }
  775. #endif /* IPA_SET_RESET_TX_DB_PA */
  776. #else /* !IPA_WDI3_TX_TWO_PIPES */
/* No-op stub: alternate TX pipe (IPA_WDI3_TX_TWO_PIPES) is compiled out */
static inline
void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
/* No-op stub: no alternate TX ring when IPA_WDI3_TX_TWO_PIPES is disabled */
static inline void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
}
/* Stub: nothing to attach for the alternate TX pool; report success */
static inline int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
        return 0;
}
/* Stub: no alternate TX ring resources to fetch; report success */
static inline QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}
/**
 * dp_ipa_map_ring_doorbell_paddr() - Map the IPA doorbell addresses
 * @pdev: core txrx pdev context
 *
 * Obtains a CPU virtual address for the TX completion doorbell and, when
 * SMMU S1 translation is enabled, replaces the TX-comp and RX-ready
 * doorbell physical addresses with the IOVAs returned by pld_smmu_map().
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
        struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
        uint32_t rx_ready_doorbell_dmaaddr;
        uint32_t tx_comp_doorbell_dmaaddr;
        struct dp_soc *soc = pdev->soc;
        int ret = 0;

        if (ipa_res->is_db_ddr_mapped)
                /* Doorbell lives in DDR: linear-map translation suffices */
                ipa_res->tx_comp_doorbell_vaddr =
                                phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
        else
                /* Doorbell is register space: map just the 4-byte word */
                ipa_res->tx_comp_doorbell_vaddr =
                                ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

        if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
                ret = pld_smmu_map(soc->osdev->dev,
                                   ipa_res->tx_comp_doorbell_paddr,
                                   &tx_comp_doorbell_dmaaddr,
                                   sizeof(uint32_t));
                /* From here on the stored "paddr" is actually an IOVA */
                ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
                qdf_assert_always(!ret);

                ret = pld_smmu_map(soc->osdev->dev,
                                   ipa_res->rx_ready_doorbell_paddr,
                                   &rx_ready_doorbell_dmaaddr,
                                   sizeof(uint32_t));
                ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
                qdf_assert_always(!ret);
        }
}
  820. static inline void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
  821. {
  822. struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
  823. struct dp_soc *soc = pdev->soc;
  824. int ret = 0;
  825. if (!qdf_mem_smmu_s1_enabled(soc->osdev))
  826. return;
  827. ret = pld_smmu_unmap(soc->osdev->dev,
  828. ipa_res->rx_ready_doorbell_paddr,
  829. sizeof(uint32_t));
  830. qdf_assert_always(!ret);
  831. ret = pld_smmu_unmap(soc->osdev->dev,
  832. ipa_res->tx_comp_doorbell_paddr,
  833. sizeof(uint32_t));
  834. qdf_assert_always(!ret);
  835. }
/* Stub: no alternate TX buffer pool to (un)map; report success */
static inline QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
                                                        struct dp_pdev *pdev,
                                                        bool create)
{
        return QDF_STATUS_SUCCESS;
}
/* Stub: single TX pipe build; no alternate pipe to set up */
static inline
void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, struct dp_ipa_resources *res,
                              qdf_ipa_wdi_conn_in_params_t *in)
{
}
/**
 * dp_ipa_set_pipe_db() - Cache the doorbell physical addresses returned by
 *	IPA at WDI connect time.
 * @res: IPA resources of the pdev
 * @out: WDI connection OUT parameters from IPA
 */
static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
                               qdf_ipa_wdi_conn_out_params_t *out)
{
        res->tx_comp_doorbell_paddr =
                QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
        res->rx_ready_doorbell_paddr =
                QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
}
  855. static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
  856. uint8_t session_id)
  857. {
  858. QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) = htonl(session_id << 16);
  859. }
  860. static inline void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
  861. struct dp_ipa_resources *res)
  862. {
  863. struct hal_srng *wbm_srng = (struct hal_srng *)
  864. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  865. hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
  866. res->tx_comp_doorbell_vaddr);
  867. }
/**
 * dp_ipa_set_tx_doorbell_paddr() - Point the WBM TX completion ring's HP
 *	address at the IPA doorbell so HW rings IPA directly.
 * @soc: data path SoC handle
 * @ipa_res: IPA resources of the pdev
 */
static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
                                         struct dp_ipa_resources *ipa_res)
{
        struct hal_srng *wbm_srng = (struct hal_srng *)
                soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;

        hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
                                          ipa_res->tx_comp_doorbell_paddr);

        dp_info("paddr %pK vaddr %pK",
                (void *)ipa_res->tx_comp_doorbell_paddr,
                (void *)ipa_res->tx_comp_doorbell_vaddr);
}
  879. #ifdef IPA_SET_RESET_TX_DB_PA
  880. static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
  881. struct dp_ipa_resources *ipa_res)
  882. {
  883. hal_ring_handle_t wbm_srng =
  884. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  885. qdf_dma_addr_t hp_addr;
  886. if (!wbm_srng)
  887. return QDF_STATUS_E_FAILURE;
  888. hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
  889. hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
  890. dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
  891. return QDF_STATUS_SUCCESS;
  892. }
  893. #endif /* IPA_SET_RESET_TX_DB_PA */
  894. #endif /* IPA_WDI3_TX_TWO_PIPES */
/**
 * dp_tx_ipa_uc_detach - Free autonomy TX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * DMA-unmaps and frees every TX buffer previously posted to the WBM SRNG,
 * releases the buffer-pointer pool, the TX doorbell mapping and the TX
 * ring scatter-gather tables.
 *
 * Return: none
 */
static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
        int idx;
        qdf_nbuf_t nbuf;
        struct dp_ipa_resources *ipa_res;

        for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
                nbuf = (qdf_nbuf_t)
                        soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
                if (!nbuf)
                        continue;
                /* Unmap before free and keep the skb mem accounting in sync */
                qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
                qdf_mem_dp_tx_skb_cnt_dec();
                qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
                qdf_nbuf_free(nbuf);
                soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
                                                        (void *)NULL;
        }

        qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
        soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;

        ipa_res = &pdev->ipa_resource;

        /* vaddr was ioremap()ed only when the doorbell is not DDR-mapped */
        if (!ipa_res->is_db_ddr_mapped)
                iounmap(ipa_res->tx_comp_doorbell_vaddr);

        qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
        qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
}
  929. /**
  930. * dp_rx_ipa_uc_detach - free autonomy RX resources
  931. * @soc: data path instance
  932. * @pdev: core txrx pdev context
  933. *
  934. * This function will detach DP RX into main device context
  935. * will free DP Rx resources.
  936. *
  937. * Return: none
  938. */
  939. static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
  940. {
  941. struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
  942. qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
  943. qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
  944. }
/**
 * dp_ipa_uc_detach() - Free all IPA uC TX/RX resources of the pdev
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * No-op when IPA is disabled in the config.
 *
 * Return: QDF_STATUS_SUCCESS
 */
int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
        if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
                return QDF_STATUS_SUCCESS;

        /* TX resource detach */
        dp_tx_ipa_uc_detach(soc, pdev);

        /* Cleanup 2nd TX pipe resources */
        dp_ipa_tx_alt_pool_detach(soc, pdev);

        /* RX resource detach */
        dp_rx_ipa_uc_detach(soc, pdev);

        return QDF_STATUS_SUCCESS;      /* success */
}
/**
 * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
 * @soc: data path instance
 * @pdev: Physical device handle
 *
 * Allocate TX buffer from non-cacheable memory
 * Attach allocated TX buffers with WBM SRNG
 *
 * Return: int (0 on success, -EINVAL/-ENOMEM on failure)
 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
        uint32_t tx_buffer_count;
        uint32_t ring_base_align = 8;
        qdf_dma_addr_t buffer_paddr;
        struct hal_srng *wbm_srng = (struct hal_srng *)
                        soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
        struct hal_srng_params srng_params;
        uint32_t paddr_lo;
        uint32_t paddr_hi;
        void *ring_entry;
        int num_entries;
        qdf_nbuf_t nbuf;
        int retval = QDF_STATUS_SUCCESS;
        int max_alloc_count = 0;

        /*
         * Uncomment when dp_ops_cfg.cfg_attach is implemented
         * unsigned int uc_tx_buf_sz =
         *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
         */
        unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
        unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

        hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
                            &srng_params);
        num_entries = srng_params.num_entries;

        /* Keep some ring entries unused as a HW workaround */
        max_alloc_count =
                num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
        if (max_alloc_count <= 0) {
                dp_err("incorrect value for buffer count %u", max_alloc_count);
                return -EINVAL;
        }

        dp_info("requested %d buffers to be posted to wbm ring",
                max_alloc_count);

        /* Pool holds one void* slot per ring entry */
        soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
                qdf_mem_malloc(num_entries *
                sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
        if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
                dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
                return -ENOMEM;
        }

        hal_srng_access_start_unlocked(soc->hal_soc,
                                       hal_srng_to_hal_ring_handle(wbm_srng));

        /*
         * Allocate Tx buffers as many as possible.
         * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
         * Populate Tx buffers into WBM2IPA ring
         * This initial buffer population will simulate H/W as source ring,
         * and update HP
         */
        for (tx_buffer_count = 0;
             tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
                nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
                if (!nbuf)
                        break;

                ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
                                hal_srng_to_hal_ring_handle(wbm_srng));
                if (!ring_entry) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                                  "%s: Failed to get WBM ring entry",
                                  __func__);
                        qdf_nbuf_free(nbuf);
                        break;
                }

                qdf_nbuf_map_single(soc->osdev, nbuf,
                                    QDF_DMA_BIDIRECTIONAL);
                buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
                qdf_mem_dp_tx_skb_cnt_inc();
                qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));

                /*
                 * PADDR_HI keeps only bits [36:32] of the DMA address
                 * (mask 0x1f) — presumably a 37-bit addressing limit of
                 * the HW descriptor; confirm per target.
                 */
                paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
                paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
                HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
                HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
                HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX +
                                                   HAL_WBM_SW0_BM_ID));

                soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
                        = (void *)nbuf;
        }

        hal_srng_access_end_unlocked(soc->hal_soc,
                                     hal_srng_to_hal_ring_handle(wbm_srng));

        soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

        if (tx_buffer_count) {
                dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
        } else {
                dp_err("No IPA WDI TX buffer allocated!");
                qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
                soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
                retval = -ENOMEM;
        }

        return retval;
}
/**
 * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
        /* RX rings are set up elsewhere; nothing to allocate here */
        return QDF_STATUS_SUCCESS;
}
/**
 * dp_ipa_uc_attach() - Allocate IPA uC TX/RX resources for the pdev
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * No-op when IPA is disabled in the config. On failure, resources
 * attached by the earlier steps are detached again before returning.
 *
 * Return: QDF_STATUS_SUCCESS, or the error code of the failing step
 */
int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
        int error;

        if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
                return QDF_STATUS_SUCCESS;

        /* TX resource attach */
        error = dp_tx_ipa_uc_attach(soc, pdev);
        if (error) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "%s: DP IPA UC TX attach fail code %d",
                          __func__, error);
                return error;
        }

        /* Setup 2nd TX pipe */
        error = dp_ipa_tx_alt_pool_attach(soc);
        if (error) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "%s: DP IPA TX pool2 attach fail code %d",
                          __func__, error);
                /* Unwind the TX attach done above */
                dp_tx_ipa_uc_detach(soc, pdev);
                return error;
        }

        /* RX resource attach */
        error = dp_rx_ipa_uc_attach(soc, pdev);
        if (error) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "%s: DP IPA UC RX attach fail code %d",
                          __func__, error);
                /* Unwind both TX attach steps, most recent first */
                dp_ipa_tx_alt_pool_detach(soc, pdev);
                dp_tx_ipa_uc_detach(soc, pdev);
                return error;
        }

        return QDF_STATUS_SUCCESS;      /* success */
}
/*
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: core txrx pdev context
 *
 * Records base address, size and head/tail pointer register physical
 * addresses of the TCL data, TX completion (WBM), REO destination and RX
 * refill rings so they can later be handed to IPA, then programs the REO
 * remap registers.
 *
 * Return: 0 / QDF_STATUS_SUCCESS
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
                               struct dp_pdev *pdev)
{
        struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
        struct hal_srng *hal_srng;
        struct hal_srng_params srng_params;
        qdf_dma_addr_t hp_addr;
        unsigned long addr_offset, dev_base_paddr;
        uint32_t ix0;

        if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
                return QDF_STATUS_SUCCESS;

        /* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
        hal_srng = (struct hal_srng *)
                        soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
        hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
                            hal_srng_to_hal_ring_handle(hal_srng),
                            &srng_params);

        soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
                srng_params.ring_base_paddr;
        soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
                srng_params.ring_base_vaddr;
        /* << 2: entry_size is presumably in 4-byte words — confirm */
        soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
                (srng_params.num_entries * srng_params.entry_size) << 2;
        /*
         * For the register backed memory addresses, use the scn->mem_pa to
         * calculate the physical address of the shadow registers
         */
        dev_base_paddr =
                (unsigned long)
                ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
        addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
                      (unsigned long)(hal_soc->dev_base_addr);
        soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
                                (qdf_dma_addr_t)(addr_offset + dev_base_paddr);

        dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
                (unsigned int)addr_offset,
                (unsigned int)dev_base_paddr,
                (unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
                (void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
                (void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
                srng_params.num_entries,
                soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

        /* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
        hal_srng = (struct hal_srng *)
                        soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
        hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
                            hal_srng_to_hal_ring_handle(hal_srng),
                            &srng_params);
        soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
                srng_params.ring_base_paddr;
        soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
                srng_params.ring_base_vaddr;
        soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
                (srng_params.num_entries * srng_params.entry_size) << 2;
        /* Shadow HP is used later to restore the doorbell on teardown */
        soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr =
                hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
                                     hal_srng_to_hal_ring_handle(hal_srng));
        addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
                      (unsigned long)(hal_soc->dev_base_addr);
        soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
                                (qdf_dma_addr_t)(addr_offset + dev_base_paddr);

        dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
                (unsigned int)addr_offset,
                (unsigned int)dev_base_paddr,
                (unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
                (void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
                (void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
                srng_params.num_entries,
                soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

        dp_ipa_tx_alt_ring_resource_setup(soc);

        /* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
        hal_srng = (struct hal_srng *)
                        soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
        hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
                            hal_srng_to_hal_ring_handle(hal_srng),
                            &srng_params);
        soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
                srng_params.ring_base_paddr;
        soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
                srng_params.ring_base_vaddr;
        soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
                (srng_params.num_entries * srng_params.entry_size) << 2;
        addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
                      (unsigned long)(hal_soc->dev_base_addr);
        soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
                                (qdf_dma_addr_t)(addr_offset + dev_base_paddr);

        dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
                (unsigned int)addr_offset,
                (unsigned int)dev_base_paddr,
                (unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
                (void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
                (void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
                srng_params.num_entries,
                soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

        /* IPA RX refill buffer ring */
        hal_srng = (struct hal_srng *)
                        pdev->rx_refill_buf_ring2.hal_srng;
        hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
                            hal_srng_to_hal_ring_handle(hal_srng),
                            &srng_params);
        soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
                srng_params.ring_base_paddr;
        soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
                srng_params.ring_base_vaddr;
        soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
                (srng_params.num_entries * srng_params.entry_size) << 2;
        hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
                                       hal_srng_to_hal_ring_handle(hal_srng));
        soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
                qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

        dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
                (unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
                (void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
                (void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
                srng_params.num_entries,
                soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

        /*
         * Set DEST_RING_MAPPING_4 to SW2 as default value for
         * DESTINATION_RING_CTRL_IX_0.
         */
        ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
              HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
              HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
              HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
              HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
              HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
              HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
              HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

        hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);

        return 0;
}
/**
 * dp_ipa_get_resource() - Snapshot ring and buffer info for IPA into the
 *	pdev IPA resource struct.
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the pdev to query
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_pdev *pdev =
                dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
        struct dp_ipa_resources *ipa_res;

        if (!pdev) {
                dp_err("Invalid instance");
                return QDF_STATUS_E_FAILURE;
        }

        ipa_res = &pdev->ipa_resource;
        if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
                return QDF_STATUS_SUCCESS;

        ipa_res->tx_num_alloc_buffer =
                (uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

        dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
                                   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
                                   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
                                   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

        dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
                                   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
                                   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
                                   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

        dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
                                   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
                                   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
                                   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

        dp_ipa_get_shared_mem_info(
                        soc->osdev, &ipa_res->rx_refill_ring,
                        soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
                        soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
                        soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

        /* Every ring must expose a valid DMA address to hand to IPA */
        if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) ||
            !qdf_mem_get_dma_addr(soc->osdev,
                                  &ipa_res->tx_comp_ring.mem_info) ||
            !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) ||
            !qdf_mem_get_dma_addr(soc->osdev,
                                  &ipa_res->rx_refill_ring.mem_info))
                return QDF_STATUS_E_FAILURE;

        if (dp_ipa_tx_alt_ring_get_resource(pdev))
                return QDF_STATUS_E_FAILURE;

        return QDF_STATUS_SUCCESS;
}
#ifdef IPA_SET_RESET_TX_DB_PA
/*
 * With IPA_SET_RESET_TX_DB_PA the TX doorbell paddr is set/reset
 * dynamically elsewhere (see dp_ipa_reset_tx_doorbell_pa), so nothing
 * is programmed at doorbell-setup time.
 */
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res)
#else
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \
                dp_ipa_set_tx_doorbell_paddr(soc, ipa_res)
#endif
/**
 * dp_ipa_set_doorbell_paddr() - Program the TX and RX doorbell addresses
 *	into the HW rings so they ring IPA directly.
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the pdev
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_pdev *pdev =
                dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
        struct dp_ipa_resources *ipa_res;
        struct hal_srng *reo_srng = (struct hal_srng *)
                        soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

        if (!pdev) {
                dp_err("Invalid instance");
                return QDF_STATUS_E_FAILURE;
        }

        ipa_res = &pdev->ipa_resource;
        if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
                return QDF_STATUS_SUCCESS;

        /* Doorbell mapping must precede programming the HW rings */
        dp_ipa_map_ring_doorbell_paddr(pdev);
        DP_IPA_SET_TX_DB_PADDR(soc, ipa_res);

        /*
         * For RX, REO module on Napier/Hastings does reordering on incoming
         * Ethernet packets and writes one or more descriptors to REO2IPA Rx
         * ring. It then updates the ring's Write/Head ptr and rings a doorbell
         * to IPA.
         * Set the doorbell addr for the REO ring.
         */
        hal_srng_dst_set_hp_paddr_confirm(reo_srng,
                                          ipa_res->rx_ready_doorbell_paddr);

        return QDF_STATUS_SUCCESS;
}
  1319. QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1320. uint8_t *op_msg)
  1321. {
  1322. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1323. struct dp_pdev *pdev =
  1324. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1325. if (!pdev) {
  1326. dp_err("Invalid instance");
  1327. return QDF_STATUS_E_FAILURE;
  1328. }
  1329. if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
  1330. return QDF_STATUS_SUCCESS;
  1331. if (pdev->ipa_uc_op_cb) {
  1332. pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
  1333. } else {
  1334. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1335. "%s: IPA callback function is not registered", __func__);
  1336. qdf_mem_free(op_msg);
  1337. return QDF_STATUS_E_FAILURE;
  1338. }
  1339. return QDF_STATUS_SUCCESS;
  1340. }
  1341. QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1342. ipa_uc_op_cb_type op_cb,
  1343. void *usr_ctxt)
  1344. {
  1345. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1346. struct dp_pdev *pdev =
  1347. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1348. if (!pdev) {
  1349. dp_err("Invalid instance");
  1350. return QDF_STATUS_E_FAILURE;
  1351. }
  1352. if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
  1353. return QDF_STATUS_SUCCESS;
  1354. pdev->ipa_uc_op_cb = op_cb;
  1355. pdev->usr_ctxt = usr_ctxt;
  1356. return QDF_STATUS_SUCCESS;
  1357. }
  1358. void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1359. {
  1360. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1361. struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1362. if (!pdev) {
  1363. dp_err("Invalid instance");
  1364. return;
  1365. }
  1366. dp_debug("Deregister OP handler callback");
  1367. pdev->ipa_uc_op_cb = NULL;
  1368. pdev->usr_ctxt = NULL;
  1369. }
/**
 * dp_ipa_get_stat() - Get IPA datapath statistics
 * @soc_hdl: data path soc handle
 * @pdev_id: id of the physical device
 *
 * Not implemented yet; stub that always reports success.
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}
  1375. /**
  1376. * dp_tx_send_ipa_data_frame() - send IPA data frame
  1377. * @soc_hdl: datapath soc handle
  1378. * @vdev_id: id of the virtual device
  1379. * @skb: skb to transmit
  1380. *
  1381. * Return: skb/ NULL is for success
  1382. */
  1383. qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1384. qdf_nbuf_t skb)
  1385. {
  1386. qdf_nbuf_t ret;
  1387. /* Terminate the (single-element) list of tx frames */
  1388. qdf_nbuf_set_next(skb, NULL);
  1389. ret = dp_tx_send(soc_hdl, vdev_id, skb);
  1390. if (ret) {
  1391. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1392. "%s: Failed to tx", __func__);
  1393. return ret;
  1394. }
  1395. return NULL;
  1396. }
/**
 * dp_ipa_enable_autonomy() - Steer Rx traffic to the REO2IPA ring
 * @soc_hdl: data path soc handle
 * @pdev_id: id of the physical device
 *
 * Reprograms the REO destination remap registers so received packets
 * are routed to the REO2IPA (SW4) destination ring, letting IPA
 * consume Rx traffic autonomously.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Remap registers must not be written before the target is up;
	 * caller may retry on E_AGAIN.
	 */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* With Rx hashing enabled, point all eight IX2 hash
		 * destinations (entries 16-23) at SW4; the same value
		 * is also written for IX3.
		 */
		ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW4, 16) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 17) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 20) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 22) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix2);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_ipa_disable_autonomy() - Restore default REO Rx ring mapping
 * @soc_hdl: data path soc handle
 * @pdev_id: id of the physical device
 *
 * Reprograms the REO destination remap registers back to the host SW
 * rings (SW1-SW3 pattern, plus dp_reo_remap_config() values for
 * IX2/IX3 when Rx hashing is on), undoing dp_ipa_enable_autonomy().
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;
	uint32_t ix3;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Remap registers must not be written before the target is up */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* Fetch the platform default IX2/IX3 hash mapping */
		dp_reo_remap_config(soc, &ix2, &ix3);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix3);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
/*
 * Bytes skipped ahead of the L3 header in the Rx packet offset
 * (presumably to align the IP header — confirm).
 * This should be configurable per H/W configuration enable status.
 */
#define L3_HEADER_PADDING 2
  1479. #ifdef CONFIG_IPA_WDI_UNIFIED_API
  1480. #ifndef QCA_LL_TX_FLOW_CONTROL_V2
  1481. static inline void dp_setup_mcc_sys_pipes(
  1482. qdf_ipa_sys_connect_params_t *sys_in,
  1483. qdf_ipa_wdi_conn_in_params_t *pipe_in)
  1484. {
  1485. /* Setup MCC sys pipe */
  1486. QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
  1487. DP_IPA_MAX_IFACE;
  1488. for (int i = 0; i < DP_IPA_MAX_IFACE; i++)
  1489. memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
  1490. &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
  1491. }
  1492. #else
/**
 * dp_setup_mcc_sys_pipes() - Populate MCC sys pipe parameters
 * @sys_in: sys pipe connect params; unused in this configuration
 * @pipe_in: WDI connection input params to populate
 *
 * With QCA_LL_TX_FLOW_CONTROL_V2 no MCC sys pipes are requested.
 */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
  1499. #endif
/**
 * dp_ipa_wdi_tx_params() - Fill non-SMMU WDI Tx pipe setup info
 * @soc: data path soc handle
 * @ipa_res: per-pdev IPA resources (ring mem info, buffer count)
 * @tx: IPA WDI Tx pipe setup info to populate
 * @over_gsi: true when WDI runs over GSI (selects WLAN2 client)
 *
 * Transfer ring = WBM completion ring (doorbell: WBM tail pointer);
 * event ring = TCL ring (doorbell: TCL head pointer); both doorbells
 * are PCIe addresses.  Also preprograms the TCL descriptor template
 * used for Tx packets.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor: TLV header, then the command body
	 * immediately after it.
	 */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2; /* padding for alignment */
}
/**
 * dp_ipa_wdi_rx_params() - Fill non-SMMU WDI Rx pipe setup info
 * @soc: data path soc handle
 * @ipa_res: per-pdev IPA resources (ring mem info)
 * @rx: IPA WDI Rx pipe setup info to populate
 * @over_gsi: true when WDI runs over GSI (selects WLAN2 client)
 *
 * Transfer ring = REO2IPA ready ring (doorbell: REO tail pointer, a
 * PCIe address); event ring = Rx refill ring (doorbell: FW head
 * pointer, not a PCIe address).
 */
static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN1_PROD;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	/* Payload starts after the Rx packet TLVs plus L3 padding */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}
/**
 * dp_ipa_wdi_tx_smmu_params() - Fill SMMU WDI Tx pipe setup info
 * @soc: data path soc handle
 * @ipa_res: per-pdev IPA resources (ring sgtables and mem info)
 * @tx_smmu: IPA WDI SMMU Tx pipe setup info to populate
 * @over_gsi: true when WDI runs over GSI (selects WLAN2 client)
 *
 * SMMU variant of dp_ipa_wdi_tx_params(): ring bases are passed as
 * scatter-gather tables instead of DMA addresses so IPA can map them
 * through its own SMMU context.  Also preprograms the TCL descriptor
 * template.
 */
static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor: TLV header, then the command body */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2; /* padding for alignment */
}
/**
 * dp_ipa_wdi_rx_smmu_params() - Fill SMMU WDI Rx pipe setup info
 * @soc: data path soc handle
 * @ipa_res: per-pdev IPA resources (ring sgtables and mem info)
 * @rx_smmu: IPA WDI SMMU Rx pipe setup info to populate
 * @over_gsi: true when WDI runs over GSI (selects WLAN2 client)
 *
 * SMMU variant of dp_ipa_wdi_rx_params(): ring bases are passed as
 * scatter-gather tables instead of DMA addresses.
 */
static void
dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
			  bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
			IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
			IPA_CLIENT_WLAN1_PROD;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	/* Payload starts after the Rx packet TLVs plus L3 padding */
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}
  1668. QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1669. void *ipa_i2w_cb, void *ipa_w2i_cb,
  1670. void *ipa_wdi_meter_notifier_cb,
  1671. uint32_t ipa_desc_size, void *ipa_priv,
  1672. bool is_rm_enabled, uint32_t *tx_pipe_handle,
  1673. uint32_t *rx_pipe_handle, bool is_smmu_enabled,
  1674. qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi)
  1675. {
  1676. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1677. struct dp_pdev *pdev =
  1678. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1679. struct dp_ipa_resources *ipa_res;
  1680. qdf_ipa_ep_cfg_t *tx_cfg;
  1681. qdf_ipa_ep_cfg_t *rx_cfg;
  1682. qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
  1683. qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
  1684. qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
  1685. qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL;
  1686. qdf_ipa_wdi_conn_in_params_t *pipe_in = NULL;
  1687. qdf_ipa_wdi_conn_out_params_t pipe_out;
  1688. int ret;
  1689. if (!pdev) {
  1690. dp_err("Invalid instance");
  1691. return QDF_STATUS_E_FAILURE;
  1692. }
  1693. ipa_res = &pdev->ipa_resource;
  1694. if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
  1695. return QDF_STATUS_SUCCESS;
  1696. pipe_in = qdf_mem_malloc(sizeof(*pipe_in));
  1697. if (!pipe_in)
  1698. return QDF_STATUS_E_NOMEM;
  1699. qdf_mem_zero(&pipe_out, sizeof(pipe_out));
  1700. if (is_smmu_enabled)
  1701. QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = true;
  1702. else
  1703. QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = false;
  1704. dp_setup_mcc_sys_pipes(sys_in, pipe_in);
  1705. /* TX PIPE */
  1706. if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
  1707. tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in);
  1708. tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
  1709. } else {
  1710. tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in);
  1711. tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
  1712. }
  1713. QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
  1714. QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  1715. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
  1716. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
  1717. QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
  1718. QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
  1719. QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
  1720. /**
  1721. * Transfer Ring: WBM Ring
  1722. * Transfer Ring Doorbell PA: WBM Tail Pointer Address
  1723. * Event Ring: TCL ring
  1724. * Event Ring Doorbell PA: TCL Head Pointer Address
  1725. */
  1726. if (is_smmu_enabled)
  1727. dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi);
  1728. else
  1729. dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);
  1730. dp_ipa_setup_tx_alt_pipe(soc, ipa_res, pipe_in);
  1731. /* RX PIPE */
  1732. if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
  1733. rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in);
  1734. rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
  1735. } else {
  1736. rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in);
  1737. rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
  1738. }
  1739. QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
  1740. QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
  1741. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
  1742. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
  1743. QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
  1744. QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
  1745. QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
  1746. QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
  1747. QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
  1748. /**
  1749. * Transfer Ring: REO Ring
  1750. * Transfer Ring Doorbell PA: REO Tail Pointer Address
  1751. * Event Ring: FW ring
  1752. * Event Ring Doorbell PA: FW Head Pointer Address
  1753. */
  1754. if (is_smmu_enabled)
  1755. dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi);
  1756. else
  1757. dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);
  1758. QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) = ipa_w2i_cb;
  1759. QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) = ipa_priv;
  1760. /* Connect WDI IPA PIPEs */
  1761. ret = qdf_ipa_wdi_conn_pipes(pipe_in, &pipe_out);
  1762. if (ret) {
  1763. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1764. "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
  1765. __func__, ret);
  1766. qdf_mem_free(pipe_in);
  1767. return QDF_STATUS_E_FAILURE;
  1768. }
  1769. /* IPA uC Doorbell registers */
  1770. dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
  1771. (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
  1772. (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
  1773. dp_ipa_set_pipe_db(ipa_res, &pipe_out);
  1774. ipa_res->is_db_ddr_mapped =
  1775. QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out);
  1776. soc->ipa_first_tx_db_access = true;
  1777. qdf_mem_free(pipe_in);
  1778. return QDF_STATUS_SUCCESS;
  1779. }
  1780. /**
  1781. * dp_ipa_setup_iface() - Setup IPA header and register interface
  1782. * @ifname: Interface name
  1783. * @mac_addr: Interface MAC address
  1784. * @prod_client: IPA prod client type
  1785. * @cons_client: IPA cons client type
  1786. * @session_id: Session ID
  1787. * @is_ipv6_enabled: Is IPV6 enabled or not
  1788. *
  1789. * Return: QDF_STATUS
  1790. */
  1791. QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
  1792. qdf_ipa_client_type_t prod_client,
  1793. qdf_ipa_client_type_t cons_client,
  1794. uint8_t session_id, bool is_ipv6_enabled)
  1795. {
  1796. qdf_ipa_wdi_reg_intf_in_params_t in;
  1797. qdf_ipa_wdi_hdr_info_t hdr_info;
  1798. struct dp_ipa_uc_tx_hdr uc_tx_hdr;
  1799. struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
  1800. int ret = -EINVAL;
  1801. qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t));
  1802. dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname,
  1803. QDF_MAC_ADDR_REF(mac_addr));
  1804. qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  1805. qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
  1806. /* IPV4 header */
  1807. uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
  1808. QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
  1809. QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  1810. QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
  1811. QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
  1812. DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
  1813. QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
  1814. qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
  1815. &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  1816. QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
  1817. QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
  1818. QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
  1819. dp_ipa_setup_iface_session_id(&in, session_id);
  1820. /* IPV6 header */
  1821. if (is_ipv6_enabled) {
  1822. qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
  1823. DP_IPA_UC_WLAN_TX_HDR_LEN);
  1824. uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
  1825. QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
  1826. qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
  1827. &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  1828. }
  1829. dp_debug("registering for session_id: %u", session_id);
  1830. ret = qdf_ipa_wdi_reg_intf(&in);
  1831. if (ret) {
  1832. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1833. "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
  1834. __func__, ret);
  1835. return QDF_STATUS_E_FAILURE;
  1836. }
  1837. return QDF_STATUS_SUCCESS;
  1838. }
  1839. #else /* !CONFIG_IPA_WDI_UNIFIED_API */
  1840. QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1841. void *ipa_i2w_cb, void *ipa_w2i_cb,
  1842. void *ipa_wdi_meter_notifier_cb,
  1843. uint32_t ipa_desc_size, void *ipa_priv,
  1844. bool is_rm_enabled, uint32_t *tx_pipe_handle,
  1845. uint32_t *rx_pipe_handle)
  1846. {
  1847. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1848. struct dp_pdev *pdev =
  1849. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1850. struct dp_ipa_resources *ipa_res;
  1851. qdf_ipa_wdi_pipe_setup_info_t *tx;
  1852. qdf_ipa_wdi_pipe_setup_info_t *rx;
  1853. qdf_ipa_wdi_conn_in_params_t pipe_in;
  1854. qdf_ipa_wdi_conn_out_params_t pipe_out;
  1855. struct tcl_data_cmd *tcl_desc_ptr;
  1856. uint8_t *desc_addr;
  1857. uint32_t desc_size;
  1858. int ret;
  1859. if (!pdev) {
  1860. dp_err("Invalid instance");
  1861. return QDF_STATUS_E_FAILURE;
  1862. }
  1863. ipa_res = &pdev->ipa_resource;
  1864. if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
  1865. return QDF_STATUS_SUCCESS;
  1866. qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
  1867. qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
  1868. qdf_mem_zero(&pipe_in, sizeof(pipe_in));
  1869. qdf_mem_zero(&pipe_out, sizeof(pipe_out));
  1870. /* TX PIPE */
  1871. /**
  1872. * Transfer Ring: WBM Ring
  1873. * Transfer Ring Doorbell PA: WBM Tail Pointer Address
  1874. * Event Ring: TCL ring
  1875. * Event Ring Doorbell PA: TCL Head Pointer Address
  1876. */
  1877. tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
  1878. QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
  1879. QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  1880. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
  1881. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
  1882. QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
  1883. QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
  1884. QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
  1885. QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
  1886. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
  1887. ipa_res->tx_comp_ring_base_paddr;
  1888. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
  1889. ipa_res->tx_comp_ring_size;
  1890. /* WBM Tail Pointer Address */
  1891. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
  1892. soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
  1893. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
  1894. ipa_res->tx_ring_base_paddr;
  1895. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
  1896. /* TCL Head Pointer Address */
  1897. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
  1898. soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
  1899. QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
  1900. ipa_res->tx_num_alloc_buffer;
  1901. QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
  1902. /* Preprogram TCL descriptor */
  1903. desc_addr =
  1904. (uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
  1905. desc_size = sizeof(struct tcl_data_cmd);
  1906. HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
  1907. tcl_desc_ptr = (struct tcl_data_cmd *)
  1908. (QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
  1909. tcl_desc_ptr->buf_addr_info.return_buffer_manager =
  1910. HAL_RX_BUF_RBM_SW2_BM;
  1911. tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
  1912. tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
  1913. tcl_desc_ptr->packet_offset = 2; /* padding for alignment */
  1914. /* RX PIPE */
  1915. /**
  1916. * Transfer Ring: REO Ring
  1917. * Transfer Ring Doorbell PA: REO Tail Pointer Address
  1918. * Event Ring: FW ring
  1919. * Event Ring Doorbell PA: FW Head Pointer Address
  1920. */
  1921. rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
  1922. QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
  1923. QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
  1924. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
  1925. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
  1926. QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
  1927. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
  1928. QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
  1929. QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
  1930. QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
  1931. QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
  1932. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
  1933. ipa_res->rx_rdy_ring_base_paddr;
  1934. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
  1935. ipa_res->rx_rdy_ring_size;
  1936. /* REO Tail Pointer Address */
  1937. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
  1938. soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
  1939. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
  1940. ipa_res->rx_refill_ring_base_paddr;
  1941. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
  1942. ipa_res->rx_refill_ring_size;
  1943. /* FW Head Pointer Address */
  1944. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
  1945. soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
  1946. QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN +
  1947. L3_HEADER_PADDING;
  1948. QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
  1949. QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
  1950. /* Connect WDI IPA PIPE */
  1951. ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
  1952. if (ret) {
  1953. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1954. "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
  1955. __func__, ret);
  1956. return QDF_STATUS_E_FAILURE;
  1957. }
  1958. /* IPA uC Doorbell registers */
  1959. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1960. "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
  1961. __func__,
  1962. (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
  1963. (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
  1964. ipa_res->tx_comp_doorbell_paddr =
  1965. QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
  1966. ipa_res->tx_comp_doorbell_vaddr =
  1967. QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
  1968. ipa_res->rx_ready_doorbell_paddr =
  1969. QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
  1970. soc->ipa_first_tx_db_access = true;
  1971. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1972. "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
  1973. __func__,
  1974. "transfer_ring_base_pa",
  1975. (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
  1976. "transfer_ring_size",
  1977. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
  1978. "transfer_ring_doorbell_pa",
  1979. (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
  1980. "event_ring_base_pa",
  1981. (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
  1982. "event_ring_size",
  1983. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
  1984. "event_ring_doorbell_pa",
  1985. (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
  1986. "num_pkt_buffers",
  1987. QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
  1988. "tx_comp_doorbell_paddr",
  1989. (void *)ipa_res->tx_comp_doorbell_paddr);
  1990. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1991. "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
  1992. __func__,
  1993. "transfer_ring_base_pa",
  1994. (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
  1995. "transfer_ring_size",
  1996. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
  1997. "transfer_ring_doorbell_pa",
  1998. (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
  1999. "event_ring_base_pa",
  2000. (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
  2001. "event_ring_size",
  2002. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
  2003. "event_ring_doorbell_pa",
  2004. (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
  2005. "num_pkt_buffers",
  2006. QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
  2007. "tx_comp_doorbell_paddr",
  2008. (void *)ipa_res->rx_ready_doorbell_paddr);
  2009. return QDF_STATUS_SUCCESS;
  2010. }
  2011. /**
  2012. * dp_ipa_setup_iface() - Setup IPA header and register interface
  2013. * @ifname: Interface name
  2014. * @mac_addr: Interface MAC address
  2015. * @prod_client: IPA prod client type
  2016. * @cons_client: IPA cons client type
  2017. * @session_id: Session ID
  2018. * @is_ipv6_enabled: Is IPV6 enabled or not
  2019. *
  2020. * Return: QDF_STATUS
  2021. */
  2022. QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
  2023. qdf_ipa_client_type_t prod_client,
  2024. qdf_ipa_client_type_t cons_client,
  2025. uint8_t session_id, bool is_ipv6_enabled)
  2026. {
  2027. qdf_ipa_wdi_reg_intf_in_params_t in;
  2028. qdf_ipa_wdi_hdr_info_t hdr_info;
  2029. struct dp_ipa_uc_tx_hdr uc_tx_hdr;
  2030. struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
  2031. int ret = -EINVAL;
  2032. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  2033. "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT,
  2034. __func__, ifname, QDF_MAC_ADDR_REF(mac_addr));
  2035. qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  2036. qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
  2037. /* IPV4 header */
  2038. uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
  2039. QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
  2040. QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  2041. QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
  2042. QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
  2043. DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
  2044. QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
  2045. qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
  2046. &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  2047. QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
  2048. QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
  2049. htonl(session_id << 16);
  2050. QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
  2051. /* IPV6 header */
  2052. if (is_ipv6_enabled) {
  2053. qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
  2054. DP_IPA_UC_WLAN_TX_HDR_LEN);
  2055. uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
  2056. QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
  2057. qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
  2058. &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  2059. }
  2060. ret = qdf_ipa_wdi_reg_intf(&in);
  2061. if (ret) {
  2062. dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
  2063. ret);
  2064. return QDF_STATUS_E_FAILURE;
  2065. }
  2066. return QDF_STATUS_SUCCESS;
  2067. }
  2068. #endif /* CONFIG_IPA_WDI_UNIFIED_API */
  2069. /**
  2070. * dp_ipa_cleanup() - Disconnect IPA pipes
  2071. * @soc_hdl: dp soc handle
  2072. * @pdev_id: dp pdev id
  2073. * @tx_pipe_handle: Tx pipe handle
  2074. * @rx_pipe_handle: Rx pipe handle
  2075. *
  2076. * Return: QDF_STATUS
  2077. */
  2078. QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  2079. uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
  2080. {
  2081. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2082. QDF_STATUS status = QDF_STATUS_SUCCESS;
  2083. struct dp_pdev *pdev;
  2084. int ret;
  2085. ret = qdf_ipa_wdi_disconn_pipes();
  2086. if (ret) {
  2087. dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
  2088. ret);
  2089. status = QDF_STATUS_E_FAILURE;
  2090. }
  2091. pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2092. if (qdf_unlikely(!pdev)) {
  2093. dp_err_rl("Invalid pdev for pdev_id %d", pdev_id);
  2094. status = QDF_STATUS_E_FAILURE;
  2095. goto exit;
  2096. }
  2097. dp_ipa_unmap_ring_doorbell_paddr(pdev);
  2098. exit:
  2099. return status;
  2100. }
  2101. /**
  2102. * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
  2103. * @ifname: Interface name
  2104. * @is_ipv6_enabled: Is IPV6 enabled or not
  2105. *
  2106. * Return: QDF_STATUS
  2107. */
  2108. QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
  2109. {
  2110. int ret;
  2111. ret = qdf_ipa_wdi_dereg_intf(ifname);
  2112. if (ret) {
  2113. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2114. "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
  2115. __func__, ret);
  2116. return QDF_STATUS_E_FAILURE;
  2117. }
  2118. return QDF_STATUS_SUCCESS;
  2119. }
#ifdef IPA_SET_RESET_TX_DB_PA
/*
 * Program / clear the Tx-completion doorbell physical address via the
 * dp_ipa helpers when the build supports set/reset of the Tx doorbell PA.
 */
#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \
		dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res))
#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \
		dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res))
#else
/* No-ops when IPA_SET_RESET_TX_DB_PA is not enabled for this target */
#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res)
#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res)
#endif
  2129. QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  2130. {
  2131. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2132. struct dp_pdev *pdev =
  2133. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2134. struct dp_ipa_resources *ipa_res;
  2135. QDF_STATUS result;
  2136. if (!pdev) {
  2137. dp_err("Invalid instance");
  2138. return QDF_STATUS_E_FAILURE;
  2139. }
  2140. ipa_res = &pdev->ipa_resource;
  2141. qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
  2142. DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res);
  2143. dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);
  2144. result = qdf_ipa_wdi_enable_pipes();
  2145. if (result) {
  2146. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2147. "%s: Enable WDI PIPE fail, code %d",
  2148. __func__, result);
  2149. qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
  2150. DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
  2151. dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
  2152. return QDF_STATUS_E_FAILURE;
  2153. }
  2154. if (soc->ipa_first_tx_db_access) {
  2155. dp_ipa_tx_comp_ring_init_hp(soc, ipa_res);
  2156. soc->ipa_first_tx_db_access = false;
  2157. }
  2158. return QDF_STATUS_SUCCESS;
  2159. }
  2160. QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  2161. {
  2162. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2163. struct dp_pdev *pdev =
  2164. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2165. QDF_STATUS result;
  2166. struct dp_ipa_resources *ipa_res;
  2167. if (!pdev) {
  2168. dp_err("Invalid instance");
  2169. return QDF_STATUS_E_FAILURE;
  2170. }
  2171. ipa_res = &pdev->ipa_resource;
  2172. qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS);
  2173. /*
  2174. * Reset the tx completion doorbell address before invoking IPA disable
  2175. * pipes API to ensure that there is no access to IPA tx doorbell
  2176. * address post disable pipes.
  2177. */
  2178. DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
  2179. result = qdf_ipa_wdi_disable_pipes();
  2180. if (result) {
  2181. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2182. "%s: Disable WDI PIPE fail, code %d",
  2183. __func__, result);
  2184. qdf_assert_always(0);
  2185. return QDF_STATUS_E_FAILURE;
  2186. }
  2187. qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
  2188. dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
  2189. return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
  2190. }
  2191. /**
  2192. * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
  2193. * @client: Client type
  2194. * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
  2195. *
  2196. * Return: QDF_STATUS
  2197. */
  2198. QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
  2199. {
  2200. qdf_ipa_wdi_perf_profile_t profile;
  2201. QDF_STATUS result;
  2202. profile.client = client;
  2203. profile.max_supported_bw_mbps = max_supported_bw_mbps;
  2204. result = qdf_ipa_wdi_set_perf_profile(&profile);
  2205. if (result) {
  2206. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2207. "%s: ipa_wdi_set_perf_profile fail, code %d",
  2208. __func__, result);
  2209. return QDF_STATUS_E_FAILURE;
  2210. }
  2211. return QDF_STATUS_SUCCESS;
  2212. }
  2213. /**
  2214. * dp_ipa_intrabss_send - send IPA RX intra-bss frames
  2215. * @pdev: pdev
  2216. * @vdev: vdev
  2217. * @nbuf: skb
  2218. *
  2219. * Return: nbuf if TX fails and NULL if TX succeeds
  2220. */
  2221. static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
  2222. struct dp_vdev *vdev,
  2223. qdf_nbuf_t nbuf)
  2224. {
  2225. struct dp_peer *vdev_peer;
  2226. uint16_t len;
  2227. vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA);
  2228. if (qdf_unlikely(!vdev_peer))
  2229. return nbuf;
  2230. qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
  2231. len = qdf_nbuf_len(nbuf);
  2232. if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
  2233. DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
  2234. dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
  2235. return nbuf;
  2236. }
  2237. DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len);
  2238. dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
  2239. return NULL;
  2240. }
/**
 * dp_ipa_rx_intrabss_fwd() - Forward IPA-delivered rx frames within the BSS
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of the vdev the frame arrived on
 * @nbuf: rx frame handed up by the IPA driver
 * @fwd_success: out - set true only if the (copied or original) frame was
 *               transmitted successfully
 *
 * Return: true if @nbuf was consumed by intra-bss forwarding (caller must
 * not pass it up to the stack), false if the caller still owns @nbuf.
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_IPA);
	struct dp_pdev *pdev;
	struct dp_peer *da_peer;
	struct dp_peer *sa_peer;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;
	bool status = false;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	/* vdev ref could not be taken; nothing to release, return directly */
	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		goto out;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		goto out;

	if (da_is_bcmc) {
		/* broadcast/multicast: transmit a copy, original goes to stack */
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto out;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		goto out;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* frame addressed to the vdev itself is not intra-bss traffic */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		goto out;

	/*
	 * Forward only when both DA and SA are known peers on this vdev;
	 * each lookup ref is dropped immediately after the existence check.
	 */
	da_peer = dp_peer_find_hash_find(soc, eh->h_dest, 0, vdev->vdev_id,
					 DP_MOD_ID_IPA);
	if (!da_peer)
		goto out;

	dp_peer_unref_delete(da_peer, DP_MOD_ID_IPA);

	sa_peer = dp_peer_find_hash_find(soc, eh->h_source, 0, vdev->vdev_id,
					 DP_MOD_ID_IPA);
	if (!sa_peer)
		goto out;

	dp_peer_unref_delete(sa_peer, DP_MOD_ID_IPA);

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	/* nbuf consumed either way (freed or transmitted) */
	status = true;

out:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
	return status;
}
#ifdef MDM_PLATFORM
/* Compile-time platform query: true on MDM builds */
bool dp_ipa_is_mdm_platform(void)
{
	return true;
}
#else
/* Compile-time platform query: false on non-MDM builds */
bool dp_ipa_is_mdm_platform(void)
{
	return false;
}
#endif
  2319. /**
  2320. * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA
  2321. * @soc: soc
  2322. * @nbuf: source skb
  2323. *
  2324. * Return: new nbuf if success and otherwise NULL
  2325. */
  2326. static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
  2327. qdf_nbuf_t nbuf)
  2328. {
  2329. uint8_t *src_nbuf_data;
  2330. uint8_t *dst_nbuf_data;
  2331. qdf_nbuf_t dst_nbuf;
  2332. qdf_nbuf_t temp_nbuf = nbuf;
  2333. uint32_t nbuf_len = qdf_nbuf_len(nbuf);
  2334. bool is_nbuf_head = true;
  2335. uint32_t copy_len = 0;
  2336. dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE,
  2337. RX_BUFFER_RESERVATION,
  2338. RX_DATA_BUFFER_ALIGNMENT, FALSE);
  2339. if (!dst_nbuf) {
  2340. dp_err_rl("nbuf allocate fail");
  2341. return NULL;
  2342. }
  2343. if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) {
  2344. qdf_nbuf_free(dst_nbuf);
  2345. dp_err_rl("nbuf is jumbo data");
  2346. return NULL;
  2347. }
  2348. /* prepeare to copy all data into new skb */
  2349. dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
  2350. while (temp_nbuf) {
  2351. src_nbuf_data = qdf_nbuf_data(temp_nbuf);
  2352. /* first head nbuf */
  2353. if (is_nbuf_head) {
  2354. qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
  2355. RX_PKT_TLVS_LEN);
  2356. /* leave extra 2 bytes L3_HEADER_PADDING */
  2357. dst_nbuf_data += (RX_PKT_TLVS_LEN + L3_HEADER_PADDING);
  2358. src_nbuf_data += RX_PKT_TLVS_LEN;
  2359. copy_len = qdf_nbuf_headlen(temp_nbuf) -
  2360. RX_PKT_TLVS_LEN;
  2361. temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
  2362. is_nbuf_head = false;
  2363. } else {
  2364. copy_len = qdf_nbuf_len(temp_nbuf);
  2365. temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
  2366. }
  2367. qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
  2368. dst_nbuf_data += copy_len;
  2369. }
  2370. qdf_nbuf_set_len(dst_nbuf, nbuf_len);
  2371. /* copy is done, free original nbuf */
  2372. qdf_nbuf_free(nbuf);
  2373. return dst_nbuf;
  2374. }
  2375. /**
  2376. * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
  2377. * @soc: soc
  2378. * @nbuf: skb
  2379. *
  2380. * Return: nbuf if success and otherwise NULL
  2381. */
  2382. qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2383. {
  2384. if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
  2385. return nbuf;
  2386. /* WLAN IPA is run-time disabled */
  2387. if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
  2388. return nbuf;
  2389. if (!qdf_nbuf_is_frag(nbuf))
  2390. return nbuf;
  2391. /* linearize skb for IPA */
  2392. return dp_ipa_frag_nbuf_linearize(soc, nbuf);
  2393. }
  2394. QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
  2395. struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  2396. {
  2397. QDF_STATUS ret;
  2398. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2399. struct dp_pdev *pdev =
  2400. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2401. if (!pdev) {
  2402. dp_err("%s invalid instance", __func__);
  2403. return QDF_STATUS_E_FAILURE;
  2404. }
  2405. if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
  2406. dp_debug("SMMU S1 disabled");
  2407. return QDF_STATUS_SUCCESS;
  2408. }
  2409. ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true);
  2410. if (ret)
  2411. return ret;
  2412. ret = dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, true);
  2413. if (ret)
  2414. __dp_ipa_tx_buf_smmu_mapping(soc, pdev, false);
  2415. return ret;
  2416. }
  2417. QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
  2418. struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  2419. {
  2420. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2421. struct dp_pdev *pdev =
  2422. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2423. if (!pdev) {
  2424. dp_err("%s invalid instance", __func__);
  2425. return QDF_STATUS_E_FAILURE;
  2426. }
  2427. if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
  2428. dp_debug("SMMU S1 disabled");
  2429. return QDF_STATUS_SUCCESS;
  2430. }
  2431. if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false) ||
  2432. dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false))
  2433. return QDF_STATUS_E_FAILURE;
  2434. return QDF_STATUS_SUCCESS;
  2435. }
  2436. #endif