dp_ipa.c 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878
  1. /*
  2. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #ifdef IPA_OFFLOAD
  17. #include <qdf_ipa_wdi3.h>
  18. #include <qdf_types.h>
  19. #include <qdf_lock.h>
  20. #include <hal_hw_headers.h>
  21. #include <hal_api.h>
  22. #include <hif.h>
  23. #include <htt.h>
  24. #include <wdi_event.h>
  25. #include <queue.h>
  26. #include "dp_types.h"
  27. #include "dp_htt.h"
  28. #include "dp_tx.h"
  29. #include "dp_rx.h"
  30. #include "dp_ipa.h"
  31. /* Ring index for WBM2SW2 release ring */
  32. #define IPA_TX_COMP_RING_IDX HAL_IPA_TX_COMP_RING_IDX
  33. /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
  34. #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048)
  35. /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to
  36. * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
  37. * This causes back pressure, resulting in a FW crash.
  38. * By leaving some entries with no buffer attached, WBM will be able to write
  39. * to the ring, and from dumps we can figure out the buffer which is causing
  40. * this issue.
  41. */
  42. #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
/**
 * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps
 * @timestamp: qdf log timestamp at which the remap was recorded
 * @ix0_reg: reo destination ring IX0 value
 * @ix2_reg: reo destination ring IX2 value
 * @ix3_reg: reo destination ring IX3 value
 */
struct dp_ipa_reo_remap_record {
	uint64_t timestamp;
	uint32_t ix0_reg;
	uint32_t ix2_reg;
	uint32_t ix3_reg;
};
#define REO_REMAP_HISTORY_SIZE 32
/* Circular history of the most recent REO remap register writes */
struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];
/* Monotonically incremented write cursor into dp_ipa_reo_remap_history */
static qdf_atomic_t dp_ipa_reo_remap_history_index;
/* Advance the atomic history cursor and return the slot index to write.
 * The raw counter is pulled back by REO_REMAP_HISTORY_SIZE once it reaches
 * the array size so it does not grow unbounded; the modulo on return keeps
 * the result a valid array index even if a concurrent increment has already
 * pushed the counter past the wrap point.
 */
static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
{
	int next = qdf_atomic_inc_return(index);
	if (next == REO_REMAP_HISTORY_SIZE)
		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);
	return next % REO_REMAP_HISTORY_SIZE;
}
  65. /**
  66. * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
  67. * @ix0_val: reo destination ring IX0 value
  68. * @ix2_val: reo destination ring IX2 value
  69. * @ix3_val: reo destination ring IX3 value
  70. *
  71. * Return: None
  72. */
  73. static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
  74. uint32_t ix3_val)
  75. {
  76. int idx = dp_ipa_reo_remap_record_index_next(
  77. &dp_ipa_reo_remap_history_index);
  78. struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx];
  79. record->timestamp = qdf_get_log_timestamp();
  80. record->ix0_reg = ix0_val;
  81. record->ix2_reg = ix2_val;
  82. record->ix3_reg = ix3_val;
  83. }
/**
 * __dp_ipa_handle_buf_smmu_mapping() - Create or release the IPA smmu
 *                                      mapping for a single nbuf
 * @soc: data path SoC handle
 * @nbuf: network buffer whose frag-0 physical address is (un)mapped
 * @size: mapping size in bytes
 * @create: true to create the smmu mapping, false to release it
 *
 * Asserts (rather than returns an error) on any mapping failure.
 *
 * Return: result of qdf_ipa_wdi_create/release_smmu_mapping()
 */
static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
						   qdf_nbuf_t nbuf,
						   uint32_t size,
						   bool create)
{
	qdf_mem_info_t mem_map_table = {0};
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
				 qdf_nbuf_get_frag_paddr(nbuf, 0),
				 size);

	if (create) {
		/* Assert if PA is zero */
		qdf_assert_always(mem_map_table.pa);
		ret = qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
	} else {
		ret = qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);
	}
	qdf_assert_always(!ret);

	/* Return status of mapping/unmapping is stored in
	 * mem_map_table.result field, assert if the result
	 * is failure
	 */
	if (create)
		qdf_assert_always(!mem_map_table.result);
	else
		qdf_assert_always(mem_map_table.result >= mem_map_table.size);

	return ret;
}
  112. QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
  113. qdf_nbuf_t nbuf,
  114. uint32_t size,
  115. bool create)
  116. {
  117. struct dp_pdev *pdev;
  118. int i;
  119. for (i = 0; i < soc->pdev_count; i++) {
  120. pdev = soc->pdev_list[i];
  121. if (pdev && pdev->monitor_configured)
  122. return QDF_STATUS_SUCCESS;
  123. }
  124. if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
  125. !qdf_mem_smmu_s1_enabled(soc->osdev))
  126. return QDF_STATUS_SUCCESS;
  127. /**
  128. * Even if ipa pipes is disabled, but if it's unmap
  129. * operation and nbuf has done ipa smmu map before,
  130. * do ipa smmu unmap as well.
  131. */
  132. if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) {
  133. if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) {
  134. DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1);
  135. } else {
  136. return QDF_STATUS_SUCCESS;
  137. }
  138. }
  139. if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
  140. if (create) {
  141. DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1);
  142. } else {
  143. DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1);
  144. }
  145. return QDF_STATUS_E_INVAL;
  146. }
  147. qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
  148. return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create);
  149. }
  150. static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
  151. struct dp_soc *soc,
  152. struct dp_pdev *pdev,
  153. bool create)
  154. {
  155. uint32_t index;
  156. QDF_STATUS ret = QDF_STATUS_SUCCESS;
  157. uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
  158. qdf_nbuf_t nbuf;
  159. uint32_t buf_len;
  160. if (!ipa_is_ready()) {
  161. dp_info("IPA is not READY");
  162. return 0;
  163. }
  164. for (index = 0; index < tx_buffer_cnt; index++) {
  165. nbuf = (qdf_nbuf_t)
  166. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index];
  167. if (!nbuf)
  168. continue;
  169. buf_len = qdf_nbuf_get_data_len(nbuf);
  170. return __dp_ipa_handle_buf_smmu_mapping(
  171. soc, nbuf, buf_len, create);
  172. }
  173. return ret;
  174. }
  175. #ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - Map/unmap all in-use Rx
 *                                            buffers for IPA (multi-page
 *                                            Rx descriptor pool variant)
 * @soc: data path SoC handle
 * @pdev: physical device whose Rx descriptor pool is walked
 * @create: true to create smmu mappings, false to release them
 *
 * Walks every descriptor of the pdev's Rx pool page-by-page under the
 * pool lock, mapping/unmapping each nbuf that is in use and not already
 * unmapped. Requests matching the nbuf's current map state are counted
 * as duplicates and skipped.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	uint32_t num_desc, page_id, offset, i;
	uint16_t num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	/* Nothing to do if IPA is not up or stage-1 smmu is disabled */
	if (!qdf_ipa_is_ready())
		return ret;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return ret;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	qdf_spin_lock_bh(&rx_pool->lock);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		/* Stop if the backing descriptor pages are gone */
		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
			break;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
			continue;
		nbuf = rx_desc->nbuf;

		/* Count and skip requests matching the current map state */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
		ret = __dp_ipa_handle_buf_smmu_mapping(
				soc, nbuf, rx_pool->buf_size, create);
	}
	qdf_spin_unlock_bh(&rx_pool->lock);
	return ret;
}
  225. #else
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - Map/unmap all in-use Rx
 *                                            buffers for IPA (flat Rx
 *                                            descriptor array variant)
 * @soc: data path SoC handle
 * @pdev: physical device whose Rx descriptor pool is walked
 * @create: true to create smmu mappings, false to release them
 *
 * Same contract as the RX_DESC_MULTI_PAGE_ALLOC variant, but walks the
 * flat rx_pool->array[] instead of descriptor pages.
 *
 * Return: QDF_STATUS_SUCCESS (per-buffer failures are asserted inside
 *         __dp_ipa_handle_buf_smmu_mapping, not propagated)
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	qdf_nbuf_t nbuf;
	int i;

	if (!qdf_ipa_is_ready())
		return QDF_STATUS_SUCCESS;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	qdf_spin_lock_bh(&rx_pool->lock);
	for (i = 0; i < rx_pool->pool_size; i++) {
		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
		    rx_pool->array[i].rx_desc.unmapped)
			continue;
		nbuf = rx_pool->array[i].rx_desc.nbuf;

		/* Count and skip requests matching the current map state */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						 rx_pool->buf_size, create);
	}
	qdf_spin_unlock_bh(&rx_pool->lock);
	return QDF_STATUS_SUCCESS;
}
  264. #endif /* RX_DESC_MULTI_PAGE_ALLOC */
/**
 * dp_ipa_get_shared_mem_info() - Fill a qdf_shared_mem_t describing an
 *                                already-allocated DMA region
 * @osdev: qdf device handle
 * @shared_mem: output shared-memory descriptor
 * @cpu_addr: kernel virtual address of the region
 * @dma_addr: DMA address of the region
 * @size: region size in bytes
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_NOMEM if the scatter-gather
 *         table cannot be built
 */
static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
					     qdf_shared_mem_t *shared_mem,
					     void *cpu_addr,
					     qdf_dma_addr_t dma_addr,
					     uint32_t size)
{
	qdf_dma_addr_t paddr;
	int ret;

	shared_mem->vaddr = cpu_addr;
	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;

	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);

	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
				      shared_mem->vaddr, dma_addr, size);
	if (ret) {
		dp_err("Unable to get DMA sgtable");
		return QDF_STATUS_E_NOMEM;
	}

	/* Populate the dma addresses in the sgtable entries */
	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);

	return QDF_STATUS_SUCCESS;
}
  287. #ifdef IPA_WDI3_TX_TWO_PIPES
  288. static void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
  289. {
  290. struct dp_ipa_resources *ipa_res;
  291. qdf_nbuf_t nbuf;
  292. int idx;
  293. for (idx = 0; idx < soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; idx++) {
  294. nbuf = (qdf_nbuf_t)
  295. soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx];
  296. if (!nbuf)
  297. continue;
  298. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
  299. qdf_mem_dp_tx_skb_cnt_dec();
  300. qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
  301. qdf_nbuf_free(nbuf);
  302. soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx] =
  303. (void *)NULL;
  304. }
  305. qdf_mem_free(soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
  306. soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
  307. ipa_res = &pdev->ipa_resource;
  308. if (!ipa_res->is_db_ddr_mapped)
  309. iounmap(ipa_res->tx_alt_comp_doorbell_vaddr);
  310. qdf_mem_free_sgtable(&ipa_res->tx_alt_ring.sgtable);
  311. qdf_mem_free_sgtable(&ipa_res->tx_alt_comp_ring.sgtable);
  312. }
/**
 * dp_ipa_tx_alt_pool_attach() - Allocate the alternative TX pipe buffer
 *                               pool and post it to the WBM release ring
 * @soc: data path SoC handle
 *
 * Allocates up to (ring entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES)
 * nbufs, DMA-maps each one, and writes its address into the alt TX comp
 * (WBM) ring — simulating H/W as the source ring and advancing HP.
 *
 * Return: QDF_STATUS_SUCCESS on success, -EINVAL on a bad ring size,
 *         -ENOMEM when no buffers could be allocated
 */
static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *	dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc,
			    hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Leave some entries unpopulated — see the WAR comment near the
	 * top of this file.
	 */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	for (tx_buffer_count = 0;
	     tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(
				soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		qdf_mem_dp_tx_skb_cnt_inc();
		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));

		/* Split the DMA address (bits 0-31 / 32-36 per the masks)
		 * into the descriptor's lo/hi fields.
		 */
		paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
		paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
		HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
		HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
		HAL_RXDMA_MANAGER_SET(ring_entry, HAL_WBM_SW4_BM_ID);

		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[
			tx_buffer_count] = (void *)nbuf;
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA TX buffer pool2: %d allocated", tx_buffer_count);
	} else {
		dp_err("Failed to allocate IPA TX buffer pool2");
		qdf_mem_free(
			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
/**
 * dp_ipa_tx_alt_ring_get_resource() - Fill pdev ipa_resource with shared
 *                                     memory info for the alt TX rings
 * @pdev: physical device instance
 *
 * NOTE(review): the two dp_ipa_get_shared_mem_info() return values are
 * ignored here; only the comp-ring dma address is validated afterwards —
 * confirm this is intentional.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the alt comp
 *         ring has no dma address
 */
static QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	ipa_res->tx_alt_ring_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt;

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->tx_alt_ring,
			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->tx_alt_comp_ring,
			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);

	if (!qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->tx_alt_comp_ring.mem_info))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
  426. static void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
  427. {
  428. struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
  429. struct hal_srng *hal_srng;
  430. struct hal_srng_params srng_params;
  431. unsigned long addr_offset, dev_base_paddr;
  432. /* IPA TCL_DATA Alternative Ring - HAL_SRNG_SW2TCL2 */
  433. hal_srng = (struct hal_srng *)
  434. soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng;
  435. hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
  436. hal_srng_to_hal_ring_handle(hal_srng),
  437. &srng_params);
  438. soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr =
  439. srng_params.ring_base_paddr;
  440. soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr =
  441. srng_params.ring_base_vaddr;
  442. soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size =
  443. (srng_params.num_entries * srng_params.entry_size) << 2;
  444. /*
  445. * For the register backed memory addresses, use the scn->mem_pa to
  446. * calculate the physical address of the shadow registers
  447. */
  448. dev_base_paddr =
  449. (unsigned long)
  450. ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
  451. addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
  452. (unsigned long)(hal_soc->dev_base_addr);
  453. soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr =
  454. (qdf_dma_addr_t)(addr_offset + dev_base_paddr);
  455. dp_info("IPA TCL_DATA Alt Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
  456. (unsigned int)addr_offset,
  457. (unsigned int)dev_base_paddr,
  458. (unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr),
  459. (void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
  460. (void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
  461. srng_params.num_entries,
  462. soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);
  463. /* IPA TX Alternative COMP Ring - HAL_SRNG_WBM2SW4_RELEASE */
  464. hal_srng = (struct hal_srng *)
  465. soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
  466. hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
  467. hal_srng_to_hal_ring_handle(hal_srng),
  468. &srng_params);
  469. soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr =
  470. srng_params.ring_base_paddr;
  471. soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr =
  472. srng_params.ring_base_vaddr;
  473. soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size =
  474. (srng_params.num_entries * srng_params.entry_size) << 2;
  475. soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr =
  476. hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
  477. hal_srng_to_hal_ring_handle(hal_srng));
  478. addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
  479. (unsigned long)(hal_soc->dev_base_addr);
  480. soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr =
  481. (qdf_dma_addr_t)(addr_offset + dev_base_paddr);
  482. dp_info("IPA TX Alt COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
  483. (unsigned int)addr_offset,
  484. (unsigned int)dev_base_paddr,
  485. (unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr),
  486. (void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
  487. (void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
  488. srng_params.num_entries,
  489. soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
  490. }
/**
 * dp_ipa_map_ring_doorbell_paddr() - Map the IPA doorbell registers for
 *                                    CPU access and (optionally) smmu
 * @pdev: physical device instance
 *
 * Maps the TX completion doorbell into kernel virtual address space
 * (phys_to_virt when the doorbell lives in DDR, ioremap otherwise).
 * When stage-1 smmu is enabled, the doorbell physical addresses are
 * replaced in-place by their smmu-mapped IOVAs. The alternative TX pipe
 * doorbell, when present, is handled identically.
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	uint32_t tx_comp_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		/* From here on, the paddr field holds the smmu IOVA */
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);

		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}

	/* Setup for alternative TX pipe */
	if (!ipa_res->tx_alt_comp_doorbell_paddr)
		return;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_alt_comp_doorbell_vaddr =
			phys_to_virt(ipa_res->tx_alt_comp_doorbell_paddr);
	else
		ipa_res->tx_alt_comp_doorbell_vaddr =
			ioremap(ipa_res->tx_alt_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_alt_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->tx_alt_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}
}
  536. static void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
  537. {
  538. struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
  539. struct dp_soc *soc = pdev->soc;
  540. int ret = 0;
  541. if (!qdf_mem_smmu_s1_enabled(soc->osdev))
  542. return;
  543. /* Unmap must be in reverse order of map */
  544. if (ipa_res->tx_alt_comp_doorbell_paddr) {
  545. ret = pld_smmu_unmap(soc->osdev->dev,
  546. ipa_res->tx_alt_comp_doorbell_paddr,
  547. sizeof(uint32_t));
  548. qdf_assert_always(!ret);
  549. }
  550. ret = pld_smmu_unmap(soc->osdev->dev,
  551. ipa_res->rx_ready_doorbell_paddr,
  552. sizeof(uint32_t));
  553. qdf_assert_always(!ret);
  554. ret = pld_smmu_unmap(soc->osdev->dev,
  555. ipa_res->tx_comp_doorbell_paddr,
  556. sizeof(uint32_t));
  557. qdf_assert_always(!ret);
  558. }
  559. static QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
  560. struct dp_pdev *pdev,
  561. bool create)
  562. {
  563. QDF_STATUS ret = QDF_STATUS_SUCCESS;
  564. struct ipa_dp_tx_rsc *rsc;
  565. uint32_t tx_buffer_cnt;
  566. uint32_t buf_len;
  567. qdf_nbuf_t nbuf;
  568. uint32_t index;
  569. if (!ipa_is_ready()) {
  570. dp_info("IPA is not READY");
  571. return QDF_STATUS_SUCCESS;
  572. }
  573. rsc = &soc->ipa_uc_tx_rsc_alt;
  574. tx_buffer_cnt = rsc->alloc_tx_buf_cnt;
  575. for (index = 0; index < tx_buffer_cnt; index++) {
  576. nbuf = (qdf_nbuf_t)rsc->tx_buf_pool_vaddr_unaligned[index];
  577. if (!nbuf)
  578. continue;
  579. buf_len = qdf_nbuf_get_data_len(nbuf);
  580. ret = __dp_ipa_handle_buf_smmu_mapping(
  581. soc, nbuf, buf_len, create);
  582. qdf_assert_always(!ret);
  583. }
  584. return ret;
  585. }
/**
 * dp_ipa_wdi_tx_alt_pipe_params() - Fill WDI pipe setup info for the
 *	alternate (second) TX pipe, non-SMMU case
 * @soc: data path SoC handle
 * @ipa_res: IPA resources of the pdev
 * @tx: WDI pipe setup info to populate
 *
 * Return: none
 */
static void dp_ipa_wdi_tx_alt_pipe_params(struct dp_soc *soc,
					  struct dp_ipa_resources *ipa_res,
					  qdf_ipa_wdi_pipe_setup_info_t *tx)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS1;

	/* Transfer ring: alternate TX completion (WBM) ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	/* Event ring: alternate TCL data ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_alt_ring_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager = HAL_WBM_SW4_BM_ID;
	tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
	tcl_desc_ptr->addry_en = 1; /* Address Y search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 0; /* padding for alignment */
}
/**
 * dp_ipa_wdi_tx_alt_pipe_smmu_params() - Fill WDI pipe setup info for the
 *	alternate (second) TX pipe when SMMU S1 translation is enabled
 * @soc: data path SoC handle
 * @ipa_res: IPA resources of the pdev
 * @tx_smmu: WDI SMMU pipe setup info to populate
 *
 * Return: none
 */
static void
dp_ipa_wdi_tx_alt_pipe_smmu_params(struct dp_soc *soc,
				   struct dp_ipa_resources *ipa_res,
				   qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = IPA_CLIENT_WLAN2_CONS1;

	/* Transfer ring: alternate TX completion (WBM) ring as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	/* Event ring: alternate TCL data ring as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_alt_ring_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager = HAL_WBM_SW4_BM_ID;
	tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
	tcl_desc_ptr->addry_en = 1; /* Address Y search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 0; /* padding for alignment */
}
  675. static void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc,
  676. struct dp_ipa_resources *res,
  677. qdf_ipa_wdi_conn_in_params_t *in)
  678. {
  679. qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu = NULL;
  680. qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
  681. qdf_ipa_ep_cfg_t *tx_cfg;
  682. QDF_IPA_WDI_CONN_IN_PARAMS_IS_TX1_USED(in) = true;
  683. if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
  684. tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE_SMMU(in);
  685. tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
  686. dp_ipa_wdi_tx_alt_pipe_smmu_params(soc, res, tx_smmu);
  687. } else {
  688. tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE(in);
  689. tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx);
  690. dp_ipa_wdi_tx_alt_pipe_params(soc, res, tx);
  691. }
  692. QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
  693. QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  694. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
  695. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
  696. QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
  697. QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
  698. QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
  699. }
/**
 * dp_ipa_set_pipe_db() - Record the uC doorbell physical addresses returned
 *	by the IPA driver after pipe connect
 * @res: IPA resources of the pdev
 * @out: WDI connect output parameters from the IPA driver
 *
 * Return: none
 */
static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
			       qdf_ipa_wdi_conn_out_params_t *out)
{
	res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
	res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
	/* Doorbell of the alternate (second) TX pipe */
	res->tx_alt_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_ALT_DB_PA(out);
}
/**
 * dp_ipa_setup_iface_session_id() - Decode the combined session id and
 *	program it into the WDI interface registration params
 * @in: WDI interface registration input params
 * @session_id: combined value; the caller packs the real session id shifted
 *	left by IPA_SESSION_ID_SHIFT with a 2 GHz-interface flag in the low
 *	bits (assumes IPA_SESSION_ID_SHIFT is 1 — confirm against the macro)
 *
 * Return: none
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	/* Low bit(s) flag a 2 GHz interface, which uses the alt TX pipe */
	bool is_2g_iface = session_id & IPA_SESSION_ID_SHIFT;

	/* Recover the actual session id */
	session_id = session_id >> IPA_SESSION_ID_SHIFT;

	dp_debug("session_id %u is_2g_iface %d", session_id, is_2g_iface);

	/* Session id rides in the upper 16 bits of the metadata word */
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_TX1_USED(in) = is_2g_iface;
}
  719. static void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
  720. struct dp_ipa_resources *res)
  721. {
  722. struct hal_srng *wbm_srng;
  723. /* Init first TX comp ring */
  724. wbm_srng = (struct hal_srng *)
  725. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  726. hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
  727. res->tx_comp_doorbell_vaddr);
  728. /* Init the alternate TX comp ring */
  729. wbm_srng = (struct hal_srng *)
  730. soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
  731. hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
  732. res->tx_alt_comp_doorbell_vaddr);
  733. }
  734. static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
  735. struct dp_ipa_resources *ipa_res)
  736. {
  737. struct hal_srng *wbm_srng;
  738. wbm_srng = (struct hal_srng *)
  739. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  740. hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
  741. ipa_res->tx_comp_doorbell_paddr);
  742. dp_info("paddr %pK vaddr %pK",
  743. (void *)ipa_res->tx_comp_doorbell_paddr,
  744. (void *)ipa_res->tx_comp_doorbell_vaddr);
  745. /* Setup for alternative TX comp ring */
  746. wbm_srng = (struct hal_srng *)
  747. soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
  748. hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
  749. ipa_res->tx_alt_comp_doorbell_paddr);
  750. dp_info("paddr %pK vaddr %pK",
  751. (void *)ipa_res->tx_alt_comp_doorbell_paddr,
  752. (void *)ipa_res->tx_alt_comp_doorbell_vaddr);
  753. }
  754. #ifdef IPA_SET_RESET_TX_DB_PA
  755. static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
  756. struct dp_ipa_resources *ipa_res)
  757. {
  758. hal_ring_handle_t wbm_srng;
  759. qdf_dma_addr_t hp_addr;
  760. wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  761. if (!wbm_srng)
  762. return QDF_STATUS_E_FAILURE;
  763. hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
  764. hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
  765. dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
  766. /* Reset alternative TX comp ring */
  767. wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
  768. if (!wbm_srng)
  769. return QDF_STATUS_E_FAILURE;
  770. hp_addr = soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr;
  771. hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
  772. dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
  773. return QDF_STATUS_SUCCESS;
  774. }
  775. #endif /* IPA_SET_RESET_TX_DB_PA */
  776. #else /* !IPA_WDI3_TX_TWO_PIPES */
/* IPA_WDI3_TX_TWO_PIPES disabled: no alternate TX buffer pool to free */
static inline
void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
/* IPA_WDI3_TX_TWO_PIPES disabled: no alternate TX ring resources to record */
static inline void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
}
/* IPA_WDI3_TX_TWO_PIPES disabled: nothing to attach; report success */
static inline int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	return 0;
}
/* IPA_WDI3_TX_TWO_PIPES disabled: no alternate TX ring resource to fetch */
static inline QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
  792. static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
  793. {
  794. struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
  795. uint32_t rx_ready_doorbell_dmaaddr;
  796. uint32_t tx_comp_doorbell_dmaaddr;
  797. struct dp_soc *soc = pdev->soc;
  798. int ret = 0;
  799. if (ipa_res->is_db_ddr_mapped)
  800. ipa_res->tx_comp_doorbell_vaddr =
  801. phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
  802. else
  803. ipa_res->tx_comp_doorbell_vaddr =
  804. ioremap(ipa_res->tx_comp_doorbell_paddr, 4);
  805. if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
  806. ret = pld_smmu_map(soc->osdev->dev,
  807. ipa_res->tx_comp_doorbell_paddr,
  808. &tx_comp_doorbell_dmaaddr,
  809. sizeof(uint32_t));
  810. ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
  811. qdf_assert_always(!ret);
  812. ret = pld_smmu_map(soc->osdev->dev,
  813. ipa_res->rx_ready_doorbell_paddr,
  814. &rx_ready_doorbell_dmaaddr,
  815. sizeof(uint32_t));
  816. ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
  817. qdf_assert_always(!ret);
  818. }
  819. }
  820. static inline void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
  821. {
  822. struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
  823. struct dp_soc *soc = pdev->soc;
  824. int ret = 0;
  825. if (!qdf_mem_smmu_s1_enabled(soc->osdev))
  826. return;
  827. ret = pld_smmu_unmap(soc->osdev->dev,
  828. ipa_res->rx_ready_doorbell_paddr,
  829. sizeof(uint32_t));
  830. qdf_assert_always(!ret);
  831. ret = pld_smmu_unmap(soc->osdev->dev,
  832. ipa_res->tx_comp_doorbell_paddr,
  833. sizeof(uint32_t));
  834. qdf_assert_always(!ret);
  835. }
/* IPA_WDI3_TX_TWO_PIPES disabled: no alternate TX buffers to (un)map */
static inline QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
							struct dp_pdev *pdev,
							bool create)
{
	return QDF_STATUS_SUCCESS;
}
/* IPA_WDI3_TX_TWO_PIPES disabled: no alternate TX pipe to set up */
static inline
void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, struct dp_ipa_resources *res,
			      qdf_ipa_wdi_conn_in_params_t *in)
{
}
/**
 * dp_ipa_set_pipe_db() - Record the uC doorbell physical addresses returned
 *	by the IPA driver after pipe connect
 * @res: IPA resources of the pdev
 * @out: WDI connect output parameters from the IPA driver
 *
 * Return: none
 */
static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
			       qdf_ipa_wdi_conn_out_params_t *out)
{
	res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
	res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
}
  855. static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
  856. uint8_t session_id)
  857. {
  858. QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) = htonl(session_id << 16);
  859. }
  860. static inline void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
  861. struct dp_ipa_resources *res)
  862. {
  863. struct hal_srng *wbm_srng = (struct hal_srng *)
  864. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  865. hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
  866. res->tx_comp_doorbell_vaddr);
  867. }
  868. static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
  869. struct dp_ipa_resources *ipa_res)
  870. {
  871. struct hal_srng *wbm_srng = (struct hal_srng *)
  872. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  873. hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
  874. ipa_res->tx_comp_doorbell_paddr);
  875. dp_info("paddr %pK vaddr %pK",
  876. (void *)ipa_res->tx_comp_doorbell_paddr,
  877. (void *)ipa_res->tx_comp_doorbell_vaddr);
  878. }
  879. #ifdef IPA_SET_RESET_TX_DB_PA
  880. static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
  881. struct dp_ipa_resources *ipa_res)
  882. {
  883. hal_ring_handle_t wbm_srng =
  884. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  885. qdf_dma_addr_t hp_addr;
  886. if (!wbm_srng)
  887. return QDF_STATUS_E_FAILURE;
  888. hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
  889. hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
  890. dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
  891. return QDF_STATUS_SUCCESS;
  892. }
  893. #endif /* IPA_SET_RESET_TX_DB_PA */
  894. #endif /* IPA_WDI3_TX_TWO_PIPES */
  895. /**
  896. * dp_tx_ipa_uc_detach - Free autonomy TX resources
  897. * @soc: data path instance
  898. * @pdev: core txrx pdev context
  899. *
  900. * Free allocated TX buffers with WBM SRNG
  901. *
  902. * Return: none
  903. */
  904. static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
  905. {
  906. int idx;
  907. qdf_nbuf_t nbuf;
  908. struct dp_ipa_resources *ipa_res;
  909. for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
  910. nbuf = (qdf_nbuf_t)
  911. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
  912. if (!nbuf)
  913. continue;
  914. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
  915. qdf_mem_dp_tx_skb_cnt_dec();
  916. qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
  917. qdf_nbuf_free(nbuf);
  918. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
  919. (void *)NULL;
  920. }
  921. qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
  922. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
  923. ipa_res = &pdev->ipa_resource;
  924. if (!ipa_res->is_db_ddr_mapped)
  925. iounmap(ipa_res->tx_comp_doorbell_vaddr);
  926. qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
  927. qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
  928. }
/**
 * dp_rx_ipa_uc_detach - free autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX from the main device context;
 * it frees the sgtables that describe the RX ready and RX refill
 * rings shared with IPA.
 *
 * Return: none
 */
static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
}
  945. int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
  946. {
  947. if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
  948. return QDF_STATUS_SUCCESS;
  949. /* TX resource detach */
  950. dp_tx_ipa_uc_detach(soc, pdev);
  951. /* Cleanup 2nd TX pipe resources */
  952. dp_ipa_tx_alt_pool_detach(soc, pdev);
  953. /* RX resource detach */
  954. dp_rx_ipa_uc_detach(soc, pdev);
  955. return QDF_STATUS_SUCCESS; /* success */
  956. }
  957. /**
  958. * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
  959. * @soc: data path instance
  960. * @pdev: Physical device handle
  961. *
  962. * Allocate TX buffer from non-cacheable memory
  963. * Attache allocated TX buffers with WBM SRNG
  964. *
  965. * Return: int
  966. */
  967. static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
  968. {
  969. uint32_t tx_buffer_count;
  970. uint32_t ring_base_align = 8;
  971. qdf_dma_addr_t buffer_paddr;
  972. struct hal_srng *wbm_srng = (struct hal_srng *)
  973. soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
  974. struct hal_srng_params srng_params;
  975. uint32_t paddr_lo;
  976. uint32_t paddr_hi;
  977. void *ring_entry;
  978. int num_entries;
  979. qdf_nbuf_t nbuf;
  980. int retval = QDF_STATUS_SUCCESS;
  981. int max_alloc_count = 0;
  982. /*
  983. * Uncomment when dp_ops_cfg.cfg_attach is implemented
  984. * unsigned int uc_tx_buf_sz =
  985. * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
  986. */
  987. unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
  988. unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;
  989. hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
  990. &srng_params);
  991. num_entries = srng_params.num_entries;
  992. max_alloc_count =
  993. num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
  994. if (max_alloc_count <= 0) {
  995. dp_err("incorrect value for buffer count %u", max_alloc_count);
  996. return -EINVAL;
  997. }
  998. dp_info("requested %d buffers to be posted to wbm ring",
  999. max_alloc_count);
  1000. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
  1001. qdf_mem_malloc(num_entries *
  1002. sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
  1003. if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
  1004. dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
  1005. return -ENOMEM;
  1006. }
  1007. hal_srng_access_start_unlocked(soc->hal_soc,
  1008. hal_srng_to_hal_ring_handle(wbm_srng));
  1009. /*
  1010. * Allocate Tx buffers as many as possible.
  1011. * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
  1012. * Populate Tx buffers into WBM2IPA ring
  1013. * This initial buffer population will simulate H/W as source ring,
  1014. * and update HP
  1015. */
  1016. for (tx_buffer_count = 0;
  1017. tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
  1018. nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
  1019. if (!nbuf)
  1020. break;
  1021. ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
  1022. hal_srng_to_hal_ring_handle(wbm_srng));
  1023. if (!ring_entry) {
  1024. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1025. "%s: Failed to get WBM ring entry",
  1026. __func__);
  1027. qdf_nbuf_free(nbuf);
  1028. break;
  1029. }
  1030. qdf_nbuf_map_single(soc->osdev, nbuf,
  1031. QDF_DMA_BIDIRECTIONAL);
  1032. buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
  1033. qdf_mem_dp_tx_skb_cnt_inc();
  1034. qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));
  1035. paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
  1036. paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
  1037. HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
  1038. HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
  1039. HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX +
  1040. HAL_WBM_SW0_BM_ID));
  1041. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
  1042. = (void *)nbuf;
  1043. }
  1044. hal_srng_access_end_unlocked(soc->hal_soc,
  1045. hal_srng_to_hal_ring_handle(wbm_srng));
  1046. soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
  1047. if (tx_buffer_count) {
  1048. dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
  1049. } else {
  1050. dp_err("No IPA WDI TX buffer allocated!");
  1051. qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
  1052. soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
  1053. retval = -ENOMEM;
  1054. }
  1055. return retval;
  1056. }
/**
 * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context.
 *
 * Currently a no-op: the RX ring info shared with IPA is recorded in
 * dp_ipa_ring_resource_setup(), so there is nothing to allocate here.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
  1072. int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
  1073. {
  1074. int error;
  1075. if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
  1076. return QDF_STATUS_SUCCESS;
  1077. /* TX resource attach */
  1078. error = dp_tx_ipa_uc_attach(soc, pdev);
  1079. if (error) {
  1080. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1081. "%s: DP IPA UC TX attach fail code %d",
  1082. __func__, error);
  1083. return error;
  1084. }
  1085. /* Setup 2nd TX pipe */
  1086. error = dp_ipa_tx_alt_pool_attach(soc);
  1087. if (error) {
  1088. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1089. "%s: DP IPA TX pool2 attach fail code %d",
  1090. __func__, error);
  1091. dp_tx_ipa_uc_detach(soc, pdev);
  1092. return error;
  1093. }
  1094. /* RX resource attach */
  1095. error = dp_rx_ipa_uc_attach(soc, pdev);
  1096. if (error) {
  1097. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1098. "%s: DP IPA UC RX attach fail code %d",
  1099. __func__, error);
  1100. dp_ipa_tx_alt_pool_detach(soc, pdev);
  1101. dp_tx_ipa_uc_detach(soc, pdev);
  1102. return error;
  1103. }
  1104. return QDF_STATUS_SUCCESS; /* success */
  1105. }
/**
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: core txrx pdev context
 *
 * Record base/size/doorbell addresses of the TCL data, TX completion (WBM),
 * REO destination and RX refill rings used by IPA, and program the REO
 * destination remap register.
 *
 * Return: 0 on success
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;
	uint32_t ix0;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = (struct hal_srng *)
		soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* entry_size is in 32-bit words; << 2 converts to bytes */
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr =
		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				     hal_srng_to_hal_ring_handle(hal_srng));
	/* Destination ring: IPA rings the WBM tail pointer doorbell */
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	/* No-op unless IPA_WDI3_TX_TWO_PIPES is defined */
	dp_ipa_tx_alt_ring_resource_setup(soc);

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = (struct hal_srng *)
		soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	/* IPA RX refill buffer ring */
	hal_srng = (struct hal_srng *)
		pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/*
	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
	 * DESTINATION_RING_CTRL_IX_0.
	 */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);

	return 0;
}
/**
 * dp_ipa_get_resource() - Fill the pdev IPA resource struct with the ring
 *	and buffer info to be shared with the IPA driver
 * @soc_hdl: data path SoC handle
 * @pdev_id: id of the pdev whose resources are exported
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	ipa_res->tx_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	/* Shared-memory descriptors for each ring exported to IPA */
	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->rx_refill_ring,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/* Every exported ring must have a valid DMA address */
	if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->tx_comp_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->rx_refill_ring.mem_info))
		return QDF_STATUS_E_FAILURE;

	/* Alternate TX ring resources (no-op without IPA_WDI3_TX_TWO_PIPES) */
	if (dp_ipa_tx_alt_ring_get_resource(pdev))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
  1285. #ifdef IPA_SET_RESET_TX_DB_PA
  1286. #define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res)
  1287. #else
  1288. #define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \
  1289. dp_ipa_set_tx_doorbell_paddr(soc, ipa_res)
  1290. #endif
/**
 * dp_ipa_set_doorbell_paddr() - Map the IPA doorbell registers and program
 *	them into the TX completion and REO destination rings
 * @soc_hdl: data path SoC handle
 * @pdev_id: pdev id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	struct hal_srng *reo_srng = (struct hal_srng *)
		soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	dp_ipa_map_ring_doorbell_paddr(pdev);
	/* Expands to nothing when IPA_SET_RESET_TX_DB_PA is defined */
	DP_IPA_SET_TX_DB_PADDR(soc, ipa_res);

	/*
	 * For RX, REO module on Napier/Hastings does reordering on incoming
	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
	 * ring. It then updates the ring's Write/Head ptr and rings a
	 * doorbell to IPA.
	 * Set the doorbell addr for the REO ring.
	 */
	hal_srng_dst_set_hp_paddr_confirm(reo_srng,
					  ipa_res->rx_ready_doorbell_paddr);
	return QDF_STATUS_SUCCESS;
}
  1319. QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1320. uint8_t *op_msg)
  1321. {
  1322. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1323. struct dp_pdev *pdev =
  1324. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1325. if (!pdev) {
  1326. dp_err("Invalid instance");
  1327. return QDF_STATUS_E_FAILURE;
  1328. }
  1329. if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
  1330. return QDF_STATUS_SUCCESS;
  1331. if (pdev->ipa_uc_op_cb) {
  1332. pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
  1333. } else {
  1334. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1335. "%s: IPA callback function is not registered", __func__);
  1336. qdf_mem_free(op_msg);
  1337. return QDF_STATUS_E_FAILURE;
  1338. }
  1339. return QDF_STATUS_SUCCESS;
  1340. }
  1341. QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1342. ipa_uc_op_cb_type op_cb,
  1343. void *usr_ctxt)
  1344. {
  1345. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1346. struct dp_pdev *pdev =
  1347. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1348. if (!pdev) {
  1349. dp_err("Invalid instance");
  1350. return QDF_STATUS_E_FAILURE;
  1351. }
  1352. if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
  1353. return QDF_STATUS_SUCCESS;
  1354. pdev->ipa_uc_op_cb = op_cb;
  1355. pdev->usr_ctxt = usr_ctxt;
  1356. return QDF_STATUS_SUCCESS;
  1357. }
  1358. void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1359. {
  1360. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1361. struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1362. if (!pdev) {
  1363. dp_err("Invalid instance");
  1364. return;
  1365. }
  1366. dp_debug("Deregister OP handler callback");
  1367. pdev->ipa_uc_op_cb = NULL;
  1368. pdev->usr_ctxt = NULL;
  1369. }
/**
 * dp_ipa_get_stat() - Fetch IPA datapath statistics
 * @soc_hdl: datapath soc handle (unused)
 * @pdev_id: id of the physical device instance (unused)
 *
 * Stub: no statistics are collected yet; always reports success.
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}
  1375. /**
  1376. * dp_tx_send_ipa_data_frame() - send IPA data frame
  1377. * @soc_hdl: datapath soc handle
  1378. * @vdev_id: id of the virtual device
  1379. * @skb: skb to transmit
  1380. *
  1381. * Return: skb/ NULL is for success
  1382. */
  1383. qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1384. qdf_nbuf_t skb)
  1385. {
  1386. qdf_nbuf_t ret;
  1387. /* Terminate the (single-element) list of tx frames */
  1388. qdf_nbuf_set_next(skb, NULL);
  1389. ret = dp_tx_send(soc_hdl, vdev_id, skb);
  1390. if (ret) {
  1391. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1392. "%s: Failed to tx", __func__);
  1393. return ret;
  1394. }
  1395. return NULL;
  1396. }
/**
 * dp_ipa_enable_autonomy() - Route RX traffic to the REO2IPA ring
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device instance
 *
 * Rewrites the REO destination remap registers so that SW destinations
 * are steered to REO_REMAP_SW4 (the IPA ring): IX0 entries 1/3/4 and,
 * when RX hashing is in use, all eight IX2/IX3 hash buckets.
 *
 * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_AGAIN if the target is not
 *         ready yet, QDF_STATUS_E_FAILURE for a bad pdev_id
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* Every hash bucket goes to SW4, so the same value is
		 * deliberately written to both the IX2 and IX3 registers.
		 */
		ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW4, 16) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 17) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 20) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 22) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix2);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_ipa_disable_autonomy() - Restore default REO destination remapping
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device instance
 *
 * Undoes dp_ipa_enable_autonomy(): IX0 entries go back to the SW1-SW3
 * host rings, and the hash-based IX2/IX3 values are restored from
 * dp_reo_remap_config() when RX hashing is enabled.
 *
 * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_AGAIN if the target is not
 *         ready yet, QDF_STATUS_E_FAILURE for a bad pdev_id
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;
	uint32_t ix3;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* Recompute the default hash-bucket distribution */
		dp_reo_remap_config(soc, &ix2, &ix3);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix3);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
/*
 * Bytes of padding placed before the L3 header, presumably so the IP
 * header starts on a 4-byte boundary — TODO confirm per target.
 * This should be configurable per H/W configuration enable status.
 */
#define L3_HEADER_PADDING 2
  1479. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
  1480. defined(CONFIG_IPA_WDI_UNIFIED_API)
  1481. #ifndef QCA_LL_TX_FLOW_CONTROL_V2
  1482. static inline void dp_setup_mcc_sys_pipes(
  1483. qdf_ipa_sys_connect_params_t *sys_in,
  1484. qdf_ipa_wdi_conn_in_params_t *pipe_in)
  1485. {
  1486. /* Setup MCC sys pipe */
  1487. QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
  1488. DP_IPA_MAX_IFACE;
  1489. for (int i = 0; i < DP_IPA_MAX_IFACE; i++)
  1490. memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
  1491. &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
  1492. }
  1493. #else
/**
 * dp_setup_mcc_sys_pipes() - MCC sys-pipe setup, flow-control-V2 variant
 * @sys_in: sys connect params (unused in this configuration)
 * @pipe_in: WDI connection input to fill
 *
 * With QCA_LL_TX_FLOW_CONTROL_V2 no dedicated sys pipes are requested.
 */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
  1500. #endif
/**
 * dp_ipa_wdi_tx_params() - Fill WDI TX pipe setup info (non-SMMU)
 * @soc: datapath soc
 * @ipa_res: pdev IPA resources (rings, buffer counts)
 * @tx: WDI TX pipe setup info to fill
 * @over_gsi: true selects the WLAN2 (GSI) consumer client
 *
 * Transfer ring is the WBM completion ring (doorbell = WBM tail ptr);
 * event ring is the TCL ring (doorbell = TCL head ptr). Also preprograms
 * the TCL descriptor template IPA uses for each TX packet.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	/* TLV header occupies the first word; payload template follows */
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
}
/**
 * dp_ipa_wdi_rx_params() - Fill WDI RX pipe setup info (non-SMMU)
 * @soc: datapath soc
 * @ipa_res: pdev IPA resources (rings)
 * @rx: WDI RX pipe setup info to fill
 * @over_gsi: true selects the WLAN2 (GSI) producer client
 *
 * Transfer ring is the REO2IPA ring (doorbell = REO tail ptr); event
 * ring is the FW refill ring (doorbell = FW head ptr). Packet payload
 * starts after the RX TLVs plus L3 padding.
 */
static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN1_PROD;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}
/**
 * dp_ipa_wdi_tx_smmu_params() - Fill WDI TX pipe setup info (SMMU)
 * @soc: datapath soc
 * @ipa_res: pdev IPA resources (rings, buffer counts)
 * @tx_smmu: WDI TX SMMU pipe setup info to fill
 * @over_gsi: true selects the WLAN2 (GSI) consumer client
 *
 * SMMU counterpart of dp_ipa_wdi_tx_params(): ring bases are passed as
 * scatter-gather tables instead of physical addresses; the doorbell
 * addresses and TCL descriptor template are identical.
 */
static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	/* TLV header occupies the first word; payload template follows */
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
}
/**
 * dp_ipa_wdi_rx_smmu_params() - Fill WDI RX pipe setup info (SMMU)
 * @soc: datapath soc
 * @ipa_res: pdev IPA resources (rings)
 * @rx_smmu: WDI RX SMMU pipe setup info to fill
 * @over_gsi: true selects the WLAN2 (GSI) producer client
 *
 * SMMU counterpart of dp_ipa_wdi_rx_params(): ring bases are passed as
 * scatter-gather tables instead of physical addresses.
 */
static void
dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
			  bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN1_PROD;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}
  1669. QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1670. void *ipa_i2w_cb, void *ipa_w2i_cb,
  1671. void *ipa_wdi_meter_notifier_cb,
  1672. uint32_t ipa_desc_size, void *ipa_priv,
  1673. bool is_rm_enabled, uint32_t *tx_pipe_handle,
  1674. uint32_t *rx_pipe_handle, bool is_smmu_enabled,
  1675. qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi)
  1676. {
  1677. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1678. struct dp_pdev *pdev =
  1679. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1680. struct dp_ipa_resources *ipa_res;
  1681. qdf_ipa_ep_cfg_t *tx_cfg;
  1682. qdf_ipa_ep_cfg_t *rx_cfg;
  1683. qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
  1684. qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
  1685. qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
  1686. qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL;
  1687. qdf_ipa_wdi_conn_in_params_t *pipe_in = NULL;
  1688. qdf_ipa_wdi_conn_out_params_t pipe_out;
  1689. int ret;
  1690. if (!pdev) {
  1691. dp_err("Invalid instance");
  1692. return QDF_STATUS_E_FAILURE;
  1693. }
  1694. ipa_res = &pdev->ipa_resource;
  1695. if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
  1696. return QDF_STATUS_SUCCESS;
  1697. pipe_in = qdf_mem_malloc(sizeof(*pipe_in));
  1698. if (!pipe_in)
  1699. return QDF_STATUS_E_NOMEM;
  1700. qdf_mem_zero(&pipe_out, sizeof(pipe_out));
  1701. if (is_smmu_enabled)
  1702. QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = true;
  1703. else
  1704. QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = false;
  1705. dp_setup_mcc_sys_pipes(sys_in, pipe_in);
  1706. /* TX PIPE */
  1707. if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
  1708. tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in);
  1709. tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
  1710. } else {
  1711. tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in);
  1712. tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
  1713. }
  1714. QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
  1715. QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  1716. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
  1717. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
  1718. QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
  1719. QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
  1720. QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
  1721. /**
  1722. * Transfer Ring: WBM Ring
  1723. * Transfer Ring Doorbell PA: WBM Tail Pointer Address
  1724. * Event Ring: TCL ring
  1725. * Event Ring Doorbell PA: TCL Head Pointer Address
  1726. */
  1727. if (is_smmu_enabled)
  1728. dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi);
  1729. else
  1730. dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);
  1731. dp_ipa_setup_tx_alt_pipe(soc, ipa_res, pipe_in);
  1732. /* RX PIPE */
  1733. if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
  1734. rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in);
  1735. rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
  1736. } else {
  1737. rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in);
  1738. rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
  1739. }
  1740. QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
  1741. QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
  1742. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
  1743. QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
  1744. QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
  1745. QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
  1746. QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
  1747. QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
  1748. QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
  1749. /**
  1750. * Transfer Ring: REO Ring
  1751. * Transfer Ring Doorbell PA: REO Tail Pointer Address
  1752. * Event Ring: FW ring
  1753. * Event Ring Doorbell PA: FW Head Pointer Address
  1754. */
  1755. if (is_smmu_enabled)
  1756. dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi);
  1757. else
  1758. dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);
  1759. QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) = ipa_w2i_cb;
  1760. QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) = ipa_priv;
  1761. /* Connect WDI IPA PIPEs */
  1762. ret = qdf_ipa_wdi_conn_pipes(pipe_in, &pipe_out);
  1763. if (ret) {
  1764. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1765. "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
  1766. __func__, ret);
  1767. qdf_mem_free(pipe_in);
  1768. return QDF_STATUS_E_FAILURE;
  1769. }
  1770. /* IPA uC Doorbell registers */
  1771. dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
  1772. (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
  1773. (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
  1774. dp_ipa_set_pipe_db(ipa_res, &pipe_out);
  1775. ipa_res->is_db_ddr_mapped =
  1776. QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out);
  1777. soc->ipa_first_tx_db_access = true;
  1778. qdf_mem_free(pipe_in);
  1779. return QDF_STATUS_SUCCESS;
  1780. }
  1781. /**
  1782. * dp_ipa_setup_iface() - Setup IPA header and register interface
  1783. * @ifname: Interface name
  1784. * @mac_addr: Interface MAC address
  1785. * @prod_client: IPA prod client type
  1786. * @cons_client: IPA cons client type
  1787. * @session_id: Session ID
  1788. * @is_ipv6_enabled: Is IPV6 enabled or not
  1789. *
  1790. * Return: QDF_STATUS
  1791. */
  1792. QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
  1793. qdf_ipa_client_type_t prod_client,
  1794. qdf_ipa_client_type_t cons_client,
  1795. uint8_t session_id, bool is_ipv6_enabled)
  1796. {
  1797. qdf_ipa_wdi_reg_intf_in_params_t in;
  1798. qdf_ipa_wdi_hdr_info_t hdr_info;
  1799. struct dp_ipa_uc_tx_hdr uc_tx_hdr;
  1800. struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
  1801. int ret = -EINVAL;
  1802. qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t));
  1803. dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname,
  1804. QDF_MAC_ADDR_REF(mac_addr));
  1805. qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  1806. qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
  1807. /* IPV4 header */
  1808. uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
  1809. QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
  1810. QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  1811. QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
  1812. QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
  1813. DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
  1814. QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
  1815. qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
  1816. &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  1817. QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
  1818. QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
  1819. QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
  1820. dp_ipa_setup_iface_session_id(&in, session_id);
  1821. /* IPV6 header */
  1822. if (is_ipv6_enabled) {
  1823. qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
  1824. DP_IPA_UC_WLAN_TX_HDR_LEN);
  1825. uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
  1826. QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
  1827. qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
  1828. &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  1829. }
  1830. dp_debug("registering for session_id: %u", session_id);
  1831. ret = qdf_ipa_wdi_reg_intf(&in);
  1832. if (ret) {
  1833. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1834. "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
  1835. __func__, ret);
  1836. return QDF_STATUS_E_FAILURE;
  1837. }
  1838. return QDF_STATUS_SUCCESS;
  1839. }
  1840. #else /* !CONFIG_IPA_WDI_UNIFIED_API */
  1841. QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1842. void *ipa_i2w_cb, void *ipa_w2i_cb,
  1843. void *ipa_wdi_meter_notifier_cb,
  1844. uint32_t ipa_desc_size, void *ipa_priv,
  1845. bool is_rm_enabled, uint32_t *tx_pipe_handle,
  1846. uint32_t *rx_pipe_handle)
  1847. {
  1848. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1849. struct dp_pdev *pdev =
  1850. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1851. struct dp_ipa_resources *ipa_res;
  1852. qdf_ipa_wdi_pipe_setup_info_t *tx;
  1853. qdf_ipa_wdi_pipe_setup_info_t *rx;
  1854. qdf_ipa_wdi_conn_in_params_t pipe_in;
  1855. qdf_ipa_wdi_conn_out_params_t pipe_out;
  1856. struct tcl_data_cmd *tcl_desc_ptr;
  1857. uint8_t *desc_addr;
  1858. uint32_t desc_size;
  1859. int ret;
  1860. if (!pdev) {
  1861. dp_err("Invalid instance");
  1862. return QDF_STATUS_E_FAILURE;
  1863. }
  1864. ipa_res = &pdev->ipa_resource;
  1865. if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
  1866. return QDF_STATUS_SUCCESS;
  1867. qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
  1868. qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
  1869. qdf_mem_zero(&pipe_in, sizeof(pipe_in));
  1870. qdf_mem_zero(&pipe_out, sizeof(pipe_out));
  1871. /* TX PIPE */
  1872. /**
  1873. * Transfer Ring: WBM Ring
  1874. * Transfer Ring Doorbell PA: WBM Tail Pointer Address
  1875. * Event Ring: TCL ring
  1876. * Event Ring Doorbell PA: TCL Head Pointer Address
  1877. */
  1878. tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
  1879. QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
  1880. QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  1881. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
  1882. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
  1883. QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
  1884. QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
  1885. QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
  1886. QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
  1887. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
  1888. ipa_res->tx_comp_ring_base_paddr;
  1889. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
  1890. ipa_res->tx_comp_ring_size;
  1891. /* WBM Tail Pointer Address */
  1892. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
  1893. soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
  1894. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
  1895. ipa_res->tx_ring_base_paddr;
  1896. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
  1897. /* TCL Head Pointer Address */
  1898. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
  1899. soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
  1900. QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
  1901. ipa_res->tx_num_alloc_buffer;
  1902. QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
  1903. /* Preprogram TCL descriptor */
  1904. desc_addr =
  1905. (uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
  1906. desc_size = sizeof(struct tcl_data_cmd);
  1907. HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
  1908. tcl_desc_ptr = (struct tcl_data_cmd *)
  1909. (QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
  1910. tcl_desc_ptr->buf_addr_info.return_buffer_manager =
  1911. HAL_RX_BUF_RBM_SW2_BM;
  1912. tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
  1913. tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
  1914. tcl_desc_ptr->packet_offset = 2; /* padding for alignment */
  1915. /* RX PIPE */
  1916. /**
  1917. * Transfer Ring: REO Ring
  1918. * Transfer Ring Doorbell PA: REO Tail Pointer Address
  1919. * Event Ring: FW ring
  1920. * Event Ring Doorbell PA: FW Head Pointer Address
  1921. */
  1922. rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
  1923. QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
  1924. QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
  1925. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
  1926. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
  1927. QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
  1928. QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
  1929. QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
  1930. QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
  1931. QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
  1932. QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
  1933. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
  1934. ipa_res->rx_rdy_ring_base_paddr;
  1935. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
  1936. ipa_res->rx_rdy_ring_size;
  1937. /* REO Tail Pointer Address */
  1938. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
  1939. soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
  1940. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
  1941. ipa_res->rx_refill_ring_base_paddr;
  1942. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
  1943. ipa_res->rx_refill_ring_size;
  1944. /* FW Head Pointer Address */
  1945. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
  1946. soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
  1947. QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN +
  1948. L3_HEADER_PADDING;
  1949. QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
  1950. QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
  1951. /* Connect WDI IPA PIPE */
  1952. ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
  1953. if (ret) {
  1954. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1955. "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
  1956. __func__, ret);
  1957. return QDF_STATUS_E_FAILURE;
  1958. }
  1959. /* IPA uC Doorbell registers */
  1960. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1961. "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
  1962. __func__,
  1963. (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
  1964. (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
  1965. ipa_res->tx_comp_doorbell_paddr =
  1966. QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
  1967. ipa_res->tx_comp_doorbell_vaddr =
  1968. QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
  1969. ipa_res->rx_ready_doorbell_paddr =
  1970. QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
  1971. soc->ipa_first_tx_db_access = true;
  1972. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1973. "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
  1974. __func__,
  1975. "transfer_ring_base_pa",
  1976. (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
  1977. "transfer_ring_size",
  1978. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
  1979. "transfer_ring_doorbell_pa",
  1980. (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
  1981. "event_ring_base_pa",
  1982. (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
  1983. "event_ring_size",
  1984. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
  1985. "event_ring_doorbell_pa",
  1986. (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
  1987. "num_pkt_buffers",
  1988. QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
  1989. "tx_comp_doorbell_paddr",
  1990. (void *)ipa_res->tx_comp_doorbell_paddr);
  1991. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1992. "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
  1993. __func__,
  1994. "transfer_ring_base_pa",
  1995. (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
  1996. "transfer_ring_size",
  1997. QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
  1998. "transfer_ring_doorbell_pa",
  1999. (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
  2000. "event_ring_base_pa",
  2001. (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
  2002. "event_ring_size",
  2003. QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
  2004. "event_ring_doorbell_pa",
  2005. (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
  2006. "num_pkt_buffers",
  2007. QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
  2008. "tx_comp_doorbell_paddr",
  2009. (void *)ipa_res->rx_ready_doorbell_paddr);
  2010. return QDF_STATUS_SUCCESS;
  2011. }
  2012. /**
  2013. * dp_ipa_setup_iface() - Setup IPA header and register interface
  2014. * @ifname: Interface name
  2015. * @mac_addr: Interface MAC address
  2016. * @prod_client: IPA prod client type
  2017. * @cons_client: IPA cons client type
  2018. * @session_id: Session ID
  2019. * @is_ipv6_enabled: Is IPV6 enabled or not
  2020. *
  2021. * Return: QDF_STATUS
  2022. */
  2023. QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
  2024. qdf_ipa_client_type_t prod_client,
  2025. qdf_ipa_client_type_t cons_client,
  2026. uint8_t session_id, bool is_ipv6_enabled)
  2027. {
  2028. qdf_ipa_wdi_reg_intf_in_params_t in;
  2029. qdf_ipa_wdi_hdr_info_t hdr_info;
  2030. struct dp_ipa_uc_tx_hdr uc_tx_hdr;
  2031. struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
  2032. int ret = -EINVAL;
  2033. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  2034. "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT,
  2035. __func__, ifname, QDF_MAC_ADDR_REF(mac_addr));
  2036. qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  2037. qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
  2038. /* IPV4 header */
  2039. uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
  2040. QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
  2041. QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
  2042. QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
  2043. QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
  2044. DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
  2045. QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
  2046. qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
  2047. &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  2048. QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
  2049. QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
  2050. htonl(session_id << 16);
  2051. QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
  2052. /* IPV6 header */
  2053. if (is_ipv6_enabled) {
  2054. qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
  2055. DP_IPA_UC_WLAN_TX_HDR_LEN);
  2056. uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
  2057. QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
  2058. qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
  2059. &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
  2060. }
  2061. ret = qdf_ipa_wdi_reg_intf(&in);
  2062. if (ret) {
  2063. dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
  2064. ret);
  2065. return QDF_STATUS_E_FAILURE;
  2066. }
  2067. return QDF_STATUS_SUCCESS;
  2068. }
  2069. #endif /* CONFIG_IPA_WDI_UNIFIED_API */
  2070. /**
  2071. * dp_ipa_cleanup() - Disconnect IPA pipes
  2072. * @soc_hdl: dp soc handle
  2073. * @pdev_id: dp pdev id
  2074. * @tx_pipe_handle: Tx pipe handle
  2075. * @rx_pipe_handle: Rx pipe handle
  2076. *
  2077. * Return: QDF_STATUS
  2078. */
  2079. QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  2080. uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
  2081. {
  2082. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2083. QDF_STATUS status = QDF_STATUS_SUCCESS;
  2084. struct dp_pdev *pdev;
  2085. int ret;
  2086. ret = qdf_ipa_wdi_disconn_pipes();
  2087. if (ret) {
  2088. dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
  2089. ret);
  2090. status = QDF_STATUS_E_FAILURE;
  2091. }
  2092. pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2093. if (qdf_unlikely(!pdev)) {
  2094. dp_err_rl("Invalid pdev for pdev_id %d", pdev_id);
  2095. status = QDF_STATUS_E_FAILURE;
  2096. goto exit;
  2097. }
  2098. dp_ipa_unmap_ring_doorbell_paddr(pdev);
  2099. exit:
  2100. return status;
  2101. }
  2102. /**
  2103. * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
  2104. * @ifname: Interface name
  2105. * @is_ipv6_enabled: Is IPV6 enabled or not
  2106. *
  2107. * Return: QDF_STATUS
  2108. */
  2109. QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
  2110. {
  2111. int ret;
  2112. ret = qdf_ipa_wdi_dereg_intf(ifname);
  2113. if (ret) {
  2114. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2115. "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
  2116. __func__, ret);
  2117. return QDF_STATUS_E_FAILURE;
  2118. }
  2119. return QDF_STATUS_SUCCESS;
  2120. }
#ifdef IPA_SET_RESET_TX_DB_PA
/*
 * When IPA_SET_RESET_TX_DB_PA is defined, map the set/reset helpers onto
 * the real tx doorbell-address routines; otherwise expand to nothing so
 * call sites need no #ifdef of their own.
 */
#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \
	dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res))
#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \
	dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res))
#else
#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res)
#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res)
#endif
  2130. QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  2131. {
  2132. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2133. struct dp_pdev *pdev =
  2134. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2135. struct dp_ipa_resources *ipa_res;
  2136. QDF_STATUS result;
  2137. if (!pdev) {
  2138. dp_err("Invalid instance");
  2139. return QDF_STATUS_E_FAILURE;
  2140. }
  2141. ipa_res = &pdev->ipa_resource;
  2142. qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
  2143. DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res);
  2144. dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);
  2145. result = qdf_ipa_wdi_enable_pipes();
  2146. if (result) {
  2147. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2148. "%s: Enable WDI PIPE fail, code %d",
  2149. __func__, result);
  2150. qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
  2151. DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
  2152. dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
  2153. return QDF_STATUS_E_FAILURE;
  2154. }
  2155. if (soc->ipa_first_tx_db_access) {
  2156. dp_ipa_tx_comp_ring_init_hp(soc, ipa_res);
  2157. soc->ipa_first_tx_db_access = false;
  2158. }
  2159. return QDF_STATUS_SUCCESS;
  2160. }
  2161. QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  2162. {
  2163. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2164. struct dp_pdev *pdev =
  2165. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2166. QDF_STATUS result;
  2167. struct dp_ipa_resources *ipa_res;
  2168. if (!pdev) {
  2169. dp_err("Invalid instance");
  2170. return QDF_STATUS_E_FAILURE;
  2171. }
  2172. ipa_res = &pdev->ipa_resource;
  2173. qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS);
  2174. /*
  2175. * Reset the tx completion doorbell address before invoking IPA disable
  2176. * pipes API to ensure that there is no access to IPA tx doorbell
  2177. * address post disable pipes.
  2178. */
  2179. DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
  2180. result = qdf_ipa_wdi_disable_pipes();
  2181. if (result) {
  2182. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2183. "%s: Disable WDI PIPE fail, code %d",
  2184. __func__, result);
  2185. qdf_assert_always(0);
  2186. return QDF_STATUS_E_FAILURE;
  2187. }
  2188. qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
  2189. dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
  2190. return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
  2191. }
  2192. /**
  2193. * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
  2194. * @client: Client type
  2195. * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
  2196. *
  2197. * Return: QDF_STATUS
  2198. */
  2199. QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
  2200. {
  2201. qdf_ipa_wdi_perf_profile_t profile;
  2202. QDF_STATUS result;
  2203. profile.client = client;
  2204. profile.max_supported_bw_mbps = max_supported_bw_mbps;
  2205. result = qdf_ipa_wdi_set_perf_profile(&profile);
  2206. if (result) {
  2207. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2208. "%s: ipa_wdi_set_perf_profile fail, code %d",
  2209. __func__, result);
  2210. return QDF_STATUS_E_FAILURE;
  2211. }
  2212. return QDF_STATUS_SUCCESS;
  2213. }
  2214. /**
  2215. * dp_ipa_intrabss_send - send IPA RX intra-bss frames
  2216. * @pdev: pdev
  2217. * @vdev: vdev
  2218. * @nbuf: skb
  2219. *
  2220. * Return: nbuf if TX fails and NULL if TX succeeds
  2221. */
  2222. static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
  2223. struct dp_vdev *vdev,
  2224. qdf_nbuf_t nbuf)
  2225. {
  2226. struct dp_peer *vdev_peer;
  2227. uint16_t len;
  2228. vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA);
  2229. if (qdf_unlikely(!vdev_peer))
  2230. return nbuf;
  2231. qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
  2232. len = qdf_nbuf_len(nbuf);
  2233. if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
  2234. DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
  2235. dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
  2236. return nbuf;
  2237. }
  2238. DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len);
  2239. dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
  2240. return NULL;
  2241. }
/**
 * dp_ipa_rx_intrabss_fwd() - Decide on and perform intra-bss forwarding
 * for an RX frame handed up by the IPA driver
 * @soc_hdl: cdp soc handle
 * @vdev_id: vdev id the frame was received on
 * @nbuf: RX skb (allocated by the IPA driver)
 * @fwd_success: out param, set true when the frame (or its copy) was
 *               successfully transmitted back into the BSS
 *
 * Return: true when @nbuf was consumed here (sent or freed) and must NOT
 * be passed up the stack; false when the caller should deliver @nbuf to
 * the stack as usual.
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_IPA);
	struct dp_pdev *pdev;
	struct dp_peer *da_peer;
	struct dp_peer *sa_peer;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;
	bool status = false;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		goto out;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		goto out;

	if (da_is_bcmc) {
		/* Broadcast/multicast: transmit a copy back into the BSS
		 * and still deliver the original to the stack (return
		 * value stays false via the out path). */
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto out;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		goto out;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* Frames addressed to the vdev itself belong to the local stack. */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		goto out;

	/* Forward only when both DA and SA are known peers on this vdev.
	 * The references are dropped immediately: only existence is
	 * checked here. */
	da_peer = dp_peer_find_hash_find(soc, eh->h_dest, 0, vdev->vdev_id,
					 DP_MOD_ID_IPA);
	if (!da_peer)
		goto out;

	dp_peer_unref_delete(da_peer, DP_MOD_ID_IPA);

	sa_peer = dp_peer_find_hash_find(soc, eh->h_source, 0, vdev->vdev_id,
					 DP_MOD_ID_IPA);
	if (!sa_peer)
		goto out;

	dp_peer_unref_delete(sa_peer, DP_MOD_ID_IPA);

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	status = true;
out:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
	return status;
}
#ifdef MDM_PLATFORM
/**
 * dp_ipa_is_mdm_platform() - Report whether this build targets an MDM
 * platform (decided at compile time by MDM_PLATFORM)
 *
 * Return: true when compiled with MDM_PLATFORM, false otherwise
 */
bool dp_ipa_is_mdm_platform(void)
{
	return true;
}
#else
bool dp_ipa_is_mdm_platform(void)
{
	return false;
}
#endif
  2320. /**
  2321. * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA
  2322. * @soc: soc
  2323. * @nbuf: source skb
  2324. *
  2325. * Return: new nbuf if success and otherwise NULL
  2326. */
  2327. static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
  2328. qdf_nbuf_t nbuf)
  2329. {
  2330. uint8_t *src_nbuf_data;
  2331. uint8_t *dst_nbuf_data;
  2332. qdf_nbuf_t dst_nbuf;
  2333. qdf_nbuf_t temp_nbuf = nbuf;
  2334. uint32_t nbuf_len = qdf_nbuf_len(nbuf);
  2335. bool is_nbuf_head = true;
  2336. uint32_t copy_len = 0;
  2337. dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE,
  2338. RX_BUFFER_RESERVATION,
  2339. RX_DATA_BUFFER_ALIGNMENT, FALSE);
  2340. if (!dst_nbuf) {
  2341. dp_err_rl("nbuf allocate fail");
  2342. return NULL;
  2343. }
  2344. if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) {
  2345. qdf_nbuf_free(dst_nbuf);
  2346. dp_err_rl("nbuf is jumbo data");
  2347. return NULL;
  2348. }
  2349. /* prepeare to copy all data into new skb */
  2350. dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
  2351. while (temp_nbuf) {
  2352. src_nbuf_data = qdf_nbuf_data(temp_nbuf);
  2353. /* first head nbuf */
  2354. if (is_nbuf_head) {
  2355. qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
  2356. RX_PKT_TLVS_LEN);
  2357. /* leave extra 2 bytes L3_HEADER_PADDING */
  2358. dst_nbuf_data += (RX_PKT_TLVS_LEN + L3_HEADER_PADDING);
  2359. src_nbuf_data += RX_PKT_TLVS_LEN;
  2360. copy_len = qdf_nbuf_headlen(temp_nbuf) -
  2361. RX_PKT_TLVS_LEN;
  2362. temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
  2363. is_nbuf_head = false;
  2364. } else {
  2365. copy_len = qdf_nbuf_len(temp_nbuf);
  2366. temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
  2367. }
  2368. qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
  2369. dst_nbuf_data += copy_len;
  2370. }
  2371. qdf_nbuf_set_len(dst_nbuf, nbuf_len);
  2372. /* copy is done, free original nbuf */
  2373. qdf_nbuf_free(nbuf);
  2374. return dst_nbuf;
  2375. }
  2376. /**
  2377. * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
  2378. * @soc: soc
  2379. * @nbuf: skb
  2380. *
  2381. * Return: nbuf if success and otherwise NULL
  2382. */
  2383. qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2384. {
  2385. if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
  2386. return nbuf;
  2387. /* WLAN IPA is run-time disabled */
  2388. if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
  2389. return nbuf;
  2390. if (!qdf_nbuf_is_frag(nbuf))
  2391. return nbuf;
  2392. /* linearize skb for IPA */
  2393. return dp_ipa_frag_nbuf_linearize(soc, nbuf);
  2394. }
  2395. QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
  2396. struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  2397. {
  2398. QDF_STATUS ret;
  2399. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2400. struct dp_pdev *pdev =
  2401. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2402. if (!pdev) {
  2403. dp_err("%s invalid instance", __func__);
  2404. return QDF_STATUS_E_FAILURE;
  2405. }
  2406. if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
  2407. dp_debug("SMMU S1 disabled");
  2408. return QDF_STATUS_SUCCESS;
  2409. }
  2410. ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true);
  2411. if (ret)
  2412. return ret;
  2413. ret = dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, true);
  2414. if (ret)
  2415. __dp_ipa_tx_buf_smmu_mapping(soc, pdev, false);
  2416. return ret;
  2417. }
  2418. QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
  2419. struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  2420. {
  2421. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2422. struct dp_pdev *pdev =
  2423. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2424. if (!pdev) {
  2425. dp_err("%s invalid instance", __func__);
  2426. return QDF_STATUS_E_FAILURE;
  2427. }
  2428. if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
  2429. dp_debug("SMMU S1 disabled");
  2430. return QDF_STATUS_SUCCESS;
  2431. }
  2432. if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false) ||
  2433. dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false))
  2434. return QDF_STATUS_E_FAILURE;
  2435. return QDF_STATUS_SUCCESS;
  2436. }
  2437. #endif