/* NOTE(review): the text that appeared here was a scraped page/line-number
 * index (extraction artifact), not part of the source file.
 */
  1. // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
  2. /* Copyright(c) 2019-2020 Realtek Corporation
  3. */
  4. #include "cam.h"
  5. #include "chan.h"
  6. #include "debug.h"
  7. #include "fw.h"
  8. #include "mac.h"
  9. #include "ps.h"
  10. #include "reg.h"
  11. #include "util.h"
/* Base addresses of the MAC's internal memory regions, indexed by
 * enum rtw89_mac_mem_sel.  Used by the indirect-access helpers below
 * (rtw89_mac_mem_write/read) to translate a (region, offset) pair into
 * an absolute address for R_AX_FILTER_MODEL_ADDR.
 */
const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = {
	[RTW89_MAC_MEM_AXIDMA]	        = AXIDMA_BASE_ADDR,
	[RTW89_MAC_MEM_SHARED_BUF]	= SHARED_BUF_BASE_ADDR,
	[RTW89_MAC_MEM_DMAC_TBL]	= DMAC_TBL_BASE_ADDR,
	[RTW89_MAC_MEM_SHCUT_MACHDR]	= SHCUT_MACHDR_BASE_ADDR,
	[RTW89_MAC_MEM_STA_SCHED]	= STA_SCHED_BASE_ADDR,
	[RTW89_MAC_MEM_RXPLD_FLTR_CAM]	= RXPLD_FLTR_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_SECURITY_CAM]	= SECURITY_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_WOW_CAM]		= WOW_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_CMAC_TBL]	= CMAC_TBL_BASE_ADDR,
	[RTW89_MAC_MEM_ADDR_CAM]	= ADDR_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_BA_CAM]		= BA_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_BCN_IE_CAM0]	= BCN_IE_CAM0_BASE_ADDR,
	[RTW89_MAC_MEM_BCN_IE_CAM1]	= BCN_IE_CAM1_BASE_ADDR,
	[RTW89_MAC_MEM_TXD_FIFO_0]	= TXD_FIFO_0_BASE_ADDR,
	[RTW89_MAC_MEM_TXD_FIFO_1]	= TXD_FIFO_1_BASE_ADDR,
	[RTW89_MAC_MEM_TXDATA_FIFO_0]	= TXDATA_FIFO_0_BASE_ADDR,
	[RTW89_MAC_MEM_TXDATA_FIFO_1]	= TXDATA_FIFO_1_BASE_ADDR,
	[RTW89_MAC_MEM_CPU_LOCAL]	= CPU_LOCAL_BASE_ADDR,
	[RTW89_MAC_MEM_BSSID_CAM]	= BSSID_CAM_BASE_ADDR,
};
  33. static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset,
  34. u32 val, enum rtw89_mac_mem_sel sel)
  35. {
  36. u32 addr = rtw89_mac_mem_base_addrs[sel] + offset;
  37. rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr);
  38. rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, val);
  39. }
  40. static u32 rtw89_mac_mem_read(struct rtw89_dev *rtwdev, u32 offset,
  41. enum rtw89_mac_mem_sel sel)
  42. {
  43. u32 addr = rtw89_mac_mem_base_addrs[sel] + offset;
  44. rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr);
  45. return rtw89_read32(rtwdev, R_AX_INDIR_ACCESS_ENTRY);
  46. }
  47. int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx,
  48. enum rtw89_mac_hwmod_sel sel)
  49. {
  50. u32 val, r_val;
  51. if (sel == RTW89_DMAC_SEL) {
  52. r_val = rtw89_read32(rtwdev, R_AX_DMAC_FUNC_EN);
  53. val = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN);
  54. } else if (sel == RTW89_CMAC_SEL && mac_idx == 0) {
  55. r_val = rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN);
  56. val = B_AX_CMAC_EN;
  57. } else if (sel == RTW89_CMAC_SEL && mac_idx == 1) {
  58. r_val = rtw89_read32(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND);
  59. val = B_AX_CMAC1_FEN;
  60. } else {
  61. return -EINVAL;
  62. }
  63. if (r_val == RTW89_R32_EA || r_val == RTW89_R32_DEAD ||
  64. (val & r_val) != val)
  65. return -EFAULT;
  66. return 0;
  67. }
/* Write a register in the LTE/BT-coexistence block via its indirect
 * access interface.
 *
 * Polls the ready flag (BIT(5) in the top byte of R_AX_LTE_CTRL) for up
 * to 50 ms before issuing the write; on timeout it logs an error but
 * still performs the write and returns the poll's error code.
 * 0xC00F0000 is the write command word OR'ed with the target offset —
 * presumably command + byte-enable bits; confirm against the vendor
 * register spec.
 */
int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val)
{
	u8 lte_ctrl;
	int ret;

	ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0,
				50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3);
	if (ret)
		rtw89_err(rtwdev, "[ERR]lte not ready(W)\n");

	rtw89_write32(rtwdev, R_AX_LTE_WDATA, val);
	rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0xC00F0000 | offset);

	return ret;
}
  80. int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val)
  81. {
  82. u8 lte_ctrl;
  83. int ret;
  84. ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0,
  85. 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3);
  86. if (ret)
  87. rtw89_err(rtwdev, "[ERR]lte not ready(W)\n");
  88. rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0x800F0000 | offset);
  89. *val = rtw89_read32(rtwdev, R_AX_LTE_RDATA);
  90. return ret;
  91. }
/* Issue one DLE (data link engine) debug-function-interface transaction.
 *
 * Selects the control/data register pair for the requested engine
 * (WDE or PLE), writes the target/address command with the ACTIVE bit
 * set, polls until the hardware clears the ACTIVE bit, then latches the
 * result into ctrl->out_data.
 *
 * Returns 0 on success, -EINVAL for an unknown engine type, or the
 * (negative) poll-timeout error.
 */
static
int dle_dfi_ctrl(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl)
{
	u32 ctrl_reg, data_reg, ctrl_data;
	u32 val;
	int ret;

	switch (ctrl->type) {
	case DLE_CTRL_TYPE_WDE:
		ctrl_reg = R_AX_WDE_DBG_FUN_INTF_CTL;
		data_reg = R_AX_WDE_DBG_FUN_INTF_DATA;
		ctrl_data = FIELD_PREP(B_AX_WDE_DFI_TRGSEL_MASK, ctrl->target) |
			    FIELD_PREP(B_AX_WDE_DFI_ADDR_MASK, ctrl->addr) |
			    B_AX_WDE_DFI_ACTIVE;
		break;
	case DLE_CTRL_TYPE_PLE:
		ctrl_reg = R_AX_PLE_DBG_FUN_INTF_CTL;
		data_reg = R_AX_PLE_DBG_FUN_INTF_DATA;
		ctrl_data = FIELD_PREP(B_AX_PLE_DFI_TRGSEL_MASK, ctrl->target) |
			    FIELD_PREP(B_AX_PLE_DFI_ADDR_MASK, ctrl->addr) |
			    B_AX_PLE_DFI_ACTIVE;
		break;
	default:
		rtw89_warn(rtwdev, "[ERR] dfi ctrl type %d\n", ctrl->type);
		return -EINVAL;
	}

	rtw89_write32(rtwdev, ctrl_reg, ctrl_data);

	/* NOTE(review): the poll uses B_AX_WDE_DFI_ACTIVE for both the WDE
	 * and PLE paths — presumably the ACTIVE bit occupies the same
	 * position in both registers; confirm against the register map.
	 */
	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_WDE_DFI_ACTIVE),
				       1, 1000, false, rtwdev, ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "[ERR] dle dfi ctrl 0x%X set 0x%X timeout\n",
			   ctrl_reg, ctrl_data);
		return ret;
	}

	ctrl->out_data = rtw89_read32(rtwdev, data_reg);
	return 0;
}
  128. static int dle_dfi_quota(struct rtw89_dev *rtwdev,
  129. struct rtw89_mac_dle_dfi_quota *quota)
  130. {
  131. struct rtw89_mac_dle_dfi_ctrl ctrl;
  132. int ret;
  133. ctrl.type = quota->dle_type;
  134. ctrl.target = DLE_DFI_TYPE_QUOTA;
  135. ctrl.addr = quota->qtaid;
  136. ret = dle_dfi_ctrl(rtwdev, &ctrl);
  137. if (ret) {
  138. rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret);
  139. return ret;
  140. }
  141. quota->rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, ctrl.out_data);
  142. quota->use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, ctrl.out_data);
  143. return 0;
  144. }
  145. static int dle_dfi_qempty(struct rtw89_dev *rtwdev,
  146. struct rtw89_mac_dle_dfi_qempty *qempty)
  147. {
  148. struct rtw89_mac_dle_dfi_ctrl ctrl;
  149. u32 ret;
  150. ctrl.type = qempty->dle_type;
  151. ctrl.target = DLE_DFI_TYPE_QEMPTY;
  152. ctrl.addr = qempty->grpsel;
  153. ret = dle_dfi_ctrl(rtwdev, &ctrl);
  154. if (ret) {
  155. rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret);
  156. return ret;
  157. }
  158. qempty->qempty = FIELD_GET(B_AX_DLE_QEMPTY_GRP, ctrl.out_data);
  159. return 0;
  160. }
/* Dump the dispatcher IMR/ISR register pairs (host, CPU, other) for SER
 * diagnostics.  Each pair is printed on one logical line: the IMR print
 * ends with a space instead of a newline so the matching ISR print
 * completes the line.
 *
 * NOTE(review): the printed labels say "ALWAYS" while the registers read
 * are the *_ERR_IMR/ISR ones — presumably the label reflects vendor
 * naming; confirm before renaming either side.
 */
static void dump_err_status_dispatcher(struct rtw89_dev *rtwdev)
{
	rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_IMR=0x%08x ",
		   rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
	rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
	rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_IMR=0x%08x ",
		   rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
	rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
	rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_IMR=0x%08x ",
		   rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
	rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
}
/* Dump PLE state after an RX quota-lost event (L0->L1 SER promotion):
 * group-0 queue-empty bitmap, per-queue packet counts for every
 * non-empty queue, the CMAC0-RX quota (quota ID 6) usage, its
 * configured min/max page sizes, and the dispatcher IMR/ISR pairs.
 * Diagnostics only — each query failure is logged and skipped, never
 * propagated.
 */
static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev)
{
	struct rtw89_mac_dle_dfi_qempty qempty;
	struct rtw89_mac_dle_dfi_quota quota;
	struct rtw89_mac_dle_dfi_ctrl ctrl;
	u32 val, not_empty, i;
	int ret;

	qempty.dle_type = DLE_CTRL_TYPE_PLE;
	qempty.grpsel = 0;
	/* Start from "all empty" so a failed query walks no queues below */
	qempty.qempty = ~(u32)0;
	ret = dle_dfi_qempty(rtwdev, &qempty);
	if (ret)
		rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
	else
		rtw89_info(rtwdev, "DLE group0 empty: 0x%x\n", qempty.qempty);

	/* Walk the inverted bitmap: each set bit is a non-empty queue index */
	for (not_empty = ~qempty.qempty, i = 0; not_empty != 0; not_empty >>= 1, i++) {
		if (!(not_empty & BIT(0)))
			continue;
		ctrl.type = DLE_CTRL_TYPE_PLE;
		ctrl.target = DLE_DFI_TYPE_QLNKTBL;
		/* NOTE(review): QLNKTBL_ADDR_INFO_SEL_0 is used as a boolean
		 * here, so this selects either QLNKTBL_ADDR_INFO_SEL or 0 at
		 * compile time — presumably intentional macro-driven config;
		 * confirm the macro's definition before simplifying.
		 */
		ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? QLNKTBL_ADDR_INFO_SEL : 0) |
			    FIELD_PREP(QLNKTBL_ADDR_TBL_IDX_MASK, i);
		ret = dle_dfi_ctrl(rtwdev, &ctrl);
		if (ret)
			rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
		else
			rtw89_info(rtwdev, "qidx%d pktcnt = %ld\n", i,
				   FIELD_GET(QLNKTBL_DATA_SEL1_PKT_CNT_MASK,
					     ctrl.out_data));
	}

	/* Quota ID 6 is CMAC0 RX (see the min/max prints below) */
	quota.dle_type = DLE_CTRL_TYPE_PLE;
	quota.qtaid = 6;
	ret = dle_dfi_quota(rtwdev, &quota);
	if (ret)
		rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
	else
		rtw89_info(rtwdev, "quota6 rsv/use: 0x%x/0x%x\n",
			   quota.rsv_pgnum, quota.use_pgnum);

	val = rtw89_read32(rtwdev, R_AX_PLE_QTA6_CFG);
	rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%lx\n",
		   FIELD_GET(B_AX_PLE_Q6_MIN_SIZE_MASK, val));
	rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%lx\n",
		   FIELD_GET(B_AX_PLE_Q6_MAX_SIZE_MASK, val));

	dump_err_status_dispatcher(rtwdev);
}
  221. static void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev,
  222. enum mac_ax_err_info err)
  223. {
  224. u32 dbg, event;
  225. dbg = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO);
  226. event = FIELD_GET(B_AX_L0_TO_L1_EVENT_MASK, dbg);
  227. switch (event) {
  228. case MAC_AX_L0_TO_L1_RX_QTA_LOST:
  229. rtw89_info(rtwdev, "quota lost!\n");
  230. rtw89_mac_dump_qta_lost(rtwdev);
  231. break;
  232. default:
  233. break;
  234. }
  235. }
  236. static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
  237. enum mac_ax_err_info err)
  238. {
  239. u32 dmac_err, cmac_err;
  240. if (err != MAC_AX_ERR_L1_ERR_DMAC &&
  241. err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
  242. err != MAC_AX_ERR_L0_ERR_CMAC0 &&
  243. err != MAC_AX_ERR_L0_ERR_CMAC1)
  244. return;
  245. rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
  246. rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n",
  247. rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
  248. cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR);
  249. rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR =0x%08x\n", cmac_err);
  250. dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR);
  251. rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR =0x%08x\n", dmac_err);
  252. if (dmac_err) {
  253. rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG =0x%08x ",
  254. rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG));
  255. rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG =0x%08x\n",
  256. rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG));
  257. }
  258. if (dmac_err & B_AX_WDRLS_ERR_FLAG) {
  259. rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR =0x%08x ",
  260. rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
  261. rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR =0x%08x\n",
  262. rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
  263. }
  264. if (dmac_err & B_AX_WSEC_ERR_FLAG) {
  265. rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR =0x%08x\n",
  266. rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
  267. rtw89_info(rtwdev, "SEC_local_Register 0x9D00 =0x%08x\n",
  268. rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
  269. rtw89_info(rtwdev, "SEC_local_Register 0x9D04 =0x%08x\n",
  270. rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
  271. rtw89_info(rtwdev, "SEC_local_Register 0x9D10 =0x%08x\n",
  272. rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
  273. rtw89_info(rtwdev, "SEC_local_Register 0x9D14 =0x%08x\n",
  274. rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
  275. rtw89_info(rtwdev, "SEC_local_Register 0x9D18 =0x%08x\n",
  276. rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
  277. rtw89_info(rtwdev, "SEC_local_Register 0x9D20 =0x%08x\n",
  278. rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
  279. rtw89_info(rtwdev, "SEC_local_Register 0x9D24 =0x%08x\n",
  280. rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
  281. rtw89_info(rtwdev, "SEC_local_Register 0x9D28 =0x%08x\n",
  282. rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
  283. rtw89_info(rtwdev, "SEC_local_Register 0x9D2C =0x%08x\n",
  284. rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
  285. }
  286. if (dmac_err & B_AX_MPDU_ERR_FLAG) {
  287. rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR =0x%08x ",
  288. rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR));
  289. rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR =0x%08x\n",
  290. rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
  291. rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR =0x%08x ",
  292. rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR));
  293. rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_ISR =0x%08x\n",
  294. rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
  295. }
  296. if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) {
  297. rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR =0x%08x ",
  298. rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
  299. rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR= 0x%08x\n",
  300. rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
  301. }
  302. if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) {
  303. rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ",
  304. rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
  305. rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n",
  306. rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
  307. rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ",
  308. rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
  309. rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
  310. rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
  311. dump_err_status_dispatcher(rtwdev);
  312. }
  313. if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) {
  314. rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
  315. rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
  316. rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
  317. rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
  318. }
  319. if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) {
  320. rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ",
  321. rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
  322. rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n",
  323. rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
  324. rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ",
  325. rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
  326. rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
  327. rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
  328. rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_0=0x%08x\n",
  329. rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0));
  330. rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_1=0x%08x\n",
  331. rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1));
  332. rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_2=0x%08x\n",
  333. rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2));
  334. rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
  335. rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
  336. rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_0=0x%08x\n",
  337. rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0));
  338. rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_1=0x%08x\n",
  339. rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1));
  340. rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_2=0x%08x\n",
  341. rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
  342. rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
  343. rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
  344. rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
  345. rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
  346. rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
  347. rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
  348. rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
  349. rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
  350. dump_err_status_dispatcher(rtwdev);
  351. }
  352. if (dmac_err & B_AX_PKTIN_ERR_FLAG) {
  353. rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ",
  354. rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
  355. rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n",
  356. rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
  357. rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ",
  358. rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
  359. rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n",
  360. rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
  361. }
  362. if (dmac_err & B_AX_DISPATCH_ERR_FLAG)
  363. dump_err_status_dispatcher(rtwdev);
  364. if (dmac_err & B_AX_DLE_CPUIO_ERR_FLAG) {
  365. rtw89_info(rtwdev, "R_AX_CPUIO_ERR_IMR=0x%08x ",
  366. rtw89_read32(rtwdev, R_AX_CPUIO_ERR_IMR));
  367. rtw89_info(rtwdev, "R_AX_CPUIO_ERR_ISR=0x%08x\n",
  368. rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR));
  369. }
  370. if (dmac_err & BIT(11)) {
  371. rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
  372. rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
  373. }
  374. if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) {
  375. rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR=0x%08x ",
  376. rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR));
  377. rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR=0x%04x\n",
  378. rtw89_read16(rtwdev, R_AX_SCHEDULE_ERR_ISR));
  379. }
  380. if (cmac_err & B_AX_PTCL_TOP_ERR_IND) {
  381. rtw89_info(rtwdev, "R_AX_PTCL_IMR0=0x%08x ",
  382. rtw89_read32(rtwdev, R_AX_PTCL_IMR0));
  383. rtw89_info(rtwdev, "R_AX_PTCL_ISR0=0x%08x\n",
  384. rtw89_read32(rtwdev, R_AX_PTCL_ISR0));
  385. }
  386. if (cmac_err & B_AX_DMA_TOP_ERR_IND) {
  387. rtw89_info(rtwdev, "R_AX_DLE_CTRL=0x%08x\n",
  388. rtw89_read32(rtwdev, R_AX_DLE_CTRL));
  389. }
  390. if (cmac_err & B_AX_PHYINTF_ERR_IND) {
  391. rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR=0x%08x\n",
  392. rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR));
  393. }
  394. if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) {
  395. rtw89_info(rtwdev, "R_AX_TXPWR_IMR=0x%08x ",
  396. rtw89_read32(rtwdev, R_AX_TXPWR_IMR));
  397. rtw89_info(rtwdev, "R_AX_TXPWR_ISR=0x%08x\n",
  398. rtw89_read32(rtwdev, R_AX_TXPWR_ISR));
  399. }
  400. if (cmac_err & B_AX_WMAC_RX_ERR_IND) {
  401. rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x ",
  402. rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL));
  403. rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR=0x%08x\n",
  404. rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR));
  405. }
  406. if (cmac_err & B_AX_WMAC_TX_ERR_IND) {
  407. rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR=0x%08x ",
  408. rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR));
  409. rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x\n",
  410. rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL));
  411. }
  412. rtwdev->hci.ops->dump_err_status(rtwdev);
  413. if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1)
  414. rtw89_mac_dump_l0_to_l1(rtwdev, err);
  415. rtw89_info(rtwdev, "<---\n");
  416. }
/* Retrieve the firmware-reported error code from the halt C2H mailbox.
 *
 * Polls R_AX_HALT_C2H_CTRL until the firmware signals an event (up to
 * 100 ms), reads the raw error word, acks the mailbox, normalizes the
 * CPU-exception/assertion scenarios to MAC_AX_ERR_* codes, and dumps
 * FW/MAC debug state.
 *
 * Returns the (possibly normalized) error code, or the negative errno
 * from the poll on timeout.
 * NOTE(review): the negative errno is returned through a u32 return
 * type; callers appear to rely on this two's-complement round-trip.
 */
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
{
	u32 err, err_scnr;
	int ret;

	/* Wait for FW to raise a halt C2H event (1 ms step, 100 ms max). */
	ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000,
				false, rtwdev, R_AX_HALT_C2H_CTRL);
	if (ret) {
		rtw89_warn(rtwdev, "Polling FW err status fail\n");
		return ret;
	}

	err = rtw89_read32(rtwdev, R_AX_HALT_C2H);
	/* Ack the mailbox so the firmware can post the next event. */
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	/* Map WCPU exception/assertion scenarios onto generic error codes. */
	err_scnr = RTW89_ERROR_SCENARIO(err);
	if (err_scnr == RTW89_WCPU_CPU_EXCEPTION)
		err = MAC_AX_ERR_CPU_EXCEPTION;
	else if (err_scnr == RTW89_WCPU_ASSERTION)
		err = MAC_AX_ERR_ASSERTION;

	rtw89_fw_st_dbg_dump(rtwdev);
	rtw89_mac_dump_err_status(rtwdev, err);

	return err;
}
EXPORT_SYMBOL(rtw89_mac_get_err_status);
  439. int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
  440. {
  441. u32 halt;
  442. int ret = 0;
  443. if (err > MAC_AX_SET_ERR_MAX) {
  444. rtw89_err(rtwdev, "Bad set-err-status value 0x%08x\n", err);
  445. return -EINVAL;
  446. }
  447. ret = read_poll_timeout(rtw89_read32, halt, (halt == 0x0), 1000,
  448. 100000, false, rtwdev, R_AX_HALT_H2C_CTRL);
  449. if (ret) {
  450. rtw89_err(rtwdev, "FW doesn't receive previous msg\n");
  451. return -EFAULT;
  452. }
  453. rtw89_write32(rtwdev, R_AX_HALT_H2C, err);
  454. rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, B_AX_HALT_H2C_TRIGGER);
  455. return 0;
  456. }
  457. EXPORT_SYMBOL(rtw89_mac_set_err_status);
  458. static int hfc_reset_param(struct rtw89_dev *rtwdev)
  459. {
  460. struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
  461. struct rtw89_hfc_param_ini param_ini = {NULL};
  462. u8 qta_mode = rtwdev->mac.dle_info.qta_mode;
  463. switch (rtwdev->hci.type) {
  464. case RTW89_HCI_TYPE_PCIE:
  465. param_ini = rtwdev->chip->hfc_param_ini[qta_mode];
  466. param->en = 0;
  467. break;
  468. default:
  469. return -EINVAL;
  470. }
  471. if (param_ini.pub_cfg)
  472. param->pub_cfg = *param_ini.pub_cfg;
  473. if (param_ini.prec_cfg) {
  474. param->prec_cfg = *param_ini.prec_cfg;
  475. rtwdev->hal.sw_amsdu_max_size =
  476. param->prec_cfg.wp_ch07_prec * HFC_PAGE_UNIT;
  477. }
  478. if (param_ini.ch_cfg)
  479. param->ch_cfg = param_ini.ch_cfg;
  480. memset(&param->ch_info, 0, sizeof(param->ch_info));
  481. memset(&param->pub_info, 0, sizeof(param->pub_info));
  482. param->mode = param_ini.mode;
  483. return 0;
  484. }
  485. static int hfc_ch_cfg_chk(struct rtw89_dev *rtwdev, u8 ch)
  486. {
  487. struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
  488. const struct rtw89_hfc_ch_cfg *ch_cfg = param->ch_cfg;
  489. const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
  490. const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
  491. if (ch >= RTW89_DMA_CH_NUM)
  492. return -EINVAL;
  493. if ((ch_cfg[ch].min && ch_cfg[ch].min < prec_cfg->ch011_prec) ||
  494. ch_cfg[ch].max > pub_cfg->pub_max)
  495. return -EINVAL;
  496. if (ch_cfg[ch].grp >= grp_num)
  497. return -EINVAL;
  498. return 0;
  499. }
  500. static int hfc_pub_info_chk(struct rtw89_dev *rtwdev)
  501. {
  502. struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
  503. const struct rtw89_hfc_pub_cfg *cfg = &param->pub_cfg;
  504. struct rtw89_hfc_pub_info *info = &param->pub_info;
  505. if (info->g0_used + info->g1_used + info->pub_aval != cfg->pub_max) {
  506. if (rtwdev->chip->chip_id == RTL8852A)
  507. return 0;
  508. else
  509. return -EFAULT;
  510. }
  511. return 0;
  512. }
  513. static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev)
  514. {
  515. struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
  516. const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
  517. if (pub_cfg->grp0 + pub_cfg->grp1 != pub_cfg->pub_max)
  518. return -EFAULT;
  519. return 0;
  520. }
  521. static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
  522. {
  523. const struct rtw89_chip_info *chip = rtwdev->chip;
  524. const struct rtw89_page_regs *regs = chip->page_regs;
  525. struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
  526. const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
  527. int ret = 0;
  528. u32 val = 0;
  529. ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
  530. if (ret)
  531. return ret;
  532. ret = hfc_ch_cfg_chk(rtwdev, ch);
  533. if (ret)
  534. return ret;
  535. if (ch > RTW89_DMA_B1HI)
  536. return -EINVAL;
  537. val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) |
  538. u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) |
  539. (cfg[ch].grp ? B_AX_GRP : 0);
  540. rtw89_write32(rtwdev, regs->ach_page_ctrl + ch * 4, val);
  541. return 0;
  542. }
  543. static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
  544. {
  545. const struct rtw89_chip_info *chip = rtwdev->chip;
  546. const struct rtw89_page_regs *regs = chip->page_regs;
  547. struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
  548. struct rtw89_hfc_ch_info *info = param->ch_info;
  549. const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
  550. u32 val;
  551. u32 ret;
  552. ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
  553. if (ret)
  554. return ret;
  555. if (ch > RTW89_DMA_H2C)
  556. return -EINVAL;
  557. val = rtw89_read32(rtwdev, regs->ach_page_info + ch * 4);
  558. info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK);
  559. if (ch < RTW89_DMA_H2C)
  560. info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK);
  561. else
  562. info[ch].used = cfg[ch].min - info[ch].aval;
  563. return 0;
  564. }
  565. static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
  566. {
  567. const struct rtw89_chip_info *chip = rtwdev->chip;
  568. const struct rtw89_page_regs *regs = chip->page_regs;
  569. const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg;
  570. u32 val;
  571. int ret;
  572. ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
  573. if (ret)
  574. return ret;
  575. ret = hfc_pub_cfg_chk(rtwdev);
  576. if (ret)
  577. return ret;
  578. val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) |
  579. u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK);
  580. rtw89_write32(rtwdev, regs->pub_page_ctrl1, val);
  581. val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK);
  582. rtw89_write32(rtwdev, regs->wp_page_ctrl2, val);
  583. return 0;
  584. }
/* Read back the complete flow-control state from hardware into the
 * cached hfc_param (public-pool counters, enable/mode bits, precharge
 * and full-condition settings, group quotas), then verify the public
 * pool accounting.
 *
 * Returns 0 on success; a negative error code if the DMAC is down or,
 * when flow control is enabled, if the accounting check fails.
 */
static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
	struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
	struct rtw89_hfc_pub_info *info = &param->pub_info;
	u32 val;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	/* Public-pool usage and availability counters. */
	val = rtw89_read32(rtwdev, regs->pub_page_info1);
	info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK);
	info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK);
	val = rtw89_read32(rtwdev, regs->pub_page_info3);
	info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK);
	info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK);
	info->pub_aval =
		u32_get_bits(rtw89_read32(rtwdev, regs->pub_page_info2),
			     B_AX_PUB_AVAL_PG_MASK);
	info->wp_aval =
		u32_get_bits(rtw89_read32(rtwdev, regs->wp_page_info1),
			     B_AX_WP_AVAL_PG_MASK);

	/* Enable bits, mode and full-condition settings. */
	val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
	param->en = val & B_AX_HCI_FC_EN ? 1 : 0;
	param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0;
	param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK);
	prec_cfg->ch011_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_WD_FULL_COND_MASK);
	prec_cfg->h2c_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_CH12_FULL_COND_MASK);
	prec_cfg->wp_ch07_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
	prec_cfg->wp_ch811_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);

	/* Precharge page counts and pool quotas. */
	val = rtw89_read32(rtwdev, regs->ch_page_ctrl);
	prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK);
	prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK);
	val = rtw89_read32(rtwdev, regs->pub_page_ctrl2);
	pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK);
	val = rtw89_read32(rtwdev, regs->wp_page_ctrl1);
	prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK);
	prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK);
	val = rtw89_read32(rtwdev, regs->wp_page_ctrl2);
	pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK);
	val = rtw89_read32(rtwdev, regs->pub_page_ctrl1);
	pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK);
	pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK);

	/* Accounting mismatches only matter while flow control is on. */
	ret = hfc_pub_info_chk(rtwdev);
	if (param->en && ret)
		return ret;

	return 0;
}
  640. static void hfc_h2c_cfg(struct rtw89_dev *rtwdev)
  641. {
  642. const struct rtw89_chip_info *chip = rtwdev->chip;
  643. const struct rtw89_page_regs *regs = chip->page_regs;
  644. struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
  645. const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
  646. u32 val;
  647. val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
  648. rtw89_write32(rtwdev, regs->ch_page_ctrl, val);
  649. rtw89_write32_mask(rtwdev, regs->hci_fc_ctrl,
  650. B_AX_HCI_FC_CH12_FULL_COND_MASK,
  651. prec_cfg->h2c_full_cond);
  652. }
/* Push the full flow-control configuration (precharge pages, public
 * pool size, WP precharge, mode and all full conditions) from the
 * cached hfc_param into hardware.
 */
static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
	const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
	u32 val;

	/* ch0-11 and H2C precharge page counts. */
	val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) |
	      u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
	rtw89_write32(rtwdev, regs->ch_page_ctrl, val);

	/* Total public-pool page count. */
	val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK);
	rtw89_write32(rtwdev, regs->pub_page_ctrl2, val);

	/* WP precharge pages for ch0-7 and ch8-11. */
	val = u32_encode_bits(prec_cfg->wp_ch07_prec,
			      B_AX_PREC_PAGE_WP_CH07_MASK) |
	      u32_encode_bits(prec_cfg->wp_ch811_prec,
			      B_AX_PREC_PAGE_WP_CH811_MASK);
	rtw89_write32(rtwdev, regs->wp_page_ctrl1, val);

	/* Read-modify-write of mode and all full-condition fields. */
	val = u32_replace_bits(rtw89_read32(rtwdev, regs->hci_fc_ctrl),
			       param->mode, B_AX_HCI_FC_MODE_MASK);
	val = u32_replace_bits(val, prec_cfg->ch011_full_cond,
			       B_AX_HCI_FC_WD_FULL_COND_MASK);
	val = u32_replace_bits(val, prec_cfg->h2c_full_cond,
			       B_AX_HCI_FC_CH12_FULL_COND_MASK);
	val = u32_replace_bits(val, prec_cfg->wp_ch07_full_cond,
			       B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
	val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond,
			       B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
	rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}
  683. static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
  684. {
  685. const struct rtw89_chip_info *chip = rtwdev->chip;
  686. const struct rtw89_page_regs *regs = chip->page_regs;
  687. struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
  688. u32 val;
  689. val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
  690. param->en = en;
  691. param->h2c_en = h2c_en;
  692. val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN);
  693. val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) :
  694. (val & ~B_AX_HCI_FC_CH12_EN);
  695. rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
  696. }
  697. static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
  698. {
  699. const struct rtw89_chip_info *chip = rtwdev->chip;
  700. u32 dma_ch_mask = chip->dma_ch_mask;
  701. u8 ch;
  702. u32 ret = 0;
  703. if (reset)
  704. ret = hfc_reset_param(rtwdev);
  705. if (ret)
  706. return ret;
  707. ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
  708. if (ret)
  709. return ret;
  710. hfc_func_en(rtwdev, false, false);
  711. if (!en && h2c_en) {
  712. hfc_h2c_cfg(rtwdev);
  713. hfc_func_en(rtwdev, en, h2c_en);
  714. return ret;
  715. }
  716. for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
  717. if (dma_ch_mask & BIT(ch))
  718. continue;
  719. ret = hfc_ch_ctrl(rtwdev, ch);
  720. if (ret)
  721. return ret;
  722. }
  723. ret = hfc_pub_ctrl(rtwdev);
  724. if (ret)
  725. return ret;
  726. hfc_mix_cfg(rtwdev);
  727. if (en || h2c_en) {
  728. hfc_func_en(rtwdev, en, h2c_en);
  729. udelay(10);
  730. }
  731. for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
  732. if (dma_ch_mask & BIT(ch))
  733. continue;
  734. ret = hfc_upd_ch_info(rtwdev, ch);
  735. if (ret)
  736. return ret;
  737. }
  738. ret = hfc_upd_mix_info(rtwdev);
  739. return ret;
  740. }
/* Maximum number of 1 ms poll iterations for a PWR_CMD_POLL entry. */
#define PWR_POLL_CNT 2000

/* Execute one PWR_CMD_POLL power-sequence entry: wait until the masked
 * register value matches cfg->val, for up to PWR_POLL_CNT milliseconds.
 *
 * Returns 0 on match or -EBUSY on timeout (with diagnostics logged).
 * NOTE(review): cfg->base is compared against PWR_INTF_MSK_SDIO here
 * but against PWR_BASE_SDIO in rtw89_mac_sub_pwr_seq — confirm the two
 * constants are meant to coincide.
 */
static int pwr_cmd_poll(struct rtw89_dev *rtwdev,
			const struct rtw89_pwr_cfg *cfg)
{
	u8 val = 0;
	int ret;
	/* SDIO-local registers need the SDIO base address folded in. */
	u32 addr = cfg->base == PWR_INTF_MSK_SDIO ?
		   cfg->addr | SDIO_LOCAL_BASE_ADDR : cfg->addr;

	ret = read_poll_timeout(rtw89_read8, val, !((val ^ cfg->val) & cfg->msk),
				1000, 1000 * PWR_POLL_CNT, false, rtwdev, addr);
	if (!ret)
		return 0;

	rtw89_warn(rtwdev, "[ERR] Polling timeout\n");
	rtw89_warn(rtwdev, "[ERR] addr: %X, %X\n", addr, cfg->addr);
	rtw89_warn(rtwdev, "[ERR] val: %X, %X\n", val, cfg->val);

	return -EBUSY;
}
/* Run one power-configuration command list until its PWR_CMD_END entry.
 *
 * Entries whose interface or chip-cut mask does not match @intf_msk /
 * @cv_msk are skipped. Supported commands: masked 8-bit register write,
 * polled wait (via pwr_cmd_poll), and delay in us or ms (the delay
 * length is carried in the entry's addr field).
 *
 * Returns 0 on success, -EBUSY on poll timeout, -EINVAL on an unknown
 * command.
 */
static int rtw89_mac_sub_pwr_seq(struct rtw89_dev *rtwdev, u8 cv_msk,
				 u8 intf_msk, const struct rtw89_pwr_cfg *cfg)
{
	const struct rtw89_pwr_cfg *cur_cfg;
	u32 addr;
	u8 val;

	for (cur_cfg = cfg; cur_cfg->cmd != PWR_CMD_END; cur_cfg++) {
		/* Skip entries not targeting this interface or chip cut. */
		if (!(cur_cfg->intf_msk & intf_msk) ||
		    !(cur_cfg->cv_msk & cv_msk))
			continue;

		switch (cur_cfg->cmd) {
		case PWR_CMD_WRITE:
			addr = cur_cfg->addr;
			if (cur_cfg->base == PWR_BASE_SDIO)
				addr |= SDIO_LOCAL_BASE_ADDR;

			/* Read-modify-write only the masked bits. */
			val = rtw89_read8(rtwdev, addr);
			val &= ~(cur_cfg->msk);
			val |= (cur_cfg->val & cur_cfg->msk);

			rtw89_write8(rtwdev, addr, val);
			break;
		case PWR_CMD_POLL:
			if (pwr_cmd_poll(rtwdev, cur_cfg))
				return -EBUSY;
			break;
		case PWR_CMD_DELAY:
			/* addr carries the delay length for this command. */
			if (cur_cfg->val == PWR_DELAY_US)
				udelay(cur_cfg->addr);
			else
				fsleep(cur_cfg->addr * 1000);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
  794. static int rtw89_mac_pwr_seq(struct rtw89_dev *rtwdev,
  795. const struct rtw89_pwr_cfg * const *cfg_seq)
  796. {
  797. int ret;
  798. for (; *cfg_seq; cfg_seq++) {
  799. ret = rtw89_mac_sub_pwr_seq(rtwdev, BIT(rtwdev->hal.cv),
  800. PWR_INTF_MSK_PCIE, *cfg_seq);
  801. if (ret)
  802. return -EBUSY;
  803. }
  804. return 0;
  805. }
  806. static enum rtw89_rpwm_req_pwr_state
  807. rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev)
  808. {
  809. enum rtw89_rpwm_req_pwr_state state;
  810. switch (rtwdev->ps_mode) {
  811. case RTW89_PS_MODE_RFOFF:
  812. state = RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF;
  813. break;
  814. case RTW89_PS_MODE_CLK_GATED:
  815. state = RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED;
  816. break;
  817. case RTW89_PS_MODE_PWR_GATED:
  818. state = RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED;
  819. break;
  820. default:
  821. state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
  822. break;
  823. }
  824. return state;
  825. }
/* Post a power-state request (RPWM) to the firmware.
 *
 * @req_pwr_state: requested power state.
 * @notify_wake:   fire-and-forget wake notification; no sequence number
 *                 or ACK handshake is used in that case.
 *
 * Serialized by rpwm_lock; the sequence-number update must stay inside
 * the critical section.
 */
static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev,
				enum rtw89_rpwm_req_pwr_state req_pwr_state,
				bool notify_wake)
{
	u16 request;

	spin_lock_bh(&rtwdev->rpwm_lock);

	request = rtw89_read16(rtwdev, R_AX_RPWM);
	/* r ^= (r | TOGGLE) yields TOGGLE & ~r: the toggle bit flipped
	 * relative to the last request, all other bits cleared before
	 * the new request word is rebuilt below.
	 */
	request ^= request | PS_RPWM_TOGGLE;
	request |= req_pwr_state;

	if (notify_wake) {
		request |= PS_RPWM_NOTIFY_WAKE;
	} else {
		rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
					   RPWM_SEQ_NUM_MAX;
		request |= FIELD_PREP(PS_RPWM_SEQ_NUM,
				      rtwdev->mac.rpwm_seq_num);

		/* Only shallow (pre clock-gated) states request an ACK. */
		if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
			request |= PS_RPWM_ACK;
	}
	rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request);

	spin_unlock_bh(&rtwdev->rpwm_lock);
}
  848. static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
  849. enum rtw89_rpwm_req_pwr_state req_pwr_state)
  850. {
  851. bool request_deep_mode;
  852. bool in_deep_mode;
  853. u8 rpwm_req_num;
  854. u8 cpwm_rsp_seq;
  855. u8 cpwm_seq;
  856. u8 cpwm_status;
  857. if (req_pwr_state >= RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
  858. request_deep_mode = true;
  859. else
  860. request_deep_mode = false;
  861. if (rtw89_read32_mask(rtwdev, R_AX_LDM, B_AX_EN_32K))
  862. in_deep_mode = true;
  863. else
  864. in_deep_mode = false;
  865. if (request_deep_mode != in_deep_mode)
  866. return -EPERM;
  867. if (request_deep_mode)
  868. return 0;
  869. rpwm_req_num = rtwdev->mac.rpwm_seq_num;
  870. cpwm_rsp_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr,
  871. PS_CPWM_RSP_SEQ_NUM);
  872. if (rpwm_req_num != cpwm_rsp_seq)
  873. return -EPERM;
  874. rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) &
  875. CPWM_SEQ_NUM_MAX;
  876. cpwm_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_SEQ_NUM);
  877. if (cpwm_seq != rtwdev->mac.cpwm_seq_num)
  878. return -EPERM;
  879. cpwm_status = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_STATE);
  880. if (cpwm_status != req_pwr_state)
  881. return -EPERM;
  882. return 0;
  883. }
/* Request the firmware to enter or leave the configured power-save
 * state and wait for its acknowledgment.
 *
 * @enter: true to enter PS (state from ps_mode), false to go ACTIVE.
 *
 * Each of the RPWM_TRY_CNT attempts sends an RPWM and polls the CPWM
 * response for up to 15 ms (poll interval 10 us entering, 150 us
 * leaving). Intermediate failures are logged at debug level; only the
 * final failure is an error.
 */
void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
{
	enum rtw89_rpwm_req_pwr_state state;
	unsigned long delay = enter ? 10 : 150;
	int ret;
	int i;

	if (enter)
		state = rtw89_mac_get_req_pwr_state(rtwdev);
	else
		state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;

	for (i = 0; i < RPWM_TRY_CNT; i++) {
		rtw89_mac_send_rpwm(rtwdev, state, false);
		ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret,
					       !ret, delay, 15000, false,
					       rtwdev, state);
		if (!ret)
			break;

		if (i == RPWM_TRY_CNT - 1)
			rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
				  enter ? "entering" : "leaving");
		else
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "%d time firmware failed to ack for %s ps mode\n",
				    i + 1, enter ? "entering" : "leaving");
	}
}
  910. void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
  911. {
  912. enum rtw89_rpwm_req_pwr_state state;
  913. state = rtw89_mac_get_req_pwr_state(rtwdev);
  914. rtw89_mac_send_rpwm(rtwdev, state, true);
  915. }
/* Power the WLAN MAC on or off.
 *
 * Selects the chip's power on/off sequence (or a chip-specific function
 * override), refuses to power on a MAC that is already active, runs the
 * sequence, and then updates the driver flags and scoreboard notify
 * byte accordingly. On power-off the FW-ready flag and entity state are
 * cleared as well.
 *
 * Returns 0 on success, -EBUSY when already powered on, or the error
 * from the power sequence.
 */
static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
{
#define PWR_ACT 1
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_pwr_cfg * const *cfg_seq;
	int (*cfg_func)(struct rtw89_dev *rtwdev);
	int ret;
	u8 val;

	if (on) {
		cfg_seq = chip->pwr_on_seq;
		cfg_func = chip->ops->pwr_on_func;
	} else {
		cfg_seq = chip->pwr_off_seq;
		cfg_func = chip->ops->pwr_off_func;
	}

	/* Power transitions must not race an active PS state. */
	if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
		__rtw89_leave_ps_mode(rtwdev);

	val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK);
	if (on && val == PWR_ACT) {
		rtw89_err(rtwdev, "MAC has already powered on\n");
		return -EBUSY;
	}

	/* A chip-specific function takes precedence over the table. */
	ret = cfg_func ? cfg_func(rtwdev) : rtw89_mac_pwr_seq(rtwdev, cfg_seq);
	if (ret)
		return ret;

	if (on) {
		set_bit(RTW89_FLAG_POWERON, rtwdev->flags);
		rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR);
	} else {
		clear_bit(RTW89_FLAG_POWERON, rtwdev->flags);
		clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
		rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR);
		rtw89_set_entity_state(rtwdev, false);
	}

	return 0;
#undef PWR_ACT
}
/* Power the MAC off unconditionally; the result is intentionally
 * ignored since there is no recovery path on the off direction.
 */
void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev)
{
	rtw89_mac_power_switch(rtwdev, false);
}
  957. static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
  958. {
  959. u32 func_en = 0;
  960. u32 ck_en = 0;
  961. u32 c1pc_en = 0;
  962. u32 addrl_func_en[] = {R_AX_CMAC_FUNC_EN, R_AX_CMAC_FUNC_EN_C1};
  963. u32 addrl_ck_en[] = {R_AX_CK_EN, R_AX_CK_EN_C1};
  964. func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
  965. B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN |
  966. B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN |
  967. B_AX_CMAC_CRPRT;
  968. ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN |
  969. B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN |
  970. B_AX_RMAC_CKEN;
  971. c1pc_en = B_AX_R_SYM_WLCMAC1_PC_EN |
  972. B_AX_R_SYM_WLCMAC1_P1_PC_EN |
  973. B_AX_R_SYM_WLCMAC1_P2_PC_EN |
  974. B_AX_R_SYM_WLCMAC1_P3_PC_EN |
  975. B_AX_R_SYM_WLCMAC1_P4_PC_EN;
  976. if (en) {
  977. if (mac_idx == RTW89_MAC_1) {
  978. rtw89_write32_set(rtwdev, R_AX_AFE_CTRL1, c1pc_en);
  979. rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
  980. B_AX_R_SYM_ISO_CMAC12PP);
  981. rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
  982. B_AX_CMAC1_FEN);
  983. }
  984. rtw89_write32_set(rtwdev, addrl_ck_en[mac_idx], ck_en);
  985. rtw89_write32_set(rtwdev, addrl_func_en[mac_idx], func_en);
  986. } else {
  987. rtw89_write32_clr(rtwdev, addrl_func_en[mac_idx], func_en);
  988. rtw89_write32_clr(rtwdev, addrl_ck_en[mac_idx], ck_en);
  989. if (mac_idx == RTW89_MAC_1) {
  990. rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
  991. B_AX_CMAC1_FEN);
  992. rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
  993. B_AX_R_SYM_ISO_CMAC12PP);
  994. rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, c1pc_en);
  995. }
  996. }
  997. return 0;
  998. }
  999. static int dmac_func_en(struct rtw89_dev *rtwdev)
  1000. {
  1001. enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
  1002. u32 val32;
  1003. if (chip_id == RTL8852C)
  1004. val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
  1005. B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
  1006. B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
  1007. B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
  1008. B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
  1009. B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
  1010. B_AX_DMAC_CRPRT | B_AX_H_AXIDMA_EN);
  1011. else
  1012. val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
  1013. B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
  1014. B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
  1015. B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
  1016. B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
  1017. B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
  1018. B_AX_DMAC_CRPRT);
  1019. rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32);
  1020. val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN |
  1021. B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
  1022. B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
  1023. B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN);
  1024. rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
  1025. return 0;
  1026. }
  1027. static int chip_func_en(struct rtw89_dev *rtwdev)
  1028. {
  1029. enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
  1030. if (chip_id == RTL8852A || chip_id == RTL8852B)
  1031. rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
  1032. B_AX_OCP_L1_MASK);
  1033. return 0;
  1034. }
  1035. static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev)
  1036. {
  1037. int ret;
  1038. ret = dmac_func_en(rtwdev);
  1039. if (ret)
  1040. return ret;
  1041. ret = cmac_func_en(rtwdev, 0, true);
  1042. if (ret)
  1043. return ret;
  1044. ret = chip_func_en(rtwdev);
  1045. if (ret)
  1046. return ret;
  1047. return ret;
  1048. }
/* Shared DLE/HFC sizing tables, indexed by the chips' dle_mem entries.
 * The per-field meanings (wde/ple page geometry, quota vectors) are
 * defined by struct rtw89_mac_size_set — keep the values here in sync
 * with the chip dle_mem tables that reference them.
 */
const struct rtw89_mac_size_set rtw89_mac_size = {
	.hfc_preccfg_pcie = {2, 40, 0, 0, 1, 0, 0, 0},
	/* PCIE 64 */
	.wde_size0 = {RTW89_WDE_PG_64, 4095, 1,},
	/* DLFW */
	.wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
	/* PCIE 64 */
	.wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
	/* DLFW */
	.wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
	/* 8852C DLFW */
	.wde_size18 = {RTW89_WDE_PG_64, 0, 2048,},
	/* 8852C PCIE SCC */
	.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
	/* PCIE */
	.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
	/* DLFW */
	.ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
	/* PCIE 64 */
	.ple_size6 = {RTW89_PLE_PG_128, 496, 16,},
	/* DLFW */
	.ple_size8 = {RTW89_PLE_PG_128, 64, 960,},
	/* 8852C DLFW */
	.ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
	/* 8852C PCIE SCC */
	.ple_size19 = {RTW89_PLE_PG_128, 1904, 16,},
	/* PCIE 64 */
	.wde_qt0 = {3792, 196, 0, 107,},
	/* DLFW */
	.wde_qt4 = {0, 0, 0, 0,},
	/* PCIE 64 */
	.wde_qt6 = {448, 48, 0, 16,},
	/* 8852C DLFW */
	.wde_qt17 = {0, 0, 0, 0,},
	/* 8852C PCIE SCC */
	.wde_qt18 = {3228, 60, 0, 40,},
	/* PCIE SCC */
	.ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
	/* PCIE SCC */
	.ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,},
	/* DLFW */
	.ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,},
	/* PCIE 64 */
	.ple_qt18 = {147, 0, 16, 20, 17, 13, 89, 0, 32, 14, 8, 0,},
	/* DLFW 52C */
	.ple_qt44 = {0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,},
	/* DLFW 52C */
	.ple_qt45 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,},
	/* 8852C PCIE SCC */
	.ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,},
	/* 8852C PCIE SCC */
	.ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,},
	/* PCIE 64 */
	.ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,},
};
EXPORT_SYMBOL(rtw89_mac_size);
  1105. static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
  1106. enum rtw89_qta_mode mode)
  1107. {
  1108. struct rtw89_mac_info *mac = &rtwdev->mac;
  1109. const struct rtw89_dle_mem *cfg;
  1110. cfg = &rtwdev->chip->dle_mem[mode];
  1111. if (!cfg)
  1112. return NULL;
  1113. if (cfg->mode != mode) {
  1114. rtw89_warn(rtwdev, "qta mode unmatch!\n");
  1115. return NULL;
  1116. }
  1117. mac->dle_info.wde_pg_size = cfg->wde_size->pge_size;
  1118. mac->dle_info.ple_pg_size = cfg->ple_size->pge_size;
  1119. mac->dle_info.qta_mode = mode;
  1120. mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma;
  1121. mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma;
  1122. return cfg;
  1123. }
  1124. static inline u32 dle_used_size(const struct rtw89_dle_size *wde,
  1125. const struct rtw89_dle_size *ple)
  1126. {
  1127. return wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) +
  1128. ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num);
  1129. }
  1130. static u32 dle_expected_used_size(struct rtw89_dev *rtwdev,
  1131. enum rtw89_qta_mode mode)
  1132. {
  1133. u32 size = rtwdev->chip->fifo_size;
  1134. if (mode == RTW89_QTA_SCC)
  1135. size -= rtwdev->chip->dle_scc_rsvd_size;
  1136. return size;
  1137. }
  1138. static void dle_func_en(struct rtw89_dev *rtwdev, bool enable)
  1139. {
  1140. if (enable)
  1141. rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
  1142. B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN);
  1143. else
  1144. rtw89_write32_clr(rtwdev, R_AX_DMAC_FUNC_EN,
  1145. B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN);
  1146. }
  1147. static void dle_clk_en(struct rtw89_dev *rtwdev, bool enable)
  1148. {
  1149. if (enable)
  1150. rtw89_write32_set(rtwdev, R_AX_DMAC_CLK_EN,
  1151. B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN);
  1152. else
  1153. rtw89_write32_clr(rtwdev, R_AX_DMAC_CLK_EN,
  1154. B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN);
  1155. }
/* Program the WDE/PLE packet-buffer layout: page-size select, start bound
 * (in DLE_BOUND_UNIT units) and free-page count. The PLE region begins
 * right after the WDE pages. Returns -EINVAL for an unsupported page size.
 */
static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg)
{
	const struct rtw89_dle_size *size_cfg;
	u32 val;
	u8 bound = 0;

	/* WDE always starts at bound 0 */
	val = rtw89_read32(rtwdev, R_AX_WDE_PKTBUF_CFG);
	size_cfg = cfg->wde_size;

	switch (size_cfg->pge_size) {
	default:
	case RTW89_WDE_PG_64:
		val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_64,
				       B_AX_WDE_PAGE_SEL_MASK);
		break;
	case RTW89_WDE_PG_128:
		val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_128,
				       B_AX_WDE_PAGE_SEL_MASK);
		break;
	case RTW89_WDE_PG_256:
		rtw89_err(rtwdev, "[ERR]WDE DLE doesn't support 256 byte!\n");
		return -EINVAL;
	}

	val = u32_replace_bits(val, bound, B_AX_WDE_START_BOUND_MASK);
	val = u32_replace_bits(val, size_cfg->lnk_pge_num,
			       B_AX_WDE_FREE_PAGE_NUM_MASK);
	rtw89_write32(rtwdev, R_AX_WDE_PKTBUF_CFG, val);

	/* PLE start bound = total WDE bytes / DLE_BOUND_UNIT */
	val = rtw89_read32(rtwdev, R_AX_PLE_PKTBUF_CFG);
	bound = (size_cfg->lnk_pge_num + size_cfg->unlnk_pge_num)
			* size_cfg->pge_size / DLE_BOUND_UNIT;
	size_cfg = cfg->ple_size;

	switch (size_cfg->pge_size) {
	default:
	case RTW89_PLE_PG_64:
		rtw89_err(rtwdev, "[ERR]PLE DLE doesn't support 64 byte!\n");
		return -EINVAL;
	case RTW89_PLE_PG_128:
		val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_128,
				       B_AX_PLE_PAGE_SEL_MASK);
		break;
	case RTW89_PLE_PG_256:
		val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_256,
				       B_AX_PLE_PAGE_SEL_MASK);
		break;
	}

	val = u32_replace_bits(val, bound, B_AX_PLE_START_BOUND_MASK);
	val = u32_replace_bits(val, size_cfg->lnk_pge_num,
			       B_AX_PLE_FREE_PAGE_NUM_MASK);
	rtw89_write32(rtwdev, R_AX_PLE_PKTBUF_CFG, val);

	return 0;
}
/* Sentinel meaning "no override of the WCPU minimum WDE quota" */
#define INVALID_QT_WCPU U16_MAX

/* Write min/max page quota into one DLE quota register.
 * _module is WDE or PLE; _idx selects R_AX_<module>_QTA<idx>_CFG.
 * Expects a u32 `val` in the caller's scope.
 */
#define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx)			\
	do {								\
		val = u32_encode_bits(_min_x, B_AX_ ## _module ## _MIN_SIZE_MASK) | \
		      u32_encode_bits(_max_x, B_AX_ ## _module ## _MAX_SIZE_MASK);  \
		rtw89_write32(rtwdev,					\
			      R_AX_ ## _module ## _QTA ## _idx ## _CFG,	\
			      val);					\
	} while (0)

/* Shorthand: take field _x from min_cfg/max_cfg in the caller's scope */
#define SET_QUOTA(_x, _module, _idx)					\
	SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx)
/* Program the WDE min/max page quotas. When @ext_wde_min_qt_wcpu is not
 * INVALID_QT_WCPU it overrides the WCPU minimum quota (used by dle_init()
 * during firmware download).
 */
static void wde_quota_cfg(struct rtw89_dev *rtwdev,
			  const struct rtw89_wde_quota *min_cfg,
			  const struct rtw89_wde_quota *max_cfg,
			  u16 ext_wde_min_qt_wcpu)
{
	u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ?
			  ext_wde_min_qt_wcpu : min_cfg->wcpu;
	u32 val;

	SET_QUOTA(hif, WDE, 0);
	SET_QUOTA_VAL(min_qt_wcpu, max_cfg->wcpu, WDE, 1);
	/* NOTE(review): quota index 2 is intentionally not programmed here */
	SET_QUOTA(pkt_in, WDE, 3);
	SET_QUOTA(cpu_io, WDE, 4);
}
/* Program all PLE min/max page quotas. Quota index 11 (TX report) is only
 * present on 8852C.
 */
static void ple_quota_cfg(struct rtw89_dev *rtwdev,
			  const struct rtw89_ple_quota *min_cfg,
			  const struct rtw89_ple_quota *max_cfg)
{
	u32 val;

	SET_QUOTA(cma0_tx, PLE, 0);
	SET_QUOTA(cma1_tx, PLE, 1);
	SET_QUOTA(c2h, PLE, 2);
	SET_QUOTA(h2c, PLE, 3);
	SET_QUOTA(wcpu, PLE, 4);
	SET_QUOTA(mpdu_proc, PLE, 5);
	SET_QUOTA(cma0_dma, PLE, 6);
	SET_QUOTA(cma1_dma, PLE, 7);
	SET_QUOTA(bb_rpt, PLE, 8);
	SET_QUOTA(wd_rel, PLE, 9);
	SET_QUOTA(cpu_io, PLE, 10);
	if (rtwdev->chip->chip_id == RTL8852C)
		SET_QUOTA(tx_rpt, PLE, 11);
}

#undef SET_QUOTA
/* Apply both the WDE and PLE quota sets of @cfg. @ext_wde_min_qt_wcpu
 * optionally overrides the WCPU minimum WDE quota (see wde_quota_cfg()).
 */
static void dle_quota_cfg(struct rtw89_dev *rtwdev,
			  const struct rtw89_dle_mem *cfg,
			  u16 ext_wde_min_qt_wcpu)
{
	wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu);
	ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt);
}
/* Initialize the data link engine for quota mode @mode. For
 * RTW89_QTA_DLFW (firmware download) the WCPU minimum WDE quota is
 * borrowed from @ext_mode so the layout matches the post-download mode.
 * On configuration errors the DLE is left disabled and the WDE/PLE init
 * status registers are dumped. Returns 0 or a negative error code.
 */
static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
		    enum rtw89_qta_mode ext_mode)
{
	const struct rtw89_dle_mem *cfg, *ext_cfg;
	u16 ext_wde_min_qt_wcpu = INVALID_QT_WCPU;
	int ret = 0;
	u32 ini;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	cfg = get_dle_mem_cfg(rtwdev, mode);
	if (!cfg) {
		rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n");
		ret = -EINVAL;
		goto error;
	}

	if (mode == RTW89_QTA_DLFW) {
		ext_cfg = get_dle_mem_cfg(rtwdev, ext_mode);
		if (!ext_cfg) {
			rtw89_err(rtwdev, "[ERR]get_dle_ext_mem_cfg %d\n",
				  ext_mode);
			ret = -EINVAL;
			goto error;
		}
		ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu;
	}

	/* the WDE+PLE layout must exactly fill the expected FIFO space */
	if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
	    dle_expected_used_size(rtwdev, mode)) {
		rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
		ret = -EINVAL;
		goto error;
	}

	/* reconfigure with the engine disabled but clocked */
	dle_func_en(rtwdev, false);
	dle_clk_en(rtwdev, true);

	ret = dle_mix_cfg(rtwdev, cfg);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] dle mix cfg\n");
		goto error;
	}
	dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu);

	dle_func_en(rtwdev, true);

	/* poll up to 2ms for each engine's init-done status */
	ret = read_poll_timeout(rtw89_read32, ini,
				(ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1,
				2000, false, rtwdev, R_AX_WDE_INI_STATUS);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]WDE cfg ready\n");
		return ret;
	}

	/* NOTE(review): WDE_MGN_INI_RDY mask reused for the PLE status
	 * register — confirm the ready bits are identical for both.
	 */
	ret = read_poll_timeout(rtw89_read32, ini,
				(ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1,
				2000, false, rtwdev, R_AX_PLE_INI_STATUS);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]PLE cfg ready\n");
		return ret;
	}

	return 0;
error:
	dle_func_en(rtwdev, false);
	rtw89_err(rtwdev, "[ERR]trxcfg wde 0x8900 = %x\n",
		  rtw89_read32(rtwdev, R_AX_WDE_INI_STATUS));
	rtw89_err(rtwdev, "[ERR]trxcfg ple 0x8D00 = %x\n",
		  rtw89_read32(rtwdev, R_AX_PLE_INI_STATUS));

	return ret;
}
  1320. static int preload_init_set(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
  1321. enum rtw89_qta_mode mode)
  1322. {
  1323. u32 reg, max_preld_size, min_rsvd_size;
  1324. max_preld_size = (mac_idx == RTW89_MAC_0 ?
  1325. PRELD_B0_ENT_NUM : PRELD_B1_ENT_NUM) * PRELD_AMSDU_SIZE;
  1326. reg = mac_idx == RTW89_MAC_0 ?
  1327. R_AX_TXPKTCTL_B0_PRELD_CFG0 : R_AX_TXPKTCTL_B1_PRELD_CFG0;
  1328. rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_USEMAXSZ_MASK, max_preld_size);
  1329. rtw89_write32_set(rtwdev, reg, B_AX_B0_PRELD_FEN);
  1330. min_rsvd_size = PRELD_AMSDU_SIZE;
  1331. reg = mac_idx == RTW89_MAC_0 ?
  1332. R_AX_TXPKTCTL_B0_PRELD_CFG1 : R_AX_TXPKTCTL_B1_PRELD_CFG1;
  1333. rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_TXENDWIN_MASK, PRELD_NEXT_WND);
  1334. rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_RSVMINSZ_MASK, min_rsvd_size);
  1335. return 0;
  1336. }
  1337. static bool is_qta_poh(struct rtw89_dev *rtwdev)
  1338. {
  1339. return rtwdev->hci.type == RTW89_HCI_TYPE_PCIE;
  1340. }
  1341. static int preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
  1342. enum rtw89_qta_mode mode)
  1343. {
  1344. const struct rtw89_chip_info *chip = rtwdev->chip;
  1345. if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || !is_qta_poh(rtwdev))
  1346. return 0;
  1347. return preload_init_set(rtwdev, mac_idx, mode);
  1348. }
  1349. static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
  1350. {
  1351. u32 msk32;
  1352. u32 val32;
  1353. msk32 = B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC | B_AX_WDE_EMPTY_QUE_CMAC0_MBH |
  1354. B_AX_WDE_EMPTY_QUE_CMAC1_MBH | B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 |
  1355. B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 | B_AX_WDE_EMPTY_QUE_OTHERS |
  1356. B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | B_AX_PLE_EMPTY_QTA_DMAC_H2C |
  1357. B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QUE_DMAC_PKTIN |
  1358. B_AX_WDE_EMPTY_QTA_DMAC_HIF | B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU |
  1359. B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_CPUIO |
  1360. B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL |
  1361. B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL |
  1362. B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX |
  1363. B_AX_PLE_EMPTY_QTA_DMAC_CPUIO |
  1364. B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU |
  1365. B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU;
  1366. val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0);
  1367. if ((val32 & msk32) == msk32)
  1368. return true;
  1369. return false;
  1370. }
  1371. static void _patch_ss2f_path(struct rtw89_dev *rtwdev)
  1372. {
  1373. const struct rtw89_chip_info *chip = rtwdev->chip;
  1374. if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B)
  1375. return;
  1376. rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK,
  1377. SS2F_PATH_WLCPU);
  1378. }
/* Enable the STA scheduler, poll for its init-done flag, select warm
 * init, disable non-empty SS2FINFO reporting and apply the chip-specific
 * SS2F path patch. Returns 0 or a negative error code.
 */
static int sta_sch_init(struct rtw89_dev *rtwdev)
{
	u32 p_val;
	u8 val;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	val = rtw89_read8(rtwdev, R_AX_SS_CTRL);
	val |= B_AX_SS_EN;
	rtw89_write8(rtwdev, R_AX_SS_CTRL, val);

	ret = read_poll_timeout(rtw89_read32, p_val, p_val & B_AX_SS_INIT_DONE_1,
				1, TRXCFG_WAIT_CNT, false, rtwdev, R_AX_SS_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]STA scheduler init\n");
		return ret;
	}

	rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG);
	rtw89_write32_clr(rtwdev, R_AX_SS_CTRL, B_AX_SS_NONEMPTY_SS2FINFO_EN);

	_patch_ss2f_path(rtwdev);

	return 0;
}
/* MPDU processor init: action-frame and trigger-frame forwarding rules,
 * FCS append plus B_AX_A_ICV_ERR handling, and cut-AMSDU control.
 * Returns 0 or a negative error code.
 */
static int mpdu_proc_init(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
	rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
	rtw89_write32_set(rtwdev, R_AX_MPDU_PROC,
			  B_AX_APPEND_FCS | B_AX_A_ICV_ERR);
	rtw89_write32(rtwdev, R_AX_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL);

	return 0;
}
/* Security engine init: enable crypto clocks, TX encryption and RX
 * decryption (including multicast/broadcast), MIC/ICV appending, and the
 * 8852C TX timeout select. Returns 0 or a negative error code.
 */
static int sec_eng_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val = 0;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	val = rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL);
	/* init clock */
	val |= (B_AX_CLK_EN_CGCMP | B_AX_CLK_EN_WAPI | B_AX_CLK_EN_WEP_TKIP);
	/* init TX encryption */
	val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC);
	val |= (B_AX_MC_DEC | B_AX_BC_DEC);
	/* 8852A/8852B: TX partial mode stays off */
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B)
		val &= ~B_AX_TX_PARTIAL_MODE;
	rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val);

	/* init MIC ICV append */
	val = rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC);
	val |= (B_AX_APPEND_ICV | B_AX_APPEND_MIC);

	/* option init */
	rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val);

	if (chip->chip_id == RTL8852C)
		rtw89_write32_mask(rtwdev, R_AX_SEC_DEBUG1,
				   B_AX_TX_TIMEOUT_SEL_MASK, AX_TX_TO_VAL);

	return 0;
}
/* DMAC bring-up: DLE, TX preload, HCI flow control, STA scheduler, MPDU
 * processor and security engine, in that order, stopping at the first
 * failure. @mac_idx is unused here; all steps target MAC 0 / the shared
 * DMAC. Returns 0 or the first failing step's error code.
 */
static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	int ret;

	ret = dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret);
		return ret;
	}

	ret = preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]preload init %d\n", ret);
		return ret;
	}

	ret = hfc_init(rtwdev, true, true, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret);
		return ret;
	}

	ret = sta_sch_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret);
		return ret;
	}

	ret = mpdu_proc_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret);
		return ret;
	}

	ret = sec_eng_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret);
		return ret;
	}

	return ret;
}
/* Reset and enable the address CAM of CMAC @mac_idx, then poll until the
 * hardware clears the self-clearing B_AX_ADDR_CAM_CLR bit.
 * Returns 0 or a negative error code.
 */
static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 val, reg;
	u16 p_val;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(R_AX_ADDR_CAM_CTRL, mac_idx);

	val = rtw89_read32(rtwdev, reg);
	val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) |
	       B_AX_ADDR_CAM_CLR | B_AX_ADDR_CAM_EN;
	rtw89_write32(rtwdev, reg, val);

	ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR),
				1, TRXCFG_WAIT_CNT, false, rtwdev, reg);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n");
		return ret;
	}

	return 0;
}
  1497. static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
  1498. {
  1499. u32 ret;
  1500. u32 reg;
  1501. u32 val;
  1502. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  1503. if (ret)
  1504. return ret;
  1505. reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_1, mac_idx);
  1506. if (rtwdev->chip->chip_id == RTL8852C)
  1507. rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
  1508. SIFS_MACTXEN_T1_V1);
  1509. else
  1510. rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
  1511. SIFS_MACTXEN_T1);
  1512. if (rtwdev->chip->chip_id == RTL8852B) {
  1513. reg = rtw89_mac_reg_by_idx(R_AX_SCH_EXT_CTRL, mac_idx);
  1514. rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV);
  1515. }
  1516. reg = rtw89_mac_reg_by_idx(R_AX_CCA_CFG_0, mac_idx);
  1517. rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN);
  1518. reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx);
  1519. if (rtwdev->chip->chip_id == RTL8852C) {
  1520. val = rtw89_read32_mask(rtwdev, R_AX_SEC_ENG_CTRL,
  1521. B_AX_TX_PARTIAL_MODE);
  1522. if (!val)
  1523. rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK,
  1524. SCH_PREBKF_24US);
  1525. } else {
  1526. rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK,
  1527. SCH_PREBKF_24US);
  1528. }
  1529. return 0;
  1530. }
/* Set the RX forwarding target (drop / to host / to WLAN CPU) for one
 * 802.11 frame type class (management, control or data) on CMAC @mac_idx.
 * Returns 0, or -EINVAL for an unknown type or target.
 */
static int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev,
				  enum rtw89_machdr_frame_type type,
				  enum rtw89_mac_fwd_target fwd_target,
				  u8 mac_idx)
{
	u32 reg;
	u32 val;

	switch (fwd_target) {
	case RTW89_FWD_DONT_CARE:
		val = RX_FLTR_FRAME_DROP;
		break;
	case RTW89_FWD_TO_HOST:
		val = RX_FLTR_FRAME_TO_HOST;
		break;
	case RTW89_FWD_TO_WLAN_CPU:
		val = RX_FLTR_FRAME_TO_WLCPU;
		break;
	default:
		rtw89_err(rtwdev, "[ERR]set rx filter fwd target err\n");
		return -EINVAL;
	}

	switch (type) {
	case RTW89_MGNT:
		reg = rtw89_mac_reg_by_idx(R_AX_MGNT_FLTR, mac_idx);
		break;
	case RTW89_CTRL:
		reg = rtw89_mac_reg_by_idx(R_AX_CTRL_FLTR, mac_idx);
		break;
	case RTW89_DATA:
		reg = rtw89_mac_reg_by_idx(R_AX_DATA_FLTR, mac_idx);
		break;
	default:
		rtw89_err(rtwdev, "[ERR]set rx filter type err\n");
		return -EINVAL;
	}
	rtw89_write32(rtwdev, reg, val);

	return 0;
}
  1569. static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx)
  1570. {
  1571. int ret, i;
  1572. u32 mac_ftlr, plcp_ftlr;
  1573. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  1574. if (ret)
  1575. return ret;
  1576. for (i = RTW89_MGNT; i <= RTW89_DATA; i++) {
  1577. ret = rtw89_mac_typ_fltr_opt(rtwdev, i, RTW89_FWD_TO_HOST,
  1578. mac_idx);
  1579. if (ret)
  1580. return ret;
  1581. }
  1582. mac_ftlr = rtwdev->hal.rx_fltr;
  1583. plcp_ftlr = B_AX_CCK_CRC_CHK | B_AX_CCK_SIG_CHK |
  1584. B_AX_LSIG_PARITY_CHK_EN | B_AX_SIGA_CRC_CHK |
  1585. B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK |
  1586. B_AX_HE_SIGB_CRC_CHK;
  1587. rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx),
  1588. mac_ftlr);
  1589. rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx),
  1590. plcp_ftlr);
  1591. return 0;
  1592. }
/* Chip-specific response-frame check patch: 8852A/8852B clear the NAV
 * and CCA response checks; all other chips set them.
 */
static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 reg, val32;
	u32 b_rsp_chk_nav, b_rsp_chk_cca;

	b_rsp_chk_nav = B_AX_RSP_CHK_TXNAV | B_AX_RSP_CHK_INTRA_NAV |
			B_AX_RSP_CHK_BASIC_NAV;
	b_rsp_chk_cca = B_AX_RSP_CHK_SEC_CCA_80 | B_AX_RSP_CHK_SEC_CCA_40 |
			B_AX_RSP_CHK_SEC_CCA_20 | B_AX_RSP_CHK_BTCCA |
			B_AX_RSP_CHK_EDCCA | B_AX_RSP_CHK_CCA;

	switch (rtwdev->chip->chip_id) {
	case RTL8852A:
	case RTL8852B:
		/* disable NAV and CCA checks before responding */
		reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx);
		val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav;
		rtw89_write32(rtwdev, reg, val32);
		reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx);
		val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca;
		rtw89_write32(rtwdev, reg, val32);
		break;
	default:
		/* later chips keep the checks enabled */
		reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx);
		val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav;
		rtw89_write32(rtwdev, reg, val32);
		reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx);
		val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca;
		rtw89_write32(rtwdev, reg, val32);
		break;
	}
}
/* Select which CCA/NAV sources gate TB, SIFS and contention TX on CMAC
 * @mac_idx (primary-20 CCA, EDCCA, BTCCA and NAV checks on; secondary
 * CCA and TX-NAV checks off), then apply the chip-specific response
 * check patch. Returns 0 or a negative error code.
 */
static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 val, reg;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(R_AX_CCA_CONTROL, mac_idx);
	val = rtw89_read32(rtwdev, reg);
	val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA |
		B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 |
		B_AX_SIFS_CHK_BTCCA | B_AX_SIFS_CHK_CCA_P20 |
		B_AX_CTN_CHK_INTRA_NAV |
		B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA |
		B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 |
		B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 |
		B_AX_CTN_CHK_CCA_P20);
	val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 |
		 B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 |
		 B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 |
		 B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV |
		 B_AX_SIFS_CHK_EDCCA);

	rtw89_write32(rtwdev, reg, val);

	_patch_dis_resp_chk(rtwdev, mac_idx);

	return 0;
}
/* Enable NAV update from PLCP headers and trigger frames, enable the NAV
 * upper bound and set it to 25ms. Always returns 0.
 */
static int nav_ctrl_init(struct rtw89_dev *rtwdev)
{
	rtw89_write32_set(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_PLCP_UP_NAV_EN |
						     B_AX_WMAC_TF_UP_NAV_EN |
						     B_AX_WMAC_NAV_UPPER_EN);
	rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_25MS);

	return 0;
}
  1656. static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx)
  1657. {
  1658. u32 reg;
  1659. int ret;
  1660. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  1661. if (ret)
  1662. return ret;
  1663. reg = rtw89_mac_reg_by_idx(R_AX_RX_SR_CTRL, mac_idx);
  1664. rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN);
  1665. return 0;
  1666. }
/* TX MAC init for CMAC @mac_idx: disable MAC loopback, set the TCR UDF
 * threshold and the TXD FIFO high/low MCS thresholds.
 * Returns 0 or a negative error code.
 */
static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 reg;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx);
	rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN);

	reg = rtw89_mac_reg_by_idx(R_AX_TCR0, mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_TCR_UDF_THSD_MASK, TCR_UDF_THSD);

	reg = rtw89_mac_reg_by_idx(R_AX_TXD_FIFO_CTRL, mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, TXDFIFO_HIGH_MCS_THRE);
	rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_LOW_MCS_THRE_MASK, TXDFIFO_LOW_MCS_THRE);

	return 0;
}
  1683. static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
  1684. {
  1685. const struct rtw89_chip_info *chip = rtwdev->chip;
  1686. const struct rtw89_rrsr_cfgs *rrsr = chip->rrsr_cfgs;
  1687. u32 reg, val, sifs;
  1688. int ret;
  1689. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  1690. if (ret)
  1691. return ret;
  1692. reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx);
  1693. val = rtw89_read32(rtwdev, reg);
  1694. val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK;
  1695. val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK);
  1696. switch (rtwdev->chip->chip_id) {
  1697. case RTL8852A:
  1698. sifs = WMAC_SPEC_SIFS_OFDM_52A;
  1699. break;
  1700. case RTL8852B:
  1701. sifs = WMAC_SPEC_SIFS_OFDM_52B;
  1702. break;
  1703. default:
  1704. sifs = WMAC_SPEC_SIFS_OFDM_52C;
  1705. break;
  1706. }
  1707. val &= ~B_AX_WMAC_SPEC_SIFS_OFDM_MASK;
  1708. val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs);
  1709. rtw89_write32(rtwdev, reg, val);
  1710. reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx);
  1711. rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN);
  1712. reg = rtw89_mac_reg_by_idx(rrsr->ref_rate.addr, mac_idx);
  1713. rtw89_write32_mask(rtwdev, reg, rrsr->ref_rate.mask, rrsr->ref_rate.data);
  1714. reg = rtw89_mac_reg_by_idx(rrsr->rsc.addr, mac_idx);
  1715. rtw89_write32_mask(rtwdev, reg, rrsr->rsc.mask, rrsr->rsc.data);
  1716. return 0;
  1717. }
/* Reset all entries of the block-ack CAM and poll (atomic context, up to
 * 1ms) for the self-clearing reset field; warns on timeout.
 */
static void rst_bacam(struct rtw89_dev *rtwdev)
{
	u32 val32;
	int ret;

	rtw89_write32_mask(rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK,
			   S_AX_BACAM_RST_ALL);
	ret = read_poll_timeout_atomic(rtw89_read32_mask, val32, val32 == 0,
				       1, 1000, false,
				       rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK);
	if (ret)
		rtw89_warn(rtwdev, "failed to reset BA CAM\n");
}
/* RX MAC init for CMAC @mac_idx: reset the BA CAM (band 0 only), select
 * SSN, set RX deadlock-protect timeouts, enable the RX channel, and set
 * the maximum RX MPDU length from the band's PLE RX quota.
 * Returns 0 or a negative error code.
 */
static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
#define TRXCFG_RMAC_CCA_TO 32
#define TRXCFG_RMAC_DATA_TO 15
#define RX_MAX_LEN_UNIT 512
#define PLD_RLS_MAX_PG 127
#define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT)
	int ret;
	u32 reg, rx_max_len, rx_qta;
	u16 val;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	if (mac_idx == RTW89_MAC_0)
		rst_bacam(rtwdev);

	reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx);
	rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL);

	/* RX deadlock protection: data and CCA timeouts */
	reg = rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx);
	val = rtw89_read16(rtwdev, reg);
	val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO,
			       B_AX_RX_DLK_DATA_TIME_MASK);
	val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO,
			       B_AX_RX_DLK_CCA_TIME_MASK);
	rtw89_write16(rtwdev, reg, val);

	reg = rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx);
	rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1);

	/* max RX MPDU len = capped band RX quota * PLE page size, capped at
	 * the spec maximum, expressed in RX_MAX_LEN_UNIT-byte units
	 */
	reg = rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx);
	if (mac_idx == RTW89_MAC_0)
		rx_qta = rtwdev->mac.dle_info.c0_rx_qta;
	else
		rx_qta = rtwdev->mac.dle_info.c1_rx_qta;
	rx_qta = min_t(u32, rx_qta, PLD_RLS_MAX_PG);
	rx_max_len = rx_qta * rtwdev->mac.dle_info.ple_pg_size;
	rx_max_len = min_t(u32, rx_max_len, RX_SPEC_MAX_LEN);
	rx_max_len /= RX_MAX_LEN_UNIT;
	rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len);

	/* NOTE(review): 8852A CBV workaround — clears the CCA timeout and
	 * sets RCR bit 12; the exact bit meaning is not visible here.
	 */
	if (rtwdev->chip->chip_id == RTL8852A &&
	    rtwdev->hal.cv == CHIP_CBV) {
		rtw89_write16_mask(rtwdev,
				   rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx),
				   B_AX_RX_DLK_CCA_TIME_MASK, 0);
		rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx),
				  BIT(12));
	}

	reg = rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx);
	rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK);

	return ret;
}
/* CMAC common init for @mac_idx: zero the 20/40/80MHz TX sub-carrier
 * values and, on 8852A/8852B, enable OFDM+CCK rates in RRSR1.
 * Returns 0 or a negative error code.
 */
static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 val, reg;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx);
	val = rtw89_read32(rtwdev, reg);
	val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK);
	val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK);
	val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK);
	rtw89_write32(rtwdev, reg, val);

	if (chip_id == RTL8852A || chip_id == RTL8852B) {
		reg = rtw89_mac_reg_by_idx(R_AX_PTCL_RRSR1, mac_idx);
		rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN);
	}

	return 0;
}
  1798. static bool is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
  1799. {
  1800. const struct rtw89_dle_mem *cfg;
  1801. cfg = get_dle_mem_cfg(rtwdev, mode);
  1802. if (!cfg) {
  1803. rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n");
  1804. return false;
  1805. }
  1806. return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma);
  1807. }
/* PTCL (protocol) init for CMAC @mac_idx. On PCIe hosts: enable HW
 * CTS2SELF with packet-length thresholds and a 2ms TX-arbiter timeout.
 * Then set band-specific TX mode bits and route special PTCL reports to
 * the WLAN CPU. Returns 0 or a negative error code.
 */
static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 val, reg;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
		reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx);
		val = rtw89_read32(rtwdev, reg);
		val = u32_replace_bits(val, S_AX_CTS2S_TH_1K,
				       B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK);
		val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B,
				       B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK);
		val |= B_AX_HW_CTS2SELF_EN;
		rtw89_write32(rtwdev, reg, val);

		reg = rtw89_mac_reg_by_idx(R_AX_PTCL_FSM_MON, mac_idx);
		val = rtw89_read32(rtwdev, reg);
		val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK);
		val &= ~B_AX_PTCL_TX_ARB_TO_MODE;
		rtw89_write32(rtwdev, reg, val);
	}

	if (mac_idx == RTW89_MAC_0) {
		rtw89_write8_set(rtwdev, R_AX_PTCL_COMMON_SETTING_0,
				 B_AX_CMAC_TX_MODE_0 | B_AX_CMAC_TX_MODE_1);
		rtw89_write8_clr(rtwdev, R_AX_PTCL_COMMON_SETTING_0,
				 B_AX_PTCL_TRIGGER_SS_EN_0 |
				 B_AX_PTCL_TRIGGER_SS_EN_1 |
				 B_AX_PTCL_TRIGGER_SS_EN_UL);
		rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL,
				  B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
	} else if (mac_idx == RTW89_MAC_1) {
		rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL_C1,
				  B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
	}

	return 0;
}
  1845. static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx)
  1846. {
  1847. enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
  1848. u32 reg;
  1849. int ret;
  1850. if (chip_id != RTL8852B)
  1851. return 0;
  1852. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  1853. if (ret)
  1854. return ret;
  1855. reg = rtw89_mac_reg_by_idx(R_AX_RXDMA_CTRL_0, mac_idx);
  1856. rtw89_write8_clr(rtwdev, reg, RX_FULL_MODE);
  1857. return 0;
  1858. }
/* Full CMAC bring-up for @mac_idx: run every per-CMAC init step in order
 * and stop at the first failure. Returns 0 or the failing step's error.
 */
static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	int ret;

	ret = scheduler_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret);
		return ret;
	}

	ret = addr_cam_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx,
			  ret);
		return ret;
	}

	ret = rx_fltr_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx,
			  ret);
		return ret;
	}

	ret = cca_ctrl_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx,
			  ret);
		return ret;
	}

	ret = nav_ctrl_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d NAV CTRL init %d\n", mac_idx,
			  ret);
		return ret;
	}

	ret = spatial_reuse_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n",
			  mac_idx, ret);
		return ret;
	}

	ret = tmac_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret);
		return ret;
	}

	ret = trxptcl_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret);
		return ret;
	}

	ret = rmac_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret);
		return ret;
	}

	ret = cmac_com_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret);
		return ret;
	}

	ret = ptcl_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret);
		return ret;
	}

	ret = cmac_dma_init(rtwdev, mac_idx);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d DMA init %d\n", mac_idx, ret);
		return ret;
	}

	return ret;
}
  1929. static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev,
  1930. struct rtw89_mac_c2h_info *c2h_info)
  1931. {
  1932. struct rtw89_mac_h2c_info h2c_info = {0};
  1933. u32 ret;
  1934. h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE;
  1935. h2c_info.content_len = 0;
  1936. ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info);
  1937. if (ret)
  1938. return ret;
  1939. if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP)
  1940. return -EINVAL;
  1941. return 0;
  1942. }
  1943. int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
  1944. {
  1945. struct rtw89_hal *hal = &rtwdev->hal;
  1946. const struct rtw89_chip_info *chip = rtwdev->chip;
  1947. struct rtw89_mac_c2h_info c2h_info = {0};
  1948. u8 tx_nss;
  1949. u8 rx_nss;
  1950. u8 tx_ant;
  1951. u8 rx_ant;
  1952. u32 ret;
  1953. ret = rtw89_mac_read_phycap(rtwdev, &c2h_info);
  1954. if (ret)
  1955. return ret;
  1956. tx_nss = RTW89_GET_C2H_PHYCAP_TX_NSS(c2h_info.c2hreg);
  1957. rx_nss = RTW89_GET_C2H_PHYCAP_RX_NSS(c2h_info.c2hreg);
  1958. tx_ant = RTW89_GET_C2H_PHYCAP_ANT_TX_NUM(c2h_info.c2hreg);
  1959. rx_ant = RTW89_GET_C2H_PHYCAP_ANT_RX_NUM(c2h_info.c2hreg);
  1960. hal->tx_nss = tx_nss ? min_t(u8, tx_nss, chip->tx_nss) : chip->tx_nss;
  1961. hal->rx_nss = rx_nss ? min_t(u8, rx_nss, chip->rx_nss) : chip->rx_nss;
  1962. if (tx_ant == 1)
  1963. hal->antenna_tx = RF_B;
  1964. if (rx_ant == 1)
  1965. hal->antenna_rx = RF_B;
  1966. if (tx_nss == 1 && tx_ant == 2 && rx_ant == 2) {
  1967. hal->antenna_tx = RF_B;
  1968. hal->tx_path_diversity = true;
  1969. }
  1970. rtw89_debug(rtwdev, RTW89_DBG_FW,
  1971. "phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n",
  1972. hal->tx_nss, tx_nss, chip->tx_nss,
  1973. hal->rx_nss, rx_nss, chip->rx_nss);
  1974. rtw89_debug(rtwdev, RTW89_DBG_FW,
  1975. "ant num/bitmap: tx=%d/0x%x rx=%d/0x%x\n",
  1976. tx_ant, hal->antenna_tx, rx_ant, hal->antenna_rx);
  1977. rtw89_debug(rtwdev, RTW89_DBG_FW, "TX path diversity=%d\n", hal->tx_path_diversity);
  1978. return 0;
  1979. }
  1980. static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band,
  1981. u16 tx_en_u16, u16 mask_u16)
  1982. {
  1983. u32 ret;
  1984. struct rtw89_mac_c2h_info c2h_info = {0};
  1985. struct rtw89_mac_h2c_info h2c_info = {0};
  1986. struct rtw89_h2creg_sch_tx_en *h2creg =
  1987. (struct rtw89_h2creg_sch_tx_en *)h2c_info.h2creg;
  1988. h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN;
  1989. h2c_info.content_len = sizeof(*h2creg) - RTW89_H2CREG_HDR_LEN;
  1990. h2creg->tx_en = tx_en_u16;
  1991. h2creg->mask = mask_u16;
  1992. h2creg->band = band;
  1993. ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
  1994. if (ret)
  1995. return ret;
  1996. if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT)
  1997. return -EINVAL;
  1998. return 0;
  1999. }
  2000. static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx,
  2001. u16 tx_en, u16 tx_en_mask)
  2002. {
  2003. u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx);
  2004. u16 val;
  2005. int ret;
  2006. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  2007. if (ret)
  2008. return ret;
  2009. if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
  2010. return rtw89_hw_sch_tx_en_h2c(rtwdev, mac_idx,
  2011. tx_en, tx_en_mask);
  2012. val = rtw89_read16(rtwdev, reg);
  2013. val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
  2014. rtw89_write16(rtwdev, reg, val);
  2015. return 0;
  2016. }
  2017. static int rtw89_set_hw_sch_tx_en_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
  2018. u32 tx_en, u32 tx_en_mask)
  2019. {
  2020. u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx);
  2021. u32 val;
  2022. int ret;
  2023. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  2024. if (ret)
  2025. return ret;
  2026. val = rtw89_read32(rtwdev, reg);
  2027. val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
  2028. rtw89_write32(rtwdev, reg, val);
  2029. return 0;
  2030. }
  2031. int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
  2032. u32 *tx_en, enum rtw89_sch_tx_sel sel)
  2033. {
  2034. int ret;
  2035. *tx_en = rtw89_read16(rtwdev,
  2036. rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx));
  2037. switch (sel) {
  2038. case RTW89_SCH_TX_SEL_ALL:
  2039. ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
  2040. B_AX_CTN_TXEN_ALL_MASK);
  2041. if (ret)
  2042. return ret;
  2043. break;
  2044. case RTW89_SCH_TX_SEL_HIQ:
  2045. ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx,
  2046. 0, B_AX_CTN_TXEN_HGQ);
  2047. if (ret)
  2048. return ret;
  2049. break;
  2050. case RTW89_SCH_TX_SEL_MG0:
  2051. ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx,
  2052. 0, B_AX_CTN_TXEN_MGQ);
  2053. if (ret)
  2054. return ret;
  2055. break;
  2056. case RTW89_SCH_TX_SEL_MACID:
  2057. ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
  2058. B_AX_CTN_TXEN_ALL_MASK);
  2059. if (ret)
  2060. return ret;
  2061. break;
  2062. default:
  2063. return 0;
  2064. }
  2065. return 0;
  2066. }
  2067. EXPORT_SYMBOL(rtw89_mac_stop_sch_tx);
  2068. int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
  2069. u32 *tx_en, enum rtw89_sch_tx_sel sel)
  2070. {
  2071. int ret;
  2072. *tx_en = rtw89_read32(rtwdev,
  2073. rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx));
  2074. switch (sel) {
  2075. case RTW89_SCH_TX_SEL_ALL:
  2076. ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
  2077. B_AX_CTN_TXEN_ALL_MASK_V1);
  2078. if (ret)
  2079. return ret;
  2080. break;
  2081. case RTW89_SCH_TX_SEL_HIQ:
  2082. ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
  2083. 0, B_AX_CTN_TXEN_HGQ);
  2084. if (ret)
  2085. return ret;
  2086. break;
  2087. case RTW89_SCH_TX_SEL_MG0:
  2088. ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
  2089. 0, B_AX_CTN_TXEN_MGQ);
  2090. if (ret)
  2091. return ret;
  2092. break;
  2093. case RTW89_SCH_TX_SEL_MACID:
  2094. ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
  2095. B_AX_CTN_TXEN_ALL_MASK_V1);
  2096. if (ret)
  2097. return ret;
  2098. break;
  2099. default:
  2100. return 0;
  2101. }
  2102. return 0;
  2103. }
  2104. EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v1);
  2105. int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
  2106. {
  2107. int ret;
  2108. ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, B_AX_CTN_TXEN_ALL_MASK);
  2109. if (ret)
  2110. return ret;
  2111. return 0;
  2112. }
  2113. EXPORT_SYMBOL(rtw89_mac_resume_sch_tx);
  2114. int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
  2115. {
  2116. int ret;
  2117. ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, tx_en,
  2118. B_AX_CTN_TXEN_ALL_MASK_V1);
  2119. if (ret)
  2120. return ret;
  2121. return 0;
  2122. }
  2123. EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1);
  2124. u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd)
  2125. {
  2126. u32 val, reg;
  2127. int ret;
  2128. reg = wd ? R_AX_WD_BUF_REQ : R_AX_PL_BUF_REQ;
  2129. val = buf_len;
  2130. val |= B_AX_WD_BUF_REQ_EXEC;
  2131. rtw89_write32(rtwdev, reg, val);
  2132. reg = wd ? R_AX_WD_BUF_STATUS : R_AX_PL_BUF_STATUS;
  2133. ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_BUF_STAT_DONE,
  2134. 1, 2000, false, rtwdev, reg);
  2135. if (ret)
  2136. return 0xffff;
  2137. return FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val);
  2138. }
/* Issue a CPUIO command to the WDE (@wd == true) or PLE DLE engine.
 * Programs the OP_2/OP_1/OP_0 register triplet and polls the status
 * register for completion (up to 2ms).  For the GET_1ST/GET_NEXT pid
 * commands the resulting packet id is written back to @ctrl_para->pktid.
 * Returns 0 on success or the read_poll_timeout() error.
 */
int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
			struct rtw89_cpuio_ctrl *ctrl_para, bool wd)
{
	u32 val, cmd_type, reg;
	int ret;

	cmd_type = ctrl_para->cmd_type;

	/* OP_2: start/end packet ids of the affected queue segment. */
	reg = wd ? R_AX_WD_CPUQ_OP_2 : R_AX_PL_CPUQ_OP_2;
	val = 0;
	val = u32_replace_bits(val, ctrl_para->start_pktid,
			       B_AX_WD_CPUQ_OP_STRT_PKTID_MASK);
	val = u32_replace_bits(val, ctrl_para->end_pktid,
			       B_AX_WD_CPUQ_OP_END_PKTID_MASK);
	rtw89_write32(rtwdev, reg, val);

	/* OP_1: source and destination port/queue ids. */
	reg = wd ? R_AX_WD_CPUQ_OP_1 : R_AX_PL_CPUQ_OP_1;
	val = 0;
	val = u32_replace_bits(val, ctrl_para->src_pid,
			       B_AX_CPUQ_OP_SRC_PID_MASK);
	val = u32_replace_bits(val, ctrl_para->src_qid,
			       B_AX_CPUQ_OP_SRC_QID_MASK);
	val = u32_replace_bits(val, ctrl_para->dst_pid,
			       B_AX_CPUQ_OP_DST_PID_MASK);
	val = u32_replace_bits(val, ctrl_para->dst_qid,
			       B_AX_CPUQ_OP_DST_QID_MASK);
	rtw89_write32(rtwdev, reg, val);

	/* OP_0 is written last: command type, macid and packet count,
	 * with the EXEC bit starting the operation.
	 */
	reg = wd ? R_AX_WD_CPUQ_OP_0 : R_AX_PL_CPUQ_OP_0;
	val = 0;
	val = u32_replace_bits(val, cmd_type,
			       B_AX_CPUQ_OP_CMD_TYPE_MASK);
	val = u32_replace_bits(val, ctrl_para->macid,
			       B_AX_CPUQ_OP_MACID_MASK);
	val = u32_replace_bits(val, ctrl_para->pkt_num,
			       B_AX_CPUQ_OP_PKTNUM_MASK);
	val |= B_AX_WD_CPUQ_OP_EXEC;
	rtw89_write32(rtwdev, reg, val);

	reg = wd ? R_AX_WD_CPUQ_OP_STATUS : R_AX_PL_CPUQ_OP_STATUS;
	ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_CPUQ_OP_STAT_DONE,
				1, 2000, false, rtwdev, reg);
	if (ret)
		return ret;

	/* GET commands report the found packet id in the status word. */
	if (cmd_type == CPUIO_OP_CMD_GET_1ST_PID ||
	    cmd_type == CPUIO_OP_CMD_GET_NEXT_PID)
		ctrl_para->pktid = FIELD_GET(B_AX_WD_CPUQ_OP_PKTID_MASK, val);

	return 0;
}
  2183. static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
  2184. {
  2185. const struct rtw89_dle_mem *cfg;
  2186. struct rtw89_cpuio_ctrl ctrl_para = {0};
  2187. u16 pkt_id;
  2188. int ret;
  2189. cfg = get_dle_mem_cfg(rtwdev, mode);
  2190. if (!cfg) {
  2191. rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
  2192. return -EINVAL;
  2193. }
  2194. if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
  2195. dle_expected_used_size(rtwdev, mode)) {
  2196. rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
  2197. return -EINVAL;
  2198. }
  2199. dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU);
  2200. pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, true);
  2201. if (pkt_id == 0xffff) {
  2202. rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n");
  2203. return -ENOMEM;
  2204. }
  2205. ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD;
  2206. ctrl_para.start_pktid = pkt_id;
  2207. ctrl_para.end_pktid = pkt_id;
  2208. ctrl_para.pkt_num = 0;
  2209. ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS;
  2210. ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT;
  2211. ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true);
  2212. if (ret) {
  2213. rtw89_err(rtwdev, "[ERR]WDE DLE enqueue to head\n");
  2214. return -EFAULT;
  2215. }
  2216. pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, false);
  2217. if (pkt_id == 0xffff) {
  2218. rtw89_err(rtwdev, "[ERR]PLE DLE buf req\n");
  2219. return -ENOMEM;
  2220. }
  2221. ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD;
  2222. ctrl_para.start_pktid = pkt_id;
  2223. ctrl_para.end_pktid = pkt_id;
  2224. ctrl_para.pkt_num = 0;
  2225. ctrl_para.dst_pid = PLE_DLE_PORT_ID_PLRLS;
  2226. ctrl_para.dst_qid = PLE_DLE_QUEID_NO_REPORT;
  2227. ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, false);
  2228. if (ret) {
  2229. rtw89_err(rtwdev, "[ERR]PLE DLE enqueue to head\n");
  2230. return -EFAULT;
  2231. }
  2232. return 0;
  2233. }
  2234. static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx)
  2235. {
  2236. int ret;
  2237. u32 reg;
  2238. u8 val;
  2239. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  2240. if (ret)
  2241. return ret;
  2242. reg = rtw89_mac_reg_by_idx(R_AX_PTCL_TX_CTN_SEL, mac_idx);
  2243. ret = read_poll_timeout(rtw89_read8, val,
  2244. (val & B_AX_PTCL_TX_ON_STAT) == 0,
  2245. SW_CVR_DUR_US,
  2246. SW_CVR_DUR_US * PTCL_IDLE_POLL_CNT,
  2247. false, rtwdev, reg);
  2248. if (ret)
  2249. return ret;
  2250. return 0;
  2251. }
/* Bring up CMAC1 (band 1) for DBCC: quiesce band 0 TX, re-balance the
 * DLE quota across both bands, restore band 0, then enable and
 * initialize CMAC1 and un-isolate its baseband blocks.
 */
static int band1_enable(struct rtw89_dev *rtwdev)
{
	int ret, i;
	u32 sleep_bak[4] = {0};
	u32 pause_bak[4] = {0};
	u32 tx_en;

	/* Stop all scheduled TX on band 0 while the quota is changed. */
	ret = rtw89_chip_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret);
		return ret;
	}

	/* Force-sleep/pause every macid, saving current state for restore. */
	for (i = 0; i < 4; i++) {
		sleep_bak[i] = rtw89_read32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4);
		pause_bak[i] = rtw89_read32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4);
		rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, U32_MAX);
		rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, U32_MAX);
	}

	/* Wait for the band 0 PTCL TX engine to drain before the change. */
	ret = band_idle_ck_b(rtwdev, 0);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]tx idle poll %d\n", ret);
		return ret;
	}

	ret = dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
		return ret;
	}

	/* Restore macid sleep/pause state and resume band 0 TX. */
	for (i = 0; i < 4; i++) {
		rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, sleep_bak[i]);
		rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]);
	}

	ret = rtw89_chip_resume_sch_tx(rtwdev, 0, tx_en);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret);
		return ret;
	}

	ret = cmac_func_en(rtwdev, 1, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC1 func en %d\n", ret);
		return ret;
	}

	ret = cmac_init(rtwdev, 1);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC1 init %d\n", ret);
		return ret;
	}

	/* Release isolation and enable the band 1 BB function blocks. */
	rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
			  B_AX_R_SYM_FEN_WLBBFUN_1 | B_AX_R_SYM_FEN_WLBBGLB_1);

	return 0;
}
/* Enable WD release-report error interrupts: clear the full mask first,
 * then set the chip-specific enable bits.
 */
static void rtw89_wdrls_imr_enable(struct rtw89_dev *rtwdev)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, B_AX_WDRLS_IMR_EN_CLR);
	rtw89_write32_set(rtwdev, R_AX_WDRLS_ERR_IMR, imr->wdrls_imr_set);
}
/* Enable security-engine error interrupts (register and bits are
 * chip-specific via imr_info).
 */
static void rtw89_wsec_imr_enable(struct rtw89_dev *rtwdev)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	rtw89_write32_set(rtwdev, imr->wsec_imr_reg, imr->wsec_imr_set);
}
/* Enable MPDU TX/RX processing error interrupts.  Several sources are
 * explicitly cleared first (8852C clears four additional TX parser
 * checks) before the chip-specific enable masks are applied.
 */
static void rtw89_mpdu_trx_imr_enable(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR,
			  B_AX_TX_GET_ERRPKTID_INT_EN |
			  B_AX_TX_NXT_ERRPKTID_INT_EN |
			  B_AX_TX_MPDU_SIZE_ZERO_INT_EN |
			  B_AX_TX_OFFSET_ERR_INT_EN |
			  B_AX_TX_HDR3_SIZE_ERR_INT_EN);
	if (chip_id == RTL8852C)
		rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR,
				  B_AX_TX_ETH_TYPE_ERR_EN |
				  B_AX_TX_LLC_PRE_ERR_EN |
				  B_AX_TX_NW_TYPE_ERR_EN |
				  B_AX_TX_KSRCH_ERR_EN);
	rtw89_write32_set(rtwdev, R_AX_MPDU_TX_ERR_IMR,
			  imr->mpdu_tx_imr_set);

	rtw89_write32_clr(rtwdev, R_AX_MPDU_RX_ERR_IMR,
			  B_AX_GETPKTID_ERR_INT_EN |
			  B_AX_MHDRLEN_ERR_INT_EN |
			  B_AX_RPT_ERR_INT_EN);
	rtw89_write32_set(rtwdev, R_AX_MPDU_RX_ERR_IMR,
			  imr->mpdu_rx_imr_set);
}
/* Enable station-scheduler error interrupts: mask off the search/report
 * hang timeouts and the PLE-B pktid error, then apply the chip set.
 */
static void rtw89_sta_sch_imr_enable(struct rtw89_dev *rtwdev)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	rtw89_write32_clr(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR,
			  B_AX_SEARCH_HANG_TIMEOUT_INT_EN |
			  B_AX_RPT_HANG_TIMEOUT_INT_EN |
			  B_AX_PLE_B_PKTID_ERR_INT_EN);
	rtw89_write32_set(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR,
			  imr->sta_sch_imr_set);
}
/* Enable TX packet-control error interrupts for both halves (b0/b1)
 * using chip-specific register addresses and clear/set masks.
 */
static void rtw89_txpktctl_imr_enable(struct rtw89_dev *rtwdev)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b0_reg,
			  imr->txpktctl_imr_b0_clr);
	rtw89_write32_set(rtwdev, imr->txpktctl_imr_b0_reg,
			  imr->txpktctl_imr_b0_set);
	rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b1_reg,
			  imr->txpktctl_imr_b1_clr);
	rtw89_write32_set(rtwdev, imr->txpktctl_imr_b1_reg,
			  imr->txpktctl_imr_b1_set);
}
/* Enable WDE (WD DLE) error interrupts with chip-specific masks. */
static void rtw89_wde_imr_enable(struct rtw89_dev *rtwdev)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	rtw89_write32_clr(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_clr);
	rtw89_write32_set(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_set);
}
/* Enable PLE (payload DLE) error interrupts with chip-specific masks. */
static void rtw89_ple_imr_enable(struct rtw89_dev *rtwdev)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_clr);
	rtw89_write32_set(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_set);
}
/* Enable the packet-in getpktid error interrupt. */
static void rtw89_pktin_imr_enable(struct rtw89_dev *rtwdev)
{
	rtw89_write32_set(rtwdev, R_AX_PKTIN_ERR_IMR,
			  B_AX_PKTIN_GETPKTID_ERR_INT_EN);
}
/* Enable dispatcher error interrupts for the host, CPU and "other"
 * dispatcher blocks, each with its chip-specific clear/set masks.
 */
static void rtw89_dispatcher_imr_enable(struct rtw89_dev *rtwdev)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
			  imr->host_disp_imr_clr);
	rtw89_write32_set(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
			  imr->host_disp_imr_set);
	rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
			  imr->cpu_disp_imr_clr);
	rtw89_write32_set(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
			  imr->cpu_disp_imr_set);
	rtw89_write32_clr(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR,
			  imr->other_disp_imr_clr);
	rtw89_write32_set(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR,
			  imr->other_disp_imr_set);
}
/* Enable CPUIO error interrupts (fixed clear/set masks, not per-chip). */
static void rtw89_cpuio_imr_enable(struct rtw89_dev *rtwdev)
{
	rtw89_write32_clr(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_CLR);
	rtw89_write32_set(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_SET);
}
/* Enable BB report path error interrupts across the common, channel-info,
 * DFS and logic-analyzer report registers.
 */
static void rtw89_bbrpt_imr_enable(struct rtw89_dev *rtwdev)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	rtw89_write32_set(rtwdev, imr->bbrpt_com_err_imr_reg,
			  B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN);
	rtw89_write32_clr(rtwdev, imr->bbrpt_chinfo_err_imr_reg,
			  B_AX_BBRPT_CHINFO_IMR_CLR);
	rtw89_write32_set(rtwdev, imr->bbrpt_chinfo_err_imr_reg,
			  imr->bbrpt_err_imr_set);
	rtw89_write32_set(rtwdev, imr->bbrpt_dfs_err_imr_reg,
			  B_AX_BBRPT_DFS_TO_ERR_INT_EN);
	rtw89_write32_set(rtwdev, R_AX_LA_ERRFLAG, B_AX_LA_IMR_DATA_LOSS_ERR);
}
/* Enable CMAC scheduler error interrupts for @mac_idx: clear both the
 * sort-non-idle and FSM-timeout bits, then re-enable FSM timeout only.
 */
static void rtw89_scheduler_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 reg;

	reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx);
	rtw89_write32_clr(rtwdev, reg, B_AX_SORT_NON_IDLE_ERR_INT_EN |
				       B_AX_FSM_TIMEOUT_ERR_INT_EN);
	rtw89_write32_set(rtwdev, reg, B_AX_FSM_TIMEOUT_ERR_INT_EN);
}
/* Enable PTCL error interrupts for @mac_idx with chip-specific masks. */
static void rtw89_ptcl_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
	u32 reg;

	reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx);
	rtw89_write32_clr(rtwdev, reg, imr->ptcl_imr_clr);
	rtw89_write32_set(rtwdev, reg, imr->ptcl_imr_set);
}
/* Enable CMAC DMA error interrupts for @mac_idx.  8852C has a second
 * CDMA IMR register that is configured as well.
 */
static void rtw89_cdma_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 reg;

	reg = rtw89_mac_reg_by_idx(imr->cdma_imr_0_reg, mac_idx);
	rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_0_clr);
	rtw89_write32_set(rtwdev, reg, imr->cdma_imr_0_set);

	if (chip_id == RTL8852C) {
		reg = rtw89_mac_reg_by_idx(imr->cdma_imr_1_reg, mac_idx);
		rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_1_clr);
		rtw89_write32_set(rtwdev, reg, imr->cdma_imr_1_set);
	}
}
/* Enable PHY-interface error interrupts for @mac_idx. */
static void rtw89_phy_intf_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
	u32 reg;

	reg = rtw89_mac_reg_by_idx(imr->phy_intf_imr_reg, mac_idx);
	rtw89_write32_clr(rtwdev, reg, imr->phy_intf_imr_clr);
	rtw89_write32_set(rtwdev, reg, imr->phy_intf_imr_set);
}
/* Enable RMAC (RX MAC) error interrupts for @mac_idx. */
static void rtw89_rmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
	u32 reg;

	reg = rtw89_mac_reg_by_idx(imr->rmac_imr_reg, mac_idx);
	rtw89_write32_clr(rtwdev, reg, imr->rmac_imr_clr);
	rtw89_write32_set(rtwdev, reg, imr->rmac_imr_set);
}
/* Enable TMAC (TX MAC) error interrupts for @mac_idx. */
static void rtw89_tmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
	u32 reg;

	reg = rtw89_mac_reg_by_idx(imr->tmac_imr_reg, mac_idx);
	rtw89_write32_clr(rtwdev, reg, imr->tmac_imr_clr);
	rtw89_write32_set(rtwdev, reg, imr->tmac_imr_set);
}
  2465. static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
  2466. enum rtw89_mac_hwmod_sel sel)
  2467. {
  2468. int ret;
  2469. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel);
  2470. if (ret) {
  2471. rtw89_err(rtwdev, "MAC%d mac_idx%d is not ready\n",
  2472. sel, mac_idx);
  2473. return ret;
  2474. }
  2475. if (sel == RTW89_DMAC_SEL) {
  2476. rtw89_wdrls_imr_enable(rtwdev);
  2477. rtw89_wsec_imr_enable(rtwdev);
  2478. rtw89_mpdu_trx_imr_enable(rtwdev);
  2479. rtw89_sta_sch_imr_enable(rtwdev);
  2480. rtw89_txpktctl_imr_enable(rtwdev);
  2481. rtw89_wde_imr_enable(rtwdev);
  2482. rtw89_ple_imr_enable(rtwdev);
  2483. rtw89_pktin_imr_enable(rtwdev);
  2484. rtw89_dispatcher_imr_enable(rtwdev);
  2485. rtw89_cpuio_imr_enable(rtwdev);
  2486. rtw89_bbrpt_imr_enable(rtwdev);
  2487. } else if (sel == RTW89_CMAC_SEL) {
  2488. rtw89_scheduler_imr_enable(rtwdev, mac_idx);
  2489. rtw89_ptcl_imr_enable(rtwdev, mac_idx);
  2490. rtw89_cdma_imr_enable(rtwdev, mac_idx);
  2491. rtw89_phy_intf_imr_enable(rtwdev, mac_idx);
  2492. rtw89_rmac_imr_enable(rtwdev, mac_idx);
  2493. rtw89_tmac_imr_enable(rtwdev, mac_idx);
  2494. } else {
  2495. return -EINVAL;
  2496. }
  2497. return 0;
  2498. }
/* Globally enable/disable the DMAC and CMAC0 error interrupt masks.
 * CMAC1's mask is touched only on chips other than 8852B and only when
 * band 1 actually has RX quota configured.
 */
static void rtw89_mac_err_imr_ctrl(struct rtw89_dev *rtwdev, bool en)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR,
		      en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS);
	rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR,
		      en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS);
	if (chip_id != RTL8852B && rtwdev->mac.dle_info.c1_rx_qta)
		rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1,
			      en ? CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS);
}
  2510. static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable)
  2511. {
  2512. int ret = 0;
  2513. if (enable) {
  2514. ret = band1_enable(rtwdev);
  2515. if (ret) {
  2516. rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret);
  2517. return ret;
  2518. }
  2519. ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
  2520. if (ret) {
  2521. rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret);
  2522. return ret;
  2523. }
  2524. } else {
  2525. rtw89_err(rtwdev, "[ERR] disable dbcc is not implemented not\n");
  2526. return -EINVAL;
  2527. }
  2528. return 0;
  2529. }
  2530. static int set_host_rpr(struct rtw89_dev *rtwdev)
  2531. {
  2532. if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
  2533. rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG,
  2534. B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_POH);
  2535. rtw89_write32_set(rtwdev, R_AX_RLSRPT0_CFG0,
  2536. B_AX_RLSRPT0_FLTR_MAP_MASK);
  2537. } else {
  2538. rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG,
  2539. B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_STF);
  2540. rtw89_write32_clr(rtwdev, R_AX_RLSRPT0_CFG0,
  2541. B_AX_RLSRPT0_FLTR_MAP_MASK);
  2542. }
  2543. rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_AGGNUM_MASK, 30);
  2544. rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_TO_MASK, 255);
  2545. return 0;
  2546. }
/* Initialize the TX/RX data path: DMAC and CMAC0, optionally DBCC
 * (CMAC1) when the quota mode requires it, then the interrupt masks and
 * the host release-report configuration.
 */
static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev)
{
	enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
	int ret;

	ret = dmac_init(rtwdev, 0);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret);
		return ret;
	}

	ret = cmac_init(rtwdev, 0);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret);
		return ret;
	}

	/* Dual-band quota modes additionally require band 1 bring-up. */
	if (is_qta_dbcc(rtwdev, qta_mode)) {
		ret = rtw89_mac_dbcc_enable(rtwdev, true);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret);
			return ret;
		}
	}

	ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret);
		return ret;
	}

	ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] to enable CMAC0 IMR %d\n", ret);
		return ret;
	}

	rtw89_mac_err_imr_ctrl(rtwdev, true);

	ret = set_host_rpr(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret);
		return ret;
	}

	return 0;
}
/* Quiesce the firmware CPU watchdog: disable all WDT functions, then
 * update the status word -- setting B_AX_FS_WDT_INT (presumably a
 * write-1-to-clear interrupt flag -- TODO confirm against the WDT
 * register spec) and clearing the interrupt mask bit.
 */
static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
	u32 val32;

	rtw89_mac_mem_write(rtwdev, R_AX_WDT_CTRL,
			    WDT_CTRL_ALL_DIS, RTW89_MAC_MEM_CPU_LOCAL);

	val32 = rtw89_mac_mem_read(rtwdev, R_AX_WDT_STATUS, RTW89_MAC_MEM_CPU_LOCAL);
	val32 |= B_AX_FS_WDT_INT;
	val32 &= ~B_AX_FS_WDT_INT_MSK;
	rtw89_mac_mem_write(rtwdev, R_AX_WDT_STATUS, val32, RTW89_MAC_MEM_CPU_LOCAL);
}
/* Hold the WLAN CPU (WCPU) in reset: mark firmware not ready, disable
 * the WCPU and its download/H2C paths, gate the CPU clock, silence the
 * firmware watchdog, and finally pulse the platform-enable bit to reset
 * the platform logic.
 */
static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
{
	clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
	rtw89_write32_clr(rtwdev, R_AX_WCPU_FW_CTRL, B_AX_WCPU_FWDL_EN |
			  B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
	rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);

	rtw89_disable_fw_watchdog(rtwdev);

	/* Toggle platform enable off/on to reset the platform block. */
	rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
	rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
}
/* Release the WLAN CPU (WCPU) from reset and optionally arm the
 * firmware-download path.  Fails with -EFAULT if the WCPU is already
 * running.  When no download follows (@dlfw == false), waits briefly and
 * checks that firmware reports ready.
 */
static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
				bool dlfw)
{
	u32 val;
	int ret;

	if (rtw89_read32(rtwdev, R_AX_PLATFORM_ENABLE) & B_AX_WCPU_EN)
		return -EFAULT;

	/* Clear stale halt-handshake state from any previous run. */
	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_H2C, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H, 0);

	rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);

	/* Reset the FWDL state machine; enable the download path only
	 * when a firmware download will follow.
	 */
	val = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	val &= ~(B_AX_WCPU_FWDL_EN | B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
	val = u32_replace_bits(val, RTW89_FWDL_INITIAL_STATE,
			       B_AX_WCPU_FWDL_STS_MASK);
	if (dlfw)
		val |= B_AX_WCPU_FWDL_EN;
	rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val);

	rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK,
			   boot_reason);

	/* Start the WCPU. */
	rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);

	if (!dlfw) {
		mdelay(5);

		ret = rtw89_fw_check_rdy(rtwdev);
		if (ret)
			return ret;
	}

	return 0;
}
/* Minimal DMAC bring-up needed before firmware download: enable the
 * DMAC function and clock domains, (8852C only) configure the host AXI
 * DMA, then run DLE init in download-firmware quota mode and HCI
 * flow-control pre-init.
 */
static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 val;
	int ret;

	/* 8852C additionally enables the host AXI DMA block. */
	if (chip_id == RTL8852C)
		val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
		      B_AX_PKT_BUF_EN | B_AX_H_AXIDMA_EN;
	else
		val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
		      B_AX_PKT_BUF_EN;
	rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val);

	val = B_AX_DISPATCHER_CLK_EN;
	rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val);

	if (chip_id != RTL8852C)
		goto dle;

	/* 8852C: select the DMA_MOD_PCIE_1B mode, enable the TX/RX HCI
	 * paths and un-stall all DMA channels.
	 */
	val = rtw89_read32(rtwdev, R_AX_HAXI_INIT_CFG1);
	val &= ~(B_AX_DMA_MODE_MASK | B_AX_STOP_AXI_MST);
	val |= FIELD_PREP(B_AX_DMA_MODE_MASK, DMA_MOD_PCIE_1B) |
	       B_AX_TXHCI_EN_V1 | B_AX_RXHCI_EN_V1;
	rtw89_write32(rtwdev, R_AX_HAXI_INIT_CFG1, val);

	rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP1,
			  B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | B_AX_STOP_ACH3 |
			  B_AX_STOP_ACH4 | B_AX_STOP_ACH5 | B_AX_STOP_ACH6 |
			  B_AX_STOP_ACH7 | B_AX_STOP_CH8 | B_AX_STOP_CH9 |
			  B_AX_STOP_CH12 | B_AX_STOP_ACH2);
	rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP2, B_AX_STOP_CH10 | B_AX_STOP_CH11);
	rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_AXIDMA_EN);

dle:
	ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret);
		return ret;
	}

	ret = hfc_init(rtwdev, true, false, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]HCI FC pre init %d\n", ret);
		return ret;
	}

	return ret;
}
/* Enable the baseband and RF blocks: release the BB resets, set the RF
 * control bits for both RF paths, and apply the PHYREG_SET all-cycle
 * setting (exact semantics are chip-specific -- see register spec).
 */
int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
	rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
			 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
	rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL,
			  B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
			  B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
	rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);

	return 0;
}
EXPORT_SYMBOL(rtw89_mac_enable_bb_rf);
/* Disable the baseband and RF blocks; exact mirror of
 * rtw89_mac_enable_bb_rf() with the same bits cleared.
 */
int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
	rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
			 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
	rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL,
			  B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
			  B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
	rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);

	return 0;
}
EXPORT_SYMBOL(rtw89_mac_disable_bb_rf);
/* Partial MAC init: power on the MAC (retrying once after a forced
 * power-off if the first attempt fails), enable HCI DMA, run the DMAC
 * and HCI pre-init steps, then boot the WCPU and download firmware.
 */
int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_mac_power_switch(rtwdev, true);
	if (ret) {
		/* Power-cycle once and retry before giving up. */
		rtw89_mac_power_switch(rtwdev, false);
		ret = rtw89_mac_power_switch(rtwdev, true);
		if (ret)
			return ret;
	}

	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);

	ret = rtw89_mac_dmac_pre_init(rtwdev);
	if (ret)
		return ret;

	if (rtwdev->hci.ops->mac_pre_init) {
		ret = rtwdev->hci.ops->mac_pre_init(rtwdev);
		if (ret)
			return ret;
	}

	/* Reset the WCPU, restart it with firmware download enabled. */
	rtw89_mac_disable_cpu(rtwdev);
	ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
	if (ret)
		return ret;

	ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL);
	if (ret)
		return ret;

	return 0;
}
/* Full MAC initialization: partial init (power + firmware), BB/RF
 * enable, system and TRX init, HCI post-init, then early H2C and
 * offload configuration.  On any failure the MAC is powered back off.
 */
int rtw89_mac_init(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_mac_partial_init(rtwdev);
	if (ret)
		goto fail;

	ret = rtw89_chip_enable_bb_rf(rtwdev);
	if (ret)
		goto fail;

	ret = rtw89_mac_sys_init(rtwdev);
	if (ret)
		goto fail;

	ret = rtw89_mac_trx_init(rtwdev);
	if (ret)
		goto fail;

	if (rtwdev->hci.ops->mac_post_init) {
		ret = rtwdev->hci.ops->mac_post_init(rtwdev);
		if (ret)
			goto fail;
	}

	rtw89_fw_send_all_early_h2c(rtwdev);
	rtw89_fw_h2c_set_ofld_cfg(rtwdev);

	return ret;
fail:
	rtw89_mac_power_switch(rtwdev, false);

	return ret;
}
  2755. static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid)
  2756. {
  2757. u8 i;
  2758. for (i = 0; i < 4; i++) {
  2759. rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR,
  2760. DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2));
  2761. rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0);
  2762. }
  2763. }
/* Program the default CMAC control-info table entry for @macid through
 * the indirect access window.  The dword values are vendor default
 * templates; individual field meanings are not decoded here -- refer to
 * the CCTL info layout in the chip documentation.
 */
static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid)
{
	rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR,
		      CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE);
	rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4);
	rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 4, 0x400A0004);
	rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 8, 0);
	rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 12, 0);
	rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 16, 0);
	rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 20, 0xE43000B);
	rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 24, 0);
	rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109);
}
  2777. int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
  2778. {
  2779. u8 sh = FIELD_GET(GENMASK(4, 0), macid);
  2780. u8 grp = macid >> 5;
  2781. int ret;
  2782. ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
  2783. if (ret)
  2784. return ret;
  2785. rtw89_fw_h2c_macid_pause(rtwdev, sh, grp, pause);
  2786. return 0;
  2787. }
/* Register addresses for port 0; per-port instances are presumably
 * derived by the rtw89_*_port accessors from these bases -- verify
 * against the port accessor implementation.
 */
static const struct rtw89_port_reg rtw_port_base = {
	.port_cfg = R_AX_PORT_CFG_P0,
	.tbtt_prohib = R_AX_TBTT_PROHIB_P0,
	.bcn_area = R_AX_BCN_AREA_P0,
	.bcn_early = R_AX_BCNERLYINT_CFG_P0,
	.tbtt_early = R_AX_TBTTERLYINT_CFG_P0,
	.tbtt_agg = R_AX_TBTT_AGG_P0,
	.bcn_space = R_AX_BCN_SPACE_CFG_P0,
	.bcn_forcetx = R_AX_BCN_FORCETX_P0,
	.bcn_err_cnt = R_AX_BCN_ERR_CNT_P0,
	.bcn_err_flag = R_AX_BCN_ERR_FLAG_P0,
	.dtim_ctrl = R_AX_DTIM_CTRL_P0,
	.tbtt_shift = R_AX_TBTT_SHIFT_P0,
	.bcn_cnt_tmr = R_AX_BCN_CNT_TMR_P0,
	.tsftr_l = R_AX_TSFTR_LOW_P0,
	.tsftr_h = R_AX_TSFTR_HIGH_P0
};
/* Beacon/TBTT defaults used by the port configuration helpers below.
 * NOTE(review): values are presumably in TU / hardware units per field —
 * confirm against the vendor register spec.
 */
#define BCN_INTERVAL 100	/* fallback beacon interval when bss_conf has none */
#define BCN_ERLY_DEF 160	/* default beacon-early interrupt lead time */
#define BCN_SETUP_DEF 2		/* default TBTT setup time */
#define BCN_HOLD_DEF 200	/* default TBTT hold time */
#define BCN_MASK_DEF 0		/* default beacon mask area (disabled) */
#define TBTT_ERLY_DEF 5		/* default TBTT-early interrupt lead time */
#define BCN_SET_UNIT 32		/* granularity of beacon setup programming */
#define BCN_ERLY_SET_DLY (10 * 2)	/* delay before programming bcn early (us) */
  2813. static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev,
  2814. struct rtw89_vif *rtwvif)
  2815. {
  2816. struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
  2817. const struct rtw89_port_reg *p = &rtw_port_base;
  2818. if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN))
  2819. return;
  2820. rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK);
  2821. rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1);
  2822. rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK);
  2823. rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK);
  2824. msleep(vif->bss_conf.beacon_int + 1);
  2825. rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN |
  2826. B_AX_BRK_SETUP);
  2827. rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSFTR_RST);
  2828. rtw89_write32_port(rtwdev, rtwvif, p->bcn_cnt_tmr, 0);
  2829. }
  2830. static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev,
  2831. struct rtw89_vif *rtwvif, bool en)
  2832. {
  2833. const struct rtw89_port_reg *p = &rtw_port_base;
  2834. if (en)
  2835. rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN);
  2836. else
  2837. rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN);
  2838. }
  2839. static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev,
  2840. struct rtw89_vif *rtwvif, bool en)
  2841. {
  2842. const struct rtw89_port_reg *p = &rtw_port_base;
  2843. if (en)
  2844. rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN);
  2845. else
  2846. rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN);
  2847. }
  2848. static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev,
  2849. struct rtw89_vif *rtwvif)
  2850. {
  2851. const struct rtw89_port_reg *p = &rtw_port_base;
  2852. rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK,
  2853. rtwvif->net_type);
  2854. }
  2855. static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev,
  2856. struct rtw89_vif *rtwvif)
  2857. {
  2858. const struct rtw89_port_reg *p = &rtw_port_base;
  2859. bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
  2860. u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP;
  2861. if (en)
  2862. rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bits);
  2863. else
  2864. rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bits);
  2865. }
  2866. static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev,
  2867. struct rtw89_vif *rtwvif)
  2868. {
  2869. const struct rtw89_port_reg *p = &rtw_port_base;
  2870. bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
  2871. rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
  2872. u32 bit = B_AX_RX_BSSID_FIT_EN;
  2873. if (en)
  2874. rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bit);
  2875. else
  2876. rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit);
  2877. }
  2878. static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
  2879. struct rtw89_vif *rtwvif)
  2880. {
  2881. const struct rtw89_port_reg *p = &rtw_port_base;
  2882. bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
  2883. rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
  2884. if (en)
  2885. rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
  2886. else
  2887. rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
  2888. }
  2889. static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev,
  2890. struct rtw89_vif *rtwvif)
  2891. {
  2892. const struct rtw89_port_reg *p = &rtw_port_base;
  2893. bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ||
  2894. rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
  2895. if (en)
  2896. rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN);
  2897. else
  2898. rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN);
  2899. }
  2900. static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev,
  2901. struct rtw89_vif *rtwvif)
  2902. {
  2903. struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
  2904. const struct rtw89_port_reg *p = &rtw_port_base;
  2905. u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL;
  2906. rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK,
  2907. bcn_int);
  2908. }
  2909. static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev,
  2910. struct rtw89_vif *rtwvif)
  2911. {
  2912. static const u32 hiq_win_addr[RTW89_PORT_NUM] = {
  2913. R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
  2914. R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
  2915. R_AX_PORT_HGQ_WINDOW_CFG + 3,
  2916. };
  2917. u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 16 : 0;
  2918. u8 port = rtwvif->port;
  2919. u32 reg;
  2920. reg = rtw89_mac_reg_by_idx(hiq_win_addr[port], rtwvif->mac_idx);
  2921. rtw89_write8(rtwdev, reg, win);
  2922. }
  2923. static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev,
  2924. struct rtw89_vif *rtwvif)
  2925. {
  2926. struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
  2927. const struct rtw89_port_reg *p = &rtw_port_base;
  2928. u32 addr;
  2929. addr = rtw89_mac_reg_by_idx(R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx);
  2930. rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE);
  2931. rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK,
  2932. vif->bss_conf.dtim_period);
  2933. }
  2934. static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev,
  2935. struct rtw89_vif *rtwvif)
  2936. {
  2937. const struct rtw89_port_reg *p = &rtw_port_base;
  2938. rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib,
  2939. B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF);
  2940. }
  2941. static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev,
  2942. struct rtw89_vif *rtwvif)
  2943. {
  2944. const struct rtw89_port_reg *p = &rtw_port_base;
  2945. rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib,
  2946. B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF);
  2947. }
  2948. static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev,
  2949. struct rtw89_vif *rtwvif)
  2950. {
  2951. const struct rtw89_port_reg *p = &rtw_port_base;
  2952. rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area,
  2953. B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF);
  2954. }
  2955. static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev,
  2956. struct rtw89_vif *rtwvif)
  2957. {
  2958. const struct rtw89_port_reg *p = &rtw_port_base;
  2959. rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early,
  2960. B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF);
  2961. }
  2962. static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev,
  2963. struct rtw89_vif *rtwvif)
  2964. {
  2965. struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
  2966. static const u32 masks[RTW89_PORT_NUM] = {
  2967. B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK,
  2968. B_AX_BSS_COLOB_AX_PORT_2_MASK, B_AX_BSS_COLOB_AX_PORT_3_MASK,
  2969. B_AX_BSS_COLOB_AX_PORT_4_MASK,
  2970. };
  2971. u8 port = rtwvif->port;
  2972. u32 reg_base;
  2973. u32 reg;
  2974. u8 bss_color;
  2975. bss_color = vif->bss_conf.he_bss_color.color;
  2976. reg_base = port >= 4 ? R_AX_PTCL_BSS_COLOR_1 : R_AX_PTCL_BSS_COLOR_0;
  2977. reg = rtw89_mac_reg_by_idx(reg_base, rtwvif->mac_idx);
  2978. rtw89_write32_mask(rtwdev, reg, masks[port], bss_color);
  2979. }
  2980. static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev,
  2981. struct rtw89_vif *rtwvif)
  2982. {
  2983. u8 port = rtwvif->port;
  2984. u32 reg;
  2985. if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
  2986. return;
  2987. if (port == 0) {
  2988. reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_CTRL, rtwvif->mac_idx);
  2989. rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK);
  2990. }
  2991. }
  2992. static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev,
  2993. struct rtw89_vif *rtwvif)
  2994. {
  2995. u8 port = rtwvif->port;
  2996. u32 reg;
  2997. u32 val;
  2998. reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_DROP_0, rtwvif->mac_idx);
  2999. val = rtw89_read32(rtwdev, reg);
  3000. val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port));
  3001. if (port == 0)
  3002. val &= ~BIT(0);
  3003. rtw89_write32(rtwdev, reg, val);
  3004. }
  3005. static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev,
  3006. struct rtw89_vif *rtwvif)
  3007. {
  3008. const struct rtw89_port_reg *p = &rtw_port_base;
  3009. rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN);
  3010. }
  3011. static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev,
  3012. struct rtw89_vif *rtwvif)
  3013. {
  3014. const struct rtw89_port_reg *p = &rtw_port_base;
  3015. rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK,
  3016. BCN_ERLY_DEF);
  3017. }
  3018. static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
  3019. struct rtw89_vif *rtwvif)
  3020. {
  3021. const struct rtw89_port_reg *p = &rtw_port_base;
  3022. u16 val;
  3023. if (rtwdev->chip->chip_id != RTL8852C)
  3024. return;
  3025. if (rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT &&
  3026. rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
  3027. return;
  3028. val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) |
  3029. B_AX_TBTT_SHIFT_OFST_SIGN;
  3030. rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_shift,
  3031. B_AX_TBTT_SHIFT_OFST_MASK, val);
  3032. }
  3033. int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
  3034. {
  3035. int ret;
  3036. ret = rtw89_mac_port_update(rtwdev, rtwvif);
  3037. if (ret)
  3038. return ret;
  3039. rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id);
  3040. rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id);
  3041. ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif->mac_id, false);
  3042. if (ret)
  3043. return ret;
  3044. ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_CREATE);
  3045. if (ret)
  3046. return ret;
  3047. ret = rtw89_cam_init(rtwdev, rtwvif);
  3048. if (ret)
  3049. return ret;
  3050. ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
  3051. if (ret)
  3052. return ret;
  3053. ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif);
  3054. if (ret)
  3055. return ret;
  3056. return 0;
  3057. }
  3058. int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
  3059. {
  3060. int ret;
  3061. ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_REMOVE);
  3062. if (ret)
  3063. return ret;
  3064. rtw89_cam_deinit(rtwdev, rtwvif);
  3065. ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
  3066. if (ret)
  3067. return ret;
  3068. return 0;
  3069. }
  3070. int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
  3071. {
  3072. u8 port = rtwvif->port;
  3073. if (port >= RTW89_PORT_NUM)
  3074. return -EINVAL;
  3075. rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif);
  3076. rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif, false);
  3077. rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif, false);
  3078. rtw89_mac_port_cfg_net_type(rtwdev, rtwvif);
  3079. rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif);
  3080. rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif);
  3081. rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
  3082. rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif);
  3083. rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
  3084. rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
  3085. rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif);
  3086. rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
  3087. rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif);
  3088. rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif);
  3089. rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif);
  3090. rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif);
  3091. rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif);
  3092. rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif);
  3093. rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif);
  3094. rtw89_mac_port_cfg_func_en(rtwdev, rtwvif);
  3095. fsleep(BCN_ERLY_SET_DLY);
  3096. rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif);
  3097. return 0;
  3098. }
  3099. static void rtw89_mac_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
  3100. struct cfg80211_bss *bss,
  3101. void *data)
  3102. {
  3103. const struct cfg80211_bss_ies *ies;
  3104. const struct element *elem;
  3105. bool *tolerated = data;
  3106. rcu_read_lock();
  3107. ies = rcu_dereference(bss->ies);
  3108. elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
  3109. ies->len);
  3110. if (!elem || elem->datalen < 10 ||
  3111. !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
  3112. *tolerated = false;
  3113. rcu_read_unlock();
  3114. }
  3115. void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
  3116. struct ieee80211_vif *vif)
  3117. {
  3118. struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
  3119. struct ieee80211_hw *hw = rtwdev->hw;
  3120. bool tolerated = true;
  3121. u32 reg;
  3122. if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION)
  3123. return;
  3124. if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
  3125. return;
  3126. cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
  3127. rtw89_mac_check_he_obss_narrow_bw_ru_iter,
  3128. &tolerated);
  3129. reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx);
  3130. if (tolerated)
  3131. rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
  3132. else
  3133. rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
  3134. }
  3135. int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
  3136. {
  3137. int ret;
  3138. rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
  3139. RTW89_MAX_MAC_ID_NUM);
  3140. if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM)
  3141. return -ENOSPC;
  3142. ret = rtw89_mac_vif_init(rtwdev, rtwvif);
  3143. if (ret)
  3144. goto release_mac_id;
  3145. return 0;
  3146. release_mac_id:
  3147. rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id);
  3148. return ret;
  3149. }
  3150. int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
  3151. {
  3152. int ret;
  3153. ret = rtw89_mac_vif_deinit(rtwdev, rtwvif);
  3154. rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id);
  3155. return ret;
  3156. }
/* C2H handler stub: macid-pause events are consumed but require no
 * driver action; registered so the event is not reported unsupported.
 */
static void
rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
  3161. static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
  3162. {
  3163. struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
  3164. return band == scan_info->op_band && channel == scan_info->op_pri_ch;
  3165. }
/* Handle a scan-offload C2H event: track channel enter/leave
 * notifications from firmware and, on scan-end, either offload the next
 * chunk of channels or complete/abort the scan.
 */
static void
rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
			   u32 len)
{
	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
	struct rtw89_chan new;
	u8 reason, status, tx_fail, band, actual_period;
	u32 last_chan = rtwdev->scan_info.last_chan_idx;
	u16 chan;
	int ret;

	tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
	status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
	chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
	reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
	band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
	actual_period = RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h->data);

	/* Without 6 GHz support, derive the band from the channel number
	 * instead of trusting the reported band field.
	 */
	if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
		band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;

	rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
		    "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
		    band, chan, reason, status, tx_fail, actual_period);

	switch (reason) {
	case RTW89_SCAN_LEAVE_CH_NOTIFY:
		/* Leaving the operating channel: stop mac80211 queues so
		 * no frames go out while off-channel.
		 */
		if (rtw89_is_op_chan(rtwdev, band, chan))
			ieee80211_stop_queues(rtwdev->hw);
		return;
	case RTW89_SCAN_END_SCAN_NOTIFY:
		/* More channels pending: offload the next chunk; on H2C
		 * failure abort the scan. Otherwise the scan is done.
		 */
		if (rtwvif && rtwvif->scan_req &&
		    last_chan < rtwvif->scan_req->n_channels) {
			ret = rtw89_hw_scan_offload(rtwdev, vif, true);
			if (ret) {
				rtw89_hw_scan_abort(rtwdev, vif);
				rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
			}
		} else {
			rtw89_hw_scan_complete(rtwdev, vif, false);
		}
		break;
	case RTW89_SCAN_ENTER_CH_NOTIFY:
		/* Mirror the firmware's current channel into sub-entity 0;
		 * when back on the operating channel, restore it and
		 * resume TX.
		 */
		rtw89_chan_create(&new, chan, chan, band, RTW89_CHANNEL_WIDTH_20);
		rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
		if (rtw89_is_op_chan(rtwdev, band, chan)) {
			rtw89_store_op_chan(rtwdev, false);
			ieee80211_wake_queues(rtwdev->hw);
		}
		break;
	default:
		return;
	}
}
  3217. static void
  3218. rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
  3219. {
  3220. rtw89_debug(rtwdev, RTW89_DBG_FW,
  3221. "C2H rev ack recv, cat: %d, class: %d, func: %d, seq : %d\n",
  3222. RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h->data),
  3223. RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h->data),
  3224. RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h->data),
  3225. RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h->data));
  3226. }
  3227. static void
  3228. rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
  3229. {
  3230. rtw89_debug(rtwdev, RTW89_DBG_FW,
  3231. "C2H done ack recv, cat: %d, class: %d, func: %d, ret: %d, seq : %d\n",
  3232. RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h->data),
  3233. RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h->data),
  3234. RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h->data),
  3235. RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h->data),
  3236. RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h->data));
  3237. }
/* Forward a firmware log string to the kernel log. The payload is not
 * NUL-terminated, so its length from the C2H header bounds the print via
 * the "%*s" field width.
 */
static void
rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_info(rtwdev, "%*s", RTW89_GET_C2H_LOG_LEN(len),
		   RTW89_GET_C2H_LOG_SRT_PRT(c2h->data));
}
/* C2H handler stub: beacon-count events need no driver action yet. */
static void
rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
/* C2H handler stub: packet-offload responses need no driver action yet. */
static void
rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
			   u32 len)
{
}
/* C2H handler stub: TSF32 toggle reports need no driver action yet. */
static void
rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
			       u32 len)
{
}
/* Dispatch table for C2H events of the offload class, indexed by the
 * function id; NULL entries are reported as unsupported by the caller.
 */
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
					    struct sk_buff *c2h, u32 len) = {
	[RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL,
	[RTW89_MAC_C2H_FUNC_READ_RSP] = NULL,
	[RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp,
	[RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
	[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
	[RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
	[RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT] = rtw89_mac_c2h_tsf32_toggle_rpt,
};
/* Dispatch table for C2H events of the info class, indexed by the
 * function id.
 */
static
void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev,
					    struct sk_buff *c2h, u32 len) = {
	[RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack,
	[RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack,
	[RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log,
	[RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt,
};
  3277. void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
  3278. u32 len, u8 class, u8 func)
  3279. {
  3280. void (*handler)(struct rtw89_dev *rtwdev,
  3281. struct sk_buff *c2h, u32 len) = NULL;
  3282. switch (class) {
  3283. case RTW89_MAC_C2H_CLASS_INFO:
  3284. if (func < RTW89_MAC_C2H_FUNC_INFO_MAX)
  3285. handler = rtw89_mac_c2h_info_handler[func];
  3286. break;
  3287. case RTW89_MAC_C2H_CLASS_OFLD:
  3288. if (func < RTW89_MAC_C2H_FUNC_OFLD_MAX)
  3289. handler = rtw89_mac_c2h_ofld_handler[func];
  3290. break;
  3291. case RTW89_MAC_C2H_CLASS_FWDBG:
  3292. return;
  3293. default:
  3294. rtw89_info(rtwdev, "c2h class %d not support\n", class);
  3295. return;
  3296. }
  3297. if (!handler) {
  3298. rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
  3299. func);
  3300. return;
  3301. }
  3302. handler(rtwdev, skb, len);
  3303. }
  3304. bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev,
  3305. enum rtw89_phy_idx phy_idx,
  3306. u32 reg_base, u32 *cr)
  3307. {
  3308. const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
  3309. enum rtw89_qta_mode mode = dle_mem->mode;
  3310. u32 addr = rtw89_mac_reg_by_idx(reg_base, phy_idx);
  3311. if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR) {
  3312. rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n",
  3313. addr);
  3314. goto error;
  3315. }
  3316. if (addr >= CMAC1_START_ADDR && addr <= CMAC1_END_ADDR)
  3317. if (mode == RTW89_QTA_SCC) {
  3318. rtw89_err(rtwdev,
  3319. "[TXPWR] addr=0x%x but hw not enable\n",
  3320. addr);
  3321. goto error;
  3322. }
  3323. *cr = addr;
  3324. return true;
  3325. error:
  3326. rtw89_err(rtwdev, "[TXPWR] check txpwr cr 0x%x(phy%d) fail\n",
  3327. addr, phy_idx);
  3328. return false;
  3329. }
  3330. EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr);
  3331. int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
  3332. {
  3333. u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx);
  3334. int ret = 0;
  3335. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  3336. if (ret)
  3337. return ret;
  3338. if (!enable) {
  3339. rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN);
  3340. return ret;
  3341. }
  3342. rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN |
  3343. B_AX_APP_MAC_INFO_RPT |
  3344. B_AX_APP_RX_CNT_RPT | B_AX_APP_PLCP_HDR_RPT |
  3345. B_AX_PPDU_STAT_RPT_CRC32);
  3346. rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK,
  3347. RTW89_PRPT_DEST_HOST);
  3348. return ret;
  3349. }
  3350. EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
  3351. void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
  3352. {
  3353. #define MAC_AX_TIME_TH_SH 5
  3354. #define MAC_AX_LEN_TH_SH 4
  3355. #define MAC_AX_TIME_TH_MAX 255
  3356. #define MAC_AX_LEN_TH_MAX 255
  3357. #define MAC_AX_TIME_TH_DEF 88
  3358. #define MAC_AX_LEN_TH_DEF 4080
  3359. struct ieee80211_hw *hw = rtwdev->hw;
  3360. u32 rts_threshold = hw->wiphy->rts_threshold;
  3361. u32 time_th, len_th;
  3362. u32 reg;
  3363. if (rts_threshold == (u32)-1) {
  3364. time_th = MAC_AX_TIME_TH_DEF;
  3365. len_th = MAC_AX_LEN_TH_DEF;
  3366. } else {
  3367. time_th = MAC_AX_TIME_TH_MAX << MAC_AX_TIME_TH_SH;
  3368. len_th = rts_threshold;
  3369. }
  3370. time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX);
  3371. len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX);
  3372. reg = rtw89_mac_reg_by_idx(R_AX_AGG_LEN_HT_0, mac_idx);
  3373. rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th);
  3374. rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th);
  3375. }
  3376. void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop)
  3377. {
  3378. bool empty;
  3379. int ret;
  3380. if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
  3381. return;
  3382. ret = read_poll_timeout(dle_is_txq_empty, empty, empty,
  3383. 10000, 200000, false, rtwdev);
  3384. if (ret && !drop && (rtwdev->total_sta_assoc || rtwdev->scanning))
  3385. rtw89_info(rtwdev, "timed out to flush queues\n");
  3386. }
/* Initialize WL/BT coexistence hardware: enable the BT mux and PTA
 * paths, tune BT CCA behavior, restrict the LTE coex config to WL RX
 * control, then program the PTA mode (RTK or CSR) and the coex IO
 * direction requested by @coex. Returns 0, a negative error from LTE
 * register access, or -EINVAL for an unknown mode/direction.
 */
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex)
{
	u8 val;
	u16 val16;
	u32 val32;
	int ret;

	rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT);
	rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN);
	/* Byte-wide accesses below shift the 32-bit mask down to the byte
	 * actually being written.
	 */
	rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8);
	rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK);
	rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16);
	rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24);
	/* Enable BT CCA but keep it from breaking an ongoing TXOP. */
	val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0);
	val16 = (val16 | B_AX_BTCCA_EN) & ~B_AX_BTCCA_BRK_TXOP_EN;
	rtw89_write16(rtwdev, R_AX_CCA_CFG_0, val16);
	/* Keep only the WL RX control bit in the LTE coex register. */
	ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_2, &val32);
	if (ret) {
		rtw89_err(rtwdev, "Read R_AX_LTE_SW_CFG_2 fail!\n");
		return ret;
	}
	val32 = val32 & B_AX_WL_RX_CTRL;
	ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_2, val32);
	if (ret) {
		rtw89_err(rtwdev, "Write R_AX_LTE_SW_CFG_2 fail!\n");
		return ret;
	}
	switch (coex->pta_mode) {
	case RTW89_MAC_AX_COEX_RTK_MODE:
		/* Realtek PTA mode with RTK-rate BT report sampling. */
		val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG);
		val &= ~B_AX_BTMODE_MASK;
		val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_0_3);
		rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val);
		val = rtw89_read8(rtwdev, R_AX_TDMA_MODE);
		rtw89_write8(rtwdev, R_AX_TDMA_MODE, val | B_AX_RTK_BT_ENABLE);
		val = rtw89_read8(rtwdev, R_AX_BT_COEX_CFG_5);
		val &= ~B_AX_BT_RPT_SAMPLE_RATE_MASK;
		val |= FIELD_PREP(B_AX_BT_RPT_SAMPLE_RATE_MASK, MAC_AX_RTK_RATE);
		rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_5, val);
		break;
	case RTW89_MAC_AX_COEX_CSR_MODE:
		/* CSR PTA mode with priority/TRX detect timeouts. */
		val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG);
		val &= ~B_AX_BTMODE_MASK;
		val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_2);
		rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val);
		val16 = rtw89_read16(rtwdev, R_AX_CSR_MODE);
		val16 &= ~B_AX_BT_PRI_DETECT_TO_MASK;
		val16 |= FIELD_PREP(B_AX_BT_PRI_DETECT_TO_MASK, MAC_AX_CSR_PRI_TO);
		val16 &= ~B_AX_BT_TRX_INIT_DETECT_MASK;
		val16 |= FIELD_PREP(B_AX_BT_TRX_INIT_DETECT_MASK, MAC_AX_CSR_TRX_TO);
		val16 &= ~B_AX_BT_STAT_DELAY_MASK;
		val16 |= FIELD_PREP(B_AX_BT_STAT_DELAY_MASK, MAC_AX_CSR_DELAY);
		val16 |= B_AX_ENHANCED_BT;
		rtw89_write16(rtwdev, R_AX_CSR_MODE, val16);
		rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_2, MAC_AX_CSR_RATE);
		break;
	default:
		return -EINVAL;
	}
	/* Coex IO direction bits — NOTE(review): BIT(1)/BIT(2) semantics
	 * taken from vendor reference code; confirm against the register
	 * spec.
	 */
	switch (coex->direction) {
	case RTW89_MAC_AX_COEX_INNER:
		val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1);
		val = (val & ~BIT(2)) | BIT(1);
		rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val);
		break;
	case RTW89_MAC_AX_COEX_OUTPUT:
		val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1);
		val = val | BIT(1) | BIT(0);
		rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val);
		break;
	case RTW89_MAC_AX_COEX_INPUT:
		val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1);
		val = val & ~(BIT(2) | BIT(1));
		rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(rtw89_mac_coex_init);
  3467. int rtw89_mac_coex_init_v1(struct rtw89_dev *rtwdev,
  3468. const struct rtw89_mac_ax_coex *coex)
  3469. {
  3470. rtw89_write32_set(rtwdev, R_AX_BTC_CFG,
  3471. B_AX_BTC_EN | B_AX_BTG_LNA1_GAIN_SEL);
  3472. rtw89_write32_set(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_EN);
  3473. rtw89_write16_set(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_EN);
  3474. rtw89_write16_clr(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_BRK_TXOP_EN);
  3475. switch (coex->pta_mode) {
  3476. case RTW89_MAC_AX_COEX_RTK_MODE:
  3477. rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK,
  3478. MAC_AX_RTK_MODE);
  3479. rtw89_write32_mask(rtwdev, R_AX_RTK_MODE_CFG_V1,
  3480. B_AX_SAMPLE_CLK_MASK, MAC_AX_RTK_RATE);
  3481. break;
  3482. case RTW89_MAC_AX_COEX_CSR_MODE:
  3483. rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK,
  3484. MAC_AX_CSR_MODE);
  3485. break;
  3486. default:
  3487. return -EINVAL;
  3488. }
  3489. return 0;
  3490. }
  3491. EXPORT_SYMBOL(rtw89_mac_coex_init_v1);
  3492. int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
  3493. const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
  3494. {
  3495. u32 val = 0, ret;
  3496. if (gnt_cfg->band[0].gnt_bt)
  3497. val |= B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL;
  3498. if (gnt_cfg->band[0].gnt_bt_sw_en)
  3499. val |= B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL;
  3500. if (gnt_cfg->band[0].gnt_wl)
  3501. val |= B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL;
  3502. if (gnt_cfg->band[0].gnt_wl_sw_en)
  3503. val |= B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL;
  3504. if (gnt_cfg->band[1].gnt_bt)
  3505. val |= B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL;
  3506. if (gnt_cfg->band[1].gnt_bt_sw_en)
  3507. val |= B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL;
  3508. if (gnt_cfg->band[1].gnt_wl)
  3509. val |= B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL;
  3510. if (gnt_cfg->band[1].gnt_wl_sw_en)
  3511. val |= B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL;
  3512. ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val);
  3513. if (ret) {
  3514. rtw89_err(rtwdev, "Write LTE fail!\n");
  3515. return ret;
  3516. }
  3517. return 0;
  3518. }
  3519. EXPORT_SYMBOL(rtw89_mac_cfg_gnt);
  3520. int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
  3521. const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
  3522. {
  3523. u32 val = 0;
  3524. if (gnt_cfg->band[0].gnt_bt)
  3525. val |= B_AX_GNT_BT_RFC_S0_VAL | B_AX_GNT_BT_RX_VAL |
  3526. B_AX_GNT_BT_TX_VAL;
  3527. else
  3528. val |= B_AX_WL_ACT_VAL;
  3529. if (gnt_cfg->band[0].gnt_bt_sw_en)
  3530. val |= B_AX_GNT_BT_RFC_S0_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
  3531. B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
  3532. if (gnt_cfg->band[0].gnt_wl)
  3533. val |= B_AX_GNT_WL_RFC_S0_VAL | B_AX_GNT_WL_RX_VAL |
  3534. B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
  3535. if (gnt_cfg->band[0].gnt_wl_sw_en)
  3536. val |= B_AX_GNT_WL_RFC_S0_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
  3537. B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
  3538. if (gnt_cfg->band[1].gnt_bt)
  3539. val |= B_AX_GNT_BT_RFC_S1_VAL | B_AX_GNT_BT_RX_VAL |
  3540. B_AX_GNT_BT_TX_VAL;
  3541. else
  3542. val |= B_AX_WL_ACT_VAL;
  3543. if (gnt_cfg->band[1].gnt_bt_sw_en)
  3544. val |= B_AX_GNT_BT_RFC_S1_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
  3545. B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
  3546. if (gnt_cfg->band[1].gnt_wl)
  3547. val |= B_AX_GNT_WL_RFC_S1_VAL | B_AX_GNT_WL_RX_VAL |
  3548. B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
  3549. if (gnt_cfg->band[1].gnt_wl_sw_en)
  3550. val |= B_AX_GNT_WL_RFC_S1_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
  3551. B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
  3552. rtw89_write32(rtwdev, R_AX_GNT_SW_CTRL, val);
  3553. return 0;
  3554. }
  3555. EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1);
  3556. int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
  3557. {
  3558. u32 reg;
  3559. u16 val;
  3560. int ret;
  3561. ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL);
  3562. if (ret)
  3563. return ret;
  3564. reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, plt->band);
  3565. val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) |
  3566. (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) |
  3567. (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) |
  3568. (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_TX_PLT_GNT_WL : 0) |
  3569. (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) |
  3570. (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) |
  3571. (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) |
  3572. (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0) |
  3573. B_AX_PLT_EN;
  3574. rtw89_write16(rtwdev, reg, val);
  3575. return 0;
  3576. }
  3577. void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val)
  3578. {
  3579. u32 fw_sb;
  3580. fw_sb = rtw89_read32(rtwdev, R_AX_SCOREBOARD);
  3581. fw_sb = FIELD_GET(B_MAC_AX_SB_FW_MASK, fw_sb);
  3582. fw_sb = fw_sb & ~B_MAC_AX_BTGS1_NOTIFY;
  3583. if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
  3584. fw_sb = fw_sb | MAC_AX_NOTIFY_PWR_MAJOR;
  3585. else
  3586. fw_sb = fw_sb | MAC_AX_NOTIFY_TP_MAJOR;
  3587. val = FIELD_GET(B_MAC_AX_SB_DRV_MASK, val);
  3588. val = B_AX_TOGGLE |
  3589. FIELD_PREP(B_MAC_AX_SB_DRV_MASK, val) |
  3590. FIELD_PREP(B_MAC_AX_SB_FW_MASK, fw_sb);
  3591. rtw89_write32(rtwdev, R_AX_SCOREBOARD, val);
  3592. fsleep(1000); /* avoid BT FW loss information */
  3593. }
/* Read back the raw WL/BT scoreboard register (driver + FW fields). */
u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev)
{
	return rtw89_read32(rtwdev, R_AX_SCOREBOARD);
}
  3598. int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
  3599. {
  3600. u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3);
  3601. val = wl ? val | BIT(2) : val & ~BIT(2);
  3602. rtw89_write8(rtwdev, R_AX_SYS_SDIO_CTRL + 3, val);
  3603. return 0;
  3604. }
  3605. EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path);
  3606. int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl)
  3607. {
  3608. struct rtw89_btc *btc = &rtwdev->btc;
  3609. struct rtw89_btc_dm *dm = &btc->dm;
  3610. struct rtw89_mac_ax_gnt *g = dm->gnt.band;
  3611. int i;
  3612. if (wl)
  3613. return 0;
  3614. for (i = 0; i < RTW89_PHY_MAX; i++) {
  3615. g[i].gnt_bt_sw_en = 1;
  3616. g[i].gnt_bt = 1;
  3617. g[i].gnt_wl_sw_en = 1;
  3618. g[i].gnt_wl = 0;
  3619. }
  3620. return rtw89_mac_cfg_gnt_v1(rtwdev, &dm->gnt);
  3621. }
  3622. EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v1);
/* Report whether WL currently owns the LTE-mux control path.
 *
 * Only byte 3 of R_AX_SYS_SDIO_CTRL is read, so the 32-bit
 * B_AX_LTE_MUX_CTRL_PATH mask is shifted down by 24 bits to line up
 * with the 8-bit register value.
 */
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
{
	u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3);

	return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val);
}
  3628. u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
  3629. {
  3630. u32 reg;
  3631. u16 cnt;
  3632. reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, band);
  3633. cnt = rtw89_read32_mask(rtwdev, reg, B_AX_BT_PLT_PKT_CNT_MASK);
  3634. rtw89_write16_set(rtwdev, reg, B_AX_BT_PLT_RST);
  3635. return cnt;
  3636. }
  3637. static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
  3638. {
  3639. u32 reg;
  3640. u32 mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN |
  3641. B_AX_BFMEE_HE_NDPA_EN;
  3642. rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en);
  3643. reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx);
  3644. if (en) {
  3645. set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
  3646. rtw89_write32_set(rtwdev, reg, mask);
  3647. } else {
  3648. clear_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
  3649. rtw89_write32_clr(rtwdev, reg, mask);
  3650. }
  3651. }
/* One-time beamformee (BFee) setup for @mac_idx: enable NDP reception,
 * program the CSI response rate set and standby timers, select the CSI
 * parameter source and initial CSI report rates.
 *
 * Returns 0 on success, or a negative error code when the CMAC for this
 * band is not enabled.
 */
static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 reg;
	u32 val32;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	/* AP mode set tx gid to 63 */
	/* STA mode set tx gid to 0(default) */
	reg = rtw89_mac_reg_by_idx(R_AX_BFMER_CTRL_0, mac_idx);
	rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN);

	reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx);
	rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP);

	/* Full overwrite with only the standby timers; the NDPA enable
	 * bits in this register are (re)applied by rtw89_mac_bfee_ctrl()
	 * right after, so ordering matters here.
	 */
	reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx);
	val32 = FIELD_PREP(B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, BFRP_RX_STANDBY_TIMER);
	val32 |= FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, NDP_RX_STANDBY_TIMER);
	rtw89_write32(rtwdev, reg, val32);
	rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true);

	reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
	rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL |
				       B_AX_BFMEE_USE_NSTS |
				       B_AX_BFMEE_CSI_GID_SEL |
				       B_AX_BFMEE_CSI_FORCE_RETE_EN);

	/* initial CSI report rate per PHY generation (HT/VHT/HE) */
	reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx);
	rtw89_write32(rtwdev, reg,
		      u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) |
		      u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) |
		      u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK));

	reg = rtw89_mac_reg_by_idx(R_AX_CSIRPT_OPTION, mac_idx);
	rtw89_write32_set(rtwdev, reg,
			  B_AX_CSIPRT_VHTSU_AID_EN | B_AX_CSIPRT_HESU_AID_EN);

	return 0;
}
/* Derive CSI feedback parameters (Nc/Nr/Ng/codebook/codebook-size/
 * LDPC/STBC) from the peer's HE and VHT beamformer capabilities and
 * program them into the CSI-info register of the vif's port.
 *
 * Returns 0 on success, or a negative error code when the CMAC for this
 * band is not enabled.
 */
static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	u8 mac_idx = rtwvif->mac_idx;
	/* optimistic defaults; capped below by the peer's capabilities */
	u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1;
	u8 port_sel = rtwvif->port;
	u8 sound_dim = 3, t;
	u8 *phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
	u32 reg;
	u16 val;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	/* HE peer advertising SU/MU beamformer role: respect its LDPC and
	 * STBC RX support and cap the sounding dimensions accordingly.
	 */
	if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
	    (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) {
		ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD);
		stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ);
		t = FIELD_GET(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
			      phy_cap[5]);
		sound_dim = min(sound_dim, t);
	}

	/* Same capping against a VHT beamformer peer */
	if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
	    (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
		ldpc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
		stbc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
		t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
			      sta->deflink.vht_cap.cap);
		sound_dim = min(sound_dim, t);
	}

	nc = min(nc, sound_dim);
	nr = min(nr, sound_dim);

	reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
	rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL);

	val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_NR_MASK, nr) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_NG_MASK, ng) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_CB_MASK, cb) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_CS_MASK, cs) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_LDPC_EN, ldpc_en) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en);

	/* port 0 uses CSI_CTRL_0; any other port uses CSI_CTRL_1 */
	if (port_sel == 0)
		reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
	else
		reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx);

	rtw89_write16(rtwdev, reg, val);

	return 0;
}
  3736. static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev,
  3737. struct ieee80211_vif *vif,
  3738. struct ieee80211_sta *sta)
  3739. {
  3740. struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
  3741. u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M);
  3742. u32 reg;
  3743. u8 mac_idx = rtwvif->mac_idx;
  3744. int ret;
  3745. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  3746. if (ret)
  3747. return ret;
  3748. if (sta->deflink.he_cap.has_he) {
  3749. rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) |
  3750. BIT(RTW89_MAC_BF_RRSC_HE_MSC3) |
  3751. BIT(RTW89_MAC_BF_RRSC_HE_MSC5));
  3752. }
  3753. if (sta->deflink.vht_cap.vht_supported) {
  3754. rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) |
  3755. BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) |
  3756. BIT(RTW89_MAC_BF_RRSC_VHT_MSC5));
  3757. }
  3758. if (sta->deflink.ht_cap.ht_supported) {
  3759. rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) |
  3760. BIT(RTW89_MAC_BF_RRSC_HT_MSC3) |
  3761. BIT(RTW89_MAC_BF_RRSC_HT_MSC5));
  3762. }
  3763. reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
  3764. rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL);
  3765. rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN);
  3766. rtw89_write32(rtwdev,
  3767. rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx),
  3768. rrsc);
  3769. return 0;
  3770. }
  3771. void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
  3772. struct ieee80211_sta *sta)
  3773. {
  3774. struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
  3775. if (rtw89_sta_has_beamformer_cap(sta)) {
  3776. rtw89_debug(rtwdev, RTW89_DBG_BF,
  3777. "initialize bfee for new association\n");
  3778. rtw89_mac_init_bfee(rtwdev, rtwvif->mac_idx);
  3779. rtw89_mac_set_csi_para_reg(rtwdev, vif, sta);
  3780. rtw89_mac_csi_rrsc(rtwdev, vif, sta);
  3781. }
  3782. }
/* On disassociation: disable beamformee NDPA response on the vif's MAC
 * band. @sta is currently unused here.
 */
void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;

	rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, false);
}
/* Program the MU-MIMO group-ID tables from mac80211's bss_conf into the
 * per-band GID registers: two 32-bit words of membership bitmap and four
 * 32-bit words of user-position data.
 */
void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *conf)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	u8 mac_idx = rtwvif->mac_idx;
	__le32 *p;

	rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n");

	/* membership bitmap -> GID_POSITION_EN0/1 */
	p = (__le32 *)conf->mu_group.membership;
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN0, mac_idx),
		      le32_to_cpu(p[0]));
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN1, mac_idx),
		      le32_to_cpu(p[1]));

	/* user positions -> GID_POSITION0..3 */
	p = (__le32 *)conf->mu_group.position;
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION0, mac_idx),
		      le32_to_cpu(p[0]));
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION1, mac_idx),
		      le32_to_cpu(p[1]));
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION2, mac_idx),
		      le32_to_cpu(p[2]));
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION3, mac_idx),
		      le32_to_cpu(p[3]));
}
/* Iteration context for rtw89_mac_bf_monitor_calc_iter(). */
struct rtw89_mac_bf_monitor_iter_data {
	struct rtw89_dev *rtwdev;
	struct ieee80211_sta *down_sta;	/* station being removed; excluded from the count */
	int count;			/* beamformer-capable stations seen so far */
};
  3816. static
  3817. void rtw89_mac_bf_monitor_calc_iter(void *data, struct ieee80211_sta *sta)
  3818. {
  3819. struct rtw89_mac_bf_monitor_iter_data *iter_data =
  3820. (struct rtw89_mac_bf_monitor_iter_data *)data;
  3821. struct ieee80211_sta *down_sta = iter_data->down_sta;
  3822. int *count = &iter_data->count;
  3823. if (down_sta == sta)
  3824. return;
  3825. if (rtw89_sta_has_beamformer_cap(sta))
  3826. (*count)++;
  3827. }
  3828. void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev,
  3829. struct ieee80211_sta *sta, bool disconnect)
  3830. {
  3831. struct rtw89_mac_bf_monitor_iter_data data;
  3832. data.rtwdev = rtwdev;
  3833. data.down_sta = disconnect ? sta : NULL;
  3834. data.count = 0;
  3835. ieee80211_iterate_stations_atomic(rtwdev->hw,
  3836. rtw89_mac_bf_monitor_calc_iter,
  3837. &data);
  3838. rtw89_debug(rtwdev, RTW89_DBG_BF, "bfee STA count=%d\n", data.count);
  3839. if (data.count)
  3840. set_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags);
  3841. else
  3842. clear_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags);
  3843. }
  3844. void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
  3845. {
  3846. struct rtw89_traffic_stats *stats = &rtwdev->stats;
  3847. struct rtw89_vif *rtwvif;
  3848. bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv;
  3849. bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
  3850. if (en == old)
  3851. return;
  3852. rtw89_for_each_rtwvif(rtwdev, rtwvif)
  3853. rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, en);
  3854. }
/* Apply the maximum TX-time limit for @rtwsta; tx_time == 0 selects the
 * 5280 default. When per-STA CMAC control (cctl_tx_time) is active the
 * limit goes through the FW CMAC table; otherwise it is written to the
 * band's AMPDU aggregation-limit register.
 *
 * Returns 0 on success or a negative error code.
 */
static int
__rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			u32 tx_time)
{
#define MAC_AX_DFLT_TX_TIME 5280
	u8 mac_idx = rtwsta->rtwvif->mac_idx;
	u32 max_tx_time = tx_time == 0 ? MAC_AX_DFLT_TX_TIME : tx_time;
	u32 reg;
	int ret = 0;

	if (rtwsta->cctl_tx_time) {
		/* CMAC table encoding: (time - 512) / 512 — presumably
		 * units of 512us; see rtw89_mac_get_tx_time() for the
		 * inverse. TODO confirm units against FW interface docs.
		 */
		rtwsta->ampdu_max_time = (max_tx_time - 512) >> 9;
		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
	} else {
		ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
		if (ret) {
			rtw89_warn(rtwdev, "failed to check cmac in set txtime\n");
			return ret;
		}

		/* register encoding: time / 32 */
		reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx);
		rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK,
				   max_tx_time >> 5);
	}

	return ret;
}
  3879. int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
  3880. bool resume, u32 tx_time)
  3881. {
  3882. int ret = 0;
  3883. if (!resume) {
  3884. rtwsta->cctl_tx_time = true;
  3885. ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time);
  3886. } else {
  3887. ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time);
  3888. rtwsta->cctl_tx_time = false;
  3889. }
  3890. return ret;
  3891. }
/* Read the current maximum TX-time limit for @rtwsta into @tx_time,
 * inverting the encodings used by __rtw89_mac_set_tx_time(): the cached
 * CMAC-table value is (n + 1) * 512, the register value is n * 32.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			  u32 *tx_time)
{
	u8 mac_idx = rtwsta->rtwvif->mac_idx;
	u32 reg;
	int ret = 0;

	if (rtwsta->cctl_tx_time) {
		*tx_time = (rtwsta->ampdu_max_time + 1) << 9;
	} else {
		ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
		if (ret) {
			rtw89_warn(rtwdev, "failed to check cmac in tx_time\n");
			return ret;
		}

		reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx);
		*tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5;
	}

	return ret;
}
  3911. int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
  3912. struct rtw89_sta *rtwsta,
  3913. bool resume, u8 tx_retry)
  3914. {
  3915. int ret = 0;
  3916. rtwsta->data_tx_cnt_lmt = tx_retry;
  3917. if (!resume) {
  3918. rtwsta->cctl_tx_retry_limit = true;
  3919. ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
  3920. } else {
  3921. ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
  3922. rtwsta->cctl_tx_retry_limit = false;
  3923. }
  3924. return ret;
  3925. }
/* Read the current data-frame TX retry limit for @rtwsta into @tx_retry:
 * either the cached per-STA CMAC value, or the band's TXCNT register.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta, u8 *tx_retry)
{
	u8 mac_idx = rtwsta->rtwvif->mac_idx;
	u32 reg;
	int ret = 0;

	if (rtwsta->cctl_tx_retry_limit) {
		*tx_retry = rtwsta->data_tx_cnt_lmt;
	} else {
		ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
		if (ret) {
			rtw89_warn(rtwdev, "failed to check cmac in rty_lmt\n");
			return ret;
		}

		reg = rtw89_mac_reg_by_idx(R_AX_TXCNT, mac_idx);
		*tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK);
	}

	return ret;
}
  3945. int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
  3946. struct rtw89_vif *rtwvif, bool en)
  3947. {
  3948. u8 mac_idx = rtwvif->mac_idx;
  3949. u16 set = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0;
  3950. u32 reg;
  3951. u32 ret;
  3952. ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
  3953. if (ret)
  3954. return ret;
  3955. reg = rtw89_mac_reg_by_idx(R_AX_MUEDCA_EN, mac_idx);
  3956. if (en)
  3957. rtw89_write16_set(rtwdev, reg, set);
  3958. else
  3959. rtw89_write16_clr(rtwdev, reg, set);
  3960. return 0;
  3961. }
/* Write @val (under @mask) to XTAL SI register @offset through the
 * indirect WLAN_XTAL_SI interface: post the command with the poll bit
 * set, then wait for hardware to clear it (up to 50ms, 50us steps).
 *
 * Returns 0 on success or a negative error code from read_poll_timeout()
 * if the command never completes.
 */
int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
{
	u32 val32;
	int ret;

	val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
		FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, val) |
		FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, mask) |
		FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_WRITE) |
		FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
	rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);

	ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
				50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
	if (ret) {
		rtw89_warn(rtwdev, "xtal si not ready(W): offset=%x val=%x mask=%x\n",
			   offset, val, mask);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(rtw89_mac_write_xtal_si);
/* Read XTAL SI register @offset into @val through the indirect
 * WLAN_XTAL_SI interface: post a read command with the poll bit set,
 * wait for completion, then fetch the data byte from the control
 * register's second byte.
 *
 * Returns 0 on success or a negative error code from read_poll_timeout()
 * if the command never completes.
 */
int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
{
	u32 val32;
	int ret;

	val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
		FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, 0x00) |
		FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, 0x00) |
		FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_READ) |
		FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
	rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);

	ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
				50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
	if (ret) {
		rtw89_warn(rtwdev, "xtal si not ready(R): offset=%x\n", offset);
		return ret;
	}

	/* returned data lives in byte 1 of the control register */
	*val = rtw89_read8(rtwdev, R_AX_WLAN_XTAL_SI_CTRL + 1);

	return 0;
}
  4001. static
  4002. void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
  4003. {
  4004. static const enum rtw89_pkt_drop_sel sels[] = {
  4005. RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
  4006. RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
  4007. RTW89_PKT_DROP_SEL_MACID_VI_ONCE,
  4008. RTW89_PKT_DROP_SEL_MACID_VO_ONCE,
  4009. };
  4010. struct rtw89_vif *rtwvif = rtwsta->rtwvif;
  4011. struct rtw89_pkt_drop_params params = {0};
  4012. int i;
  4013. params.mac_band = RTW89_MAC_0;
  4014. params.macid = rtwsta->mac_id;
  4015. params.port = rtwvif->port;
  4016. params.mbssid = 0;
  4017. params.tf_trs = rtwvif->trigger;
  4018. for (i = 0; i < ARRAY_SIZE(sels); i++) {
  4019. params.sel = sels[i];
  4020. rtw89_fw_h2c_pkt_drop(rtwdev, &params);
  4021. }
  4022. }
  4023. static void rtw89_mac_pkt_drop_vif_iter(void *data, struct ieee80211_sta *sta)
  4024. {
  4025. struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
  4026. struct rtw89_vif *rtwvif = rtwsta->rtwvif;
  4027. struct rtw89_dev *rtwdev = rtwvif->rtwdev;
  4028. struct rtw89_vif *target = data;
  4029. if (rtwvif != target)
  4030. return;
  4031. rtw89_mac_pkt_drop_sta(rtwdev, rtwsta);
  4032. }
/* Drop the queued AC frames of every station associated with @rtwvif by
 * issuing per-STA packet-drop H2C commands.
 */
void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_mac_pkt_drop_vif_iter,
					  rtwvif);
}