ipa.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 */
#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/msm_gsi.h>
#include <linux/time.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/pci.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/soc/qcom/smem.h>
#include <soc/qcom/scm.h>
#include <asm/cacheflush.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/of_irq.h>

#ifdef CONFIG_ARM64
/* Outer caches unsupported on ARM64 platforms */
#define outer_flush_range(x, y)
#define __cpuc_flush_dcache_area __flush_dcache_area
#endif

#define IPA_SUBSYSTEM_NAME "ipa_fws"
#define IPA_UC_SUBSYSTEM_NAME "ipa_uc"

#include "ipa_i.h"
#include "../ipa_rm_i.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define CREATE_TRACE_POINTS
#include "ipa_trace.h"
#include "ipa_odl.h"
/*
 * The following is for adding code (i.e. for EMULATION) not found on x86.
 */
#if defined(CONFIG_IPA_EMULATION)
# include "ipa_emulation_stubs.h"
#endif

#ifdef CONFIG_COMPAT
/**
 * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
 * properties
 * @dev_name: input parameter, the name of table
 * @size: input parameter, size of table in bytes
 * @offset: output parameter, offset into page in case of system memory
 */
struct ipa3_ioc_nat_alloc_mem32 {
	char dev_name[IPA_RESOURCE_NAME_MAX];
	compat_size_t size;
	compat_off_t offset;
};

/**
 * struct ipa_ioc_nat_ipv6ct_table_alloc32 - table memory allocation
 * properties
 * @size: input parameter, size of table in bytes
 * @offset: output parameter, offset into page in case of system memory
 */
struct ipa_ioc_nat_ipv6ct_table_alloc32 {
	compat_size_t size;
	compat_off_t offset;
};
#endif /* #ifdef CONFIG_COMPAT */

#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
#define TZ_MEM_PROTECT_REGION_ID 0x10

struct tz_smmu_ipa_protect_region_iovec_s {
	u64 input_addr;
	u64 output_addr;
	u64 size;
	u32 attr;
} __packed;

struct tz_smmu_ipa_protect_region_s {
	phys_addr_t iovec_buf;
	u32 size_bytes;
} __packed;

static void ipa3_start_tag_process(struct work_struct *work);
static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);

static void ipa3_transport_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
	ipa3_transport_release_resource);
static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);

static int ipa3_attach_to_smmu(void);
static int ipa3_alloc_pkt_init(void);

static void ipa3_load_ipa_fw(struct work_struct *work);
static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);

static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
	ipa_dec_clients_disable_clks_on_wq);

static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg);
static int ipa3_ioctl_add_rt_rule_ext_v2(unsigned long arg);
static int ipa3_ioctl_add_rt_rule_after_v2(unsigned long arg);
static int ipa3_ioctl_mdfy_rt_rule_v2(unsigned long arg);
static int ipa3_ioctl_add_flt_rule_v2(unsigned long arg);
static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg);
static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg);
static int ipa3_ioctl_fnr_counter_alloc(unsigned long arg);
static int ipa3_ioctl_fnr_counter_query(unsigned long arg);

static struct ipa3_plat_drv_res ipa3_res = {0, };

static struct clk *ipa3_clk;

struct ipa3_context *ipa3_ctx;

static struct {
	bool present[IPA_SMMU_CB_MAX];
	bool arm_smmu;
	bool use_64_bit_dma_mask;
	u32 ipa_base;
	u32 ipa_size;
} smmu_info;

static char *active_clients_table_buf;
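
/**
 * ipa3_active_clients_log_print_buffer() - print the active-clients
 * circular log into a caller-supplied buffer
 * @buf: destination buffer
 * @size: size of @buf in bytes
 *
 * Walks the ring from tail+1 up to head under the logging spinlock,
 * one line per entry. Returns the number of bytes written.
 */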
int ipa3_active_clients_log_print_buffer(char *buf, int size)
{
	int i;
	int nbytes;
	int cnt = 0;
	int start_idx;
	int end_idx;
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
	end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
	for (i = start_idx; i != end_idx;
		i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
		nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
				ipa3_ctx->ipa3_active_clients_logging
				.log_buffer[i]);
		cnt += nbytes;
	}
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);

	return cnt;
}
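
/**
 * ipa3_active_clients_log_print_table() - print the table of active
 * clients, with per-client reference counts, into a caller-supplied
 * buffer
 * @buf: destination buffer
 * @size: size of @buf in bytes
 *
 * Iterates the active-clients hash table under the logging spinlock,
 * then appends the total count and, when MHIP offload is enabled, the
 * MPM panic information. Returns the number of bytes written.
 */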
int ipa3_active_clients_log_print_table(char *buf, int size)
{
	int i;
	struct ipa3_active_client_htable_entry *iterator;
	int cnt = 0;
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
	hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
			iterator, list) {
		switch (iterator->type) {
		case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d ENDPOINT\n",
					iterator->id_string, iterator->count);
			break;
		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d SIMPLE\n",
					iterator->id_string, iterator->count);
			break;
		case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d RESOURCE\n",
					iterator->id_string, iterator->count);
			break;
		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d SPECIAL\n",
					iterator->id_string, iterator->count);
			break;
		default:
			IPAERR("Trying to print illegal active_clients type");
			break;
		}
	}
	cnt += scnprintf(buf + cnt, size - cnt,
			"\nTotal active clients count: %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
	if (ipa3_is_mhip_offload_enabled())
		cnt += ipa_mpm_panic_handler(buf + cnt, size - cnt);
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);

	return cnt;
}
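
/*
 * ipa3_clean_modem_rule() - send an empty filter-install request to
 * the modem over QMI (the plain request on IPA HW older than v3.0,
 * the extended request otherwise), which appears intended to clear
 * any filter rules the modem previously installed. Returns the QMI
 * send result.
 */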
static int ipa3_clean_modem_rule(void)
{
	struct ipa_install_fltr_rule_req_msg_v01 *req;
	struct ipa_install_fltr_rule_req_ex_msg_v01 *req_ex;
	int val = 0;

	if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_0) {
		req = kzalloc(
			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
			GFP_KERNEL);
		if (!req) {
			IPAERR("mem allocation failed!\n");
			return -ENOMEM;
		}
		req->filter_spec_list_valid = false;
		req->filter_spec_list_len = 0;
		req->source_pipe_index_valid = 0;
		val = ipa3_qmi_filter_request_send(req);
		kfree(req);
	} else {
		req_ex = kzalloc(
			sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01),
			GFP_KERNEL);
		if (!req_ex) {
			IPAERR("mem allocation failed!\n");
			return -ENOMEM;
		}
		req_ex->filter_spec_ex_list_valid = false;
		req_ex->filter_spec_ex_list_len = 0;
		req_ex->source_pipe_index_valid = 0;
		val = ipa3_qmi_filter_request_ex_send(req_ex);
		kfree(req_ex);
	}

	return val;
}
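
/*
 * Panic notifier: dump the active-clients table into the kernel log
 * so the state of IPA clients at crash time is preserved.
 */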
static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	ipa3_active_clients_log_print_table(active_clients_table_buf,
			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
	IPAERR("%s\n", active_clients_table_buf);

	return NOTIFY_DONE;
}

static struct notifier_block ipa3_active_clients_panic_blk = {
	.notifier_call = ipa3_active_clients_panic_notifier,
};

#ifdef CONFIG_IPA_DEBUG
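/*
 * Append one line to the active-clients circular log: pad the head
 * line with '_', copy in the new string, and advance the head index;
 * when the ring is full the tail advances too, dropping the oldest
 * line. No locking is taken here, so it appears to rely on the caller
 * holding the logging lock.
 */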
static int ipa3_active_clients_log_insert(const char *string)
{
	int head;
	int tail;

	if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
		return -EPERM;
	head = ipa3_ctx->ipa3_active_clients_logging.log_head;
	tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;

	memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
			IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
	strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
			(size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
	head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
	if (tail == head)
		tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;

	ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
	ipa3_ctx->ipa3_active_clients_logging.log_head = head;

	return 0;
}
#endif
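
/*
 * Allocate the circular log as one contiguous block and point the
 * per-line entries into it; only log_buffer[0] owns an allocation,
 * which is what the destroy path frees. Also registers the panic
 * notifier that dumps the client table on a crash.
 */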
static int ipa3_active_clients_log_init(void)
{
	int i;

	spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock);
	ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kcalloc(
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES,
			sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
			GFP_KERNEL);
	active_clients_table_buf = kzalloc(sizeof(
			char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
	if (ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] == NULL) {
		pr_err("Active Clients Logging memory allocation failed\n");
		goto bail;
	}
	for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
		ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
			ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
			(IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
	}
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
	atomic_notifier_chain_register(&panic_notifier_list,
			&ipa3_active_clients_panic_blk);
	ipa3_ctx->ipa3_active_clients_logging.log_rdy = true;

	return 0;

bail:
	return -ENOMEM;
}
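
/* Reset the circular log to empty (head at 0, tail just behind it). */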
void ipa3_active_clients_log_clear(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);
}
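
/*
 * Tear down active-clients logging: mark the log not ready under the
 * lock, free the table buffer and the single backing allocation
 * (log_buffer[0]), and reset the ring indices.
 */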
static void ipa3_active_clients_log_destroy(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
	kfree(active_clients_table_buf);
	active_clients_table_buf = NULL;
	kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);
}

static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];

struct iommu_domain *ipa3_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type)
{
	if (VALID_IPA_SMMU_CB_TYPE(cb_type) && smmu_cb[cb_type].valid)
		return smmu_cb[cb_type].iommu_domain;

	IPAERR("cb_type(%d) not valid\n", cb_type);

	return NULL;
}

struct iommu_domain *ipa3_get_smmu_domain(void)
{
	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_AP);
}

struct iommu_domain *ipa3_get_uc_smmu_domain(void)
{
	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_UC);
}

struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
{
	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_WLAN);
}

struct iommu_domain *ipa3_get_11ad_smmu_domain(void)
{
	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_11AD);
}

struct device *ipa3_get_dma_dev(void)
{
	return ipa3_ctx->pdev;
}

/**
 * ipa3_get_smmu_ctx()- Return smmu context for the given cb_type
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type cb_type)
{
	return &smmu_cb[cb_type];
}
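
/* chardev open(): stash the driver context for the ioctl handlers */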
static int ipa3_open(struct inode *inode, struct file *filp)
{
	IPADBG_LOW("ENTER\n");
	filp->private_data = ipa3_ctx;

	return 0;
}
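
/*
 * Free callback handed to ipa3_send_msg() for WAN messages; sanity-
 * checks the message type before releasing the buffer.
 */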
static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAERR("Null buffer\n");
		return;
	}

	if (type != WAN_UPSTREAM_ROUTE_ADD &&
	    type != WAN_UPSTREAM_ROUTE_DEL &&
	    type != WAN_EMBMS_CONNECT) {
		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
		return;
	}

	kfree(buff);
}
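
/*
 * Copy a WAN message from user space and hand it to ipa3_send_msg(),
 * which takes ownership of the buffer (released via the callback
 * above). When @is_cache is set, the message and its metadata are
 * also saved in a small circular cache of the most recent CNE event
 * requests, presumably so they can be replayed later.
 */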
static int ipa3_send_wan_msg(unsigned long usr_param,
	uint8_t msg_type, bool is_cache)
{
	int retval;
	struct ipa_wan_msg *wan_msg;
	struct ipa_msg_meta msg_meta;
	struct ipa_wan_msg cache_wan_msg;

	wan_msg = kzalloc(sizeof(*wan_msg), GFP_KERNEL);
	if (!wan_msg)
		return -ENOMEM;

	if (copy_from_user(wan_msg, (const void __user *)usr_param,
		sizeof(struct ipa_wan_msg))) {
		kfree(wan_msg);
		return -EFAULT;
	}

	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));

	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
	msg_meta.msg_type = msg_type;
	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
	retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
	if (retval) {
		IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
		kfree(wan_msg);
		return retval;
	}

	if (is_cache) {
		mutex_lock(&ipa3_ctx->ipa_cne_evt_lock);

		/* cache the cne event */
		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
			ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
			&cache_wan_msg,
			sizeof(cache_wan_msg));

		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
			ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
			&msg_meta,
			sizeof(struct ipa_msg_meta));

		ipa3_ctx->num_ipa_cne_evt_req++;
		ipa3_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE;
		mutex_unlock(&ipa3_ctx->ipa_cne_evt_lock);
	}

	return 0;
}
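
/*
 * Free callback for VLAN/L2TP/bridge-mapping messages; rejects any
 * buffer whose type is not one of the six expected events.
 */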
static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAERR("Null buffer\n");
		return;
	}

	switch (type) {
	case ADD_VLAN_IFACE:
	case DEL_VLAN_IFACE:
	case ADD_L2TP_VLAN_MAPPING:
	case DEL_L2TP_VLAN_MAPPING:
	case ADD_BRIDGE_VLAN_MAPPING:
	case DEL_BRIDGE_VLAN_MAPPING:
		break;
	default:
		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
		return;
	}

	kfree(buff);
}
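
/*
 * Copy a VLAN-interface, L2TP-VLAN-mapping, or bridge-VLAN-mapping
 * request from user space (sized according to @msg_type) and forward
 * it through ipa3_send_msg(). On success the messaging core owns the
 * buffer and frees it via the callback; on failure it is freed here.
 */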
static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
{
	int retval;
	struct ipa_ioc_vlan_iface_info *vlan_info;
	struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
	struct ipa_ioc_bridge_vlan_mapping_info *bridge_vlan_info;
	struct ipa_msg_meta msg_meta;
	void *buff;

	IPADBG("type %d\n", msg_type);

	memset(&msg_meta, 0, sizeof(msg_meta));
	msg_meta.msg_type = msg_type;

	if ((msg_type == ADD_VLAN_IFACE) ||
		(msg_type == DEL_VLAN_IFACE)) {
		vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
			GFP_KERNEL);
		if (!vlan_info)
			return -ENOMEM;

		if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
			sizeof(struct ipa_ioc_vlan_iface_info))) {
			kfree(vlan_info);
			return -EFAULT;
		}

		msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
		buff = vlan_info;
	} else if ((msg_type == ADD_L2TP_VLAN_MAPPING) ||
		(msg_type == DEL_L2TP_VLAN_MAPPING)) {
		mapping_info = kzalloc(sizeof(struct
			ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
		if (!mapping_info)
			return -ENOMEM;

		if (copy_from_user((u8 *)mapping_info,
			(void __user *)usr_param,
			sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
			kfree(mapping_info);
			return -EFAULT;
		}

		msg_meta.msg_len = sizeof(struct
			ipa_ioc_l2tp_vlan_mapping_info);
		buff = mapping_info;
	} else if ((msg_type == ADD_BRIDGE_VLAN_MAPPING) ||
		(msg_type == DEL_BRIDGE_VLAN_MAPPING)) {
		bridge_vlan_info = kzalloc(
			sizeof(struct ipa_ioc_bridge_vlan_mapping_info),
			GFP_KERNEL);
		if (!bridge_vlan_info)
			return -ENOMEM;

		if (copy_from_user((u8 *)bridge_vlan_info,
			(void __user *)usr_param,
			sizeof(struct ipa_ioc_bridge_vlan_mapping_info))) {
			kfree(bridge_vlan_info);
			IPAERR("copy from user failed\n");
			return -EFAULT;
		}

		msg_meta.msg_len = sizeof(struct
			ipa_ioc_bridge_vlan_mapping_info);
		buff = bridge_vlan_info;
	} else {
		IPAERR("Unexpected event\n");
		return -EFAULT;
	}

	retval = ipa3_send_msg(&msg_meta, buff,
		ipa3_vlan_l2tp_msg_free_cb);
	if (retval) {
		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
			retval,
			msg_type);
		kfree(buff);
		return retval;
	}

	IPADBG("exit\n");

	return 0;
}
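
/* Free callback for GSB connect/disconnect messages. */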
static void ipa3_gsb_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAERR("Null buffer\n");
		return;
	}

	switch (type) {
	case IPA_GSB_CONNECT:
	case IPA_GSB_DISCONNECT:
		break;
	default:
		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
		return;
	}

	kfree(buff);
}
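
/*
 * Copy a GSB connect/disconnect request from user space and forward
 * it through ipa3_send_msg(), mirroring the VLAN/L2TP path above.
 */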
static int ipa3_send_gsb_msg(unsigned long usr_param, uint8_t msg_type)
{
	int retval;
	struct ipa_ioc_gsb_info *gsb_info;
	struct ipa_msg_meta msg_meta;
	void *buff;

	IPADBG("type %d\n", msg_type);

	memset(&msg_meta, 0, sizeof(msg_meta));
	msg_meta.msg_type = msg_type;

	if ((msg_type == IPA_GSB_CONNECT) ||
		(msg_type == IPA_GSB_DISCONNECT)) {
		gsb_info = kzalloc(sizeof(struct ipa_ioc_gsb_info),
			GFP_KERNEL);
		if (!gsb_info) {
			IPAERR("no memory\n");
			return -ENOMEM;
		}

		if (copy_from_user((u8 *)gsb_info, (void __user *)usr_param,
			sizeof(struct ipa_ioc_gsb_info))) {
			kfree(gsb_info);
			return -EFAULT;
		}

		msg_meta.msg_len = sizeof(struct ipa_ioc_gsb_info);
		buff = gsb_info;
	} else {
		IPAERR("Unexpected event\n");
		return -EFAULT;
	}

	retval = ipa3_send_msg(&msg_meta, buff,
		ipa3_gsb_msg_free_cb);
	if (retval) {
		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
			retval,
			msg_type);
		kfree(buff);
		return retval;
	}

	IPADBG("exit\n");

	return 0;
}
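
/*
 * The *_v2 ioctl handlers below share one copy-in/copy-out pattern:
 * copy the fixed ioctl header, duplicate the user rule array with
 * memdup_user() at the user's per-entry stride (rule_add_size, which
 * must not exceed the kernel struct size), repack each entry into a
 * kernel array of full-sized entries, point the header's rules field
 * at that array for the core call, then copy results back out at the
 * user stride. This presumably lets user space built against an
 * older, smaller rule struct keep working against a newer kernel.
 */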
static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg)
{
	int retval = 0;
	int i;
	u8 header[128] = { 0 };
	int pre_entry;
	u32 usr_pyld_sz;
	u32 pyld_sz;
	u64 uptr = 0;
	u8 *param = NULL;
	u8 *kptr = NULL;

	if (copy_from_user(header, (const void __user *)arg,
		sizeof(struct ipa_ioc_add_rt_rule_v2))) {
		IPAERR_RL("copy_from_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}
	pre_entry =
		((struct ipa_ioc_add_rt_rule_v2 *)header)->num_rules;
	if (unlikely(((struct ipa_ioc_add_rt_rule_v2 *)
		header)->rule_add_size >
		sizeof(struct ipa_rt_rule_add_i))) {
		IPAERR_RL("unexpected rule_add_size %d\n",
			((struct ipa_ioc_add_rt_rule_v2 *)
			header)->rule_add_size);
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* user payload size */
	usr_pyld_sz = ((struct ipa_ioc_add_rt_rule_v2 *)
		header)->rule_add_size * pre_entry;
	/* actual payload structure size in kernel */
	pyld_sz = sizeof(struct ipa_rt_rule_add_i) * pre_entry;
	uptr = ((struct ipa_ioc_add_rt_rule_v2 *)
		header)->rules;
	if (unlikely(!uptr)) {
		IPAERR_RL("unexpected NULL rules\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* alloc param with same payload size as user payload */
	param = memdup_user((const void __user *)uptr,
		usr_pyld_sz);
	if (IS_ERR(param)) {
		retval = -EFAULT;
		goto free_param_kptr;
	}
	/* alloc kernel pointer with actual payload size */
	kptr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!kptr) {
		retval = -ENOMEM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy(kptr + i * sizeof(struct ipa_rt_rule_add_i),
			(void *)param + i *
			((struct ipa_ioc_add_rt_rule_v2 *)
			header)->rule_add_size,
			((struct ipa_ioc_add_rt_rule_v2 *)
			header)->rule_add_size);
	/* modify the rule pointer to the kernel pointer */
	((struct ipa_ioc_add_rt_rule_v2 *)header)->rules =
		(u64)kptr;
	if (ipa3_add_rt_rule_usr_v2(
		(struct ipa_ioc_add_rt_rule_v2 *)header, true)) {
		IPAERR_RL("ipa3_add_rt_rule_usr_v2 fails\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy((void *)param + i *
			((struct ipa_ioc_add_rt_rule_v2 *)
			header)->rule_add_size,
			kptr + i * sizeof(struct ipa_rt_rule_add_i),
			((struct ipa_ioc_add_rt_rule_v2 *)
			header)->rule_add_size);
	if (copy_to_user((void __user *)uptr, param,
		usr_pyld_sz)) {
		IPAERR_RL("copy_to_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}

free_param_kptr:
	if (!IS_ERR(param))
		kfree(param);
	kfree(kptr);

	return retval;
}
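
/* Same copy-in/copy-out scheme as above, for rule_add_ext entries. */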
static int ipa3_ioctl_add_rt_rule_ext_v2(unsigned long arg)
{
	int retval = 0;
	int i;
	u8 header[128] = { 0 };
	int pre_entry;
	u32 usr_pyld_sz;
	u32 pyld_sz;
	u64 uptr = 0;
	u8 *param = NULL;
	u8 *kptr = NULL;

	if (copy_from_user(header,
		(const void __user *)arg,
		sizeof(struct ipa_ioc_add_rt_rule_ext_v2))) {
		IPAERR_RL("copy_from_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}
	pre_entry =
		((struct ipa_ioc_add_rt_rule_ext_v2 *)
		header)->num_rules;
	if (unlikely(((struct ipa_ioc_add_rt_rule_ext_v2 *)
		header)->rule_add_ext_size >
		sizeof(struct ipa_rt_rule_add_ext_i))) {
		IPAERR_RL("unexpected rule_add_ext_size %d\n",
			((struct ipa_ioc_add_rt_rule_ext_v2 *)
			header)->rule_add_ext_size);
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* user payload size */
	usr_pyld_sz = ((struct ipa_ioc_add_rt_rule_ext_v2 *)
		header)->rule_add_ext_size * pre_entry;
	/* actual payload structure size in kernel */
	pyld_sz = sizeof(struct ipa_rt_rule_add_ext_i)
		* pre_entry;
	uptr = ((struct ipa_ioc_add_rt_rule_ext_v2 *)
		header)->rules;
	if (unlikely(!uptr)) {
		IPAERR_RL("unexpected NULL rules\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* alloc param with same payload size as user payload */
	param = memdup_user((const void __user *)uptr,
		usr_pyld_sz);
	if (IS_ERR(param)) {
		retval = -EFAULT;
		goto free_param_kptr;
	}
	/* alloc kernel pointer with actual payload size */
	kptr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!kptr) {
		retval = -ENOMEM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy(kptr + i *
			sizeof(struct ipa_rt_rule_add_ext_i),
			(void *)param + i *
			((struct ipa_ioc_add_rt_rule_ext_v2 *)
			header)->rule_add_ext_size,
			((struct ipa_ioc_add_rt_rule_ext_v2 *)
			header)->rule_add_ext_size);
	/* modify the rule pointer to the kernel pointer */
	((struct ipa_ioc_add_rt_rule_ext_v2 *)header)->rules =
		(u64)kptr;
	if (ipa3_add_rt_rule_ext_v2(
		(struct ipa_ioc_add_rt_rule_ext_v2 *)header)) {
		IPAERR_RL("ipa3_add_rt_rule_ext_v2 fails\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy((void *)param + i *
			((struct ipa_ioc_add_rt_rule_ext_v2 *)
			header)->rule_add_ext_size,
			kptr + i *
			sizeof(struct ipa_rt_rule_add_ext_i),
			((struct ipa_ioc_add_rt_rule_ext_v2 *)
			header)->rule_add_ext_size);
	if (copy_to_user((void __user *)uptr, param,
		usr_pyld_sz)) {
		IPAERR_RL("copy_to_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}

free_param_kptr:
	if (!IS_ERR(param))
		kfree(param);
	kfree(kptr);

	return retval;
}
static int ipa3_ioctl_add_rt_rule_after_v2(unsigned long arg)
{
	int retval = 0;
	int i;
	u8 header[128] = { 0 };
	int pre_entry;
	u32 usr_pyld_sz;
	u32 pyld_sz;
	u64 uptr = 0;
	u8 *param = NULL;
	u8 *kptr = NULL;

	if (copy_from_user(header, (const void __user *)arg,
		sizeof(struct ipa_ioc_add_rt_rule_after_v2))) {
		IPAERR_RL("copy_from_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}
	pre_entry = ((struct ipa_ioc_add_rt_rule_after_v2 *)header)->num_rules;
	if (unlikely(((struct ipa_ioc_add_rt_rule_after_v2 *)
		header)->rule_add_size >
		sizeof(struct ipa_rt_rule_add_i))) {
		IPAERR_RL("unexpected rule_add_size %d\n",
			((struct ipa_ioc_add_rt_rule_after_v2 *)
			header)->rule_add_size);
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* user payload size */
	usr_pyld_sz = ((struct ipa_ioc_add_rt_rule_after_v2 *)
		header)->rule_add_size * pre_entry;
	/* actual payload structure size in kernel */
	pyld_sz = sizeof(struct ipa_rt_rule_add_i) * pre_entry;
	uptr = ((struct ipa_ioc_add_rt_rule_after_v2 *)header)->rules;
	if (unlikely(!uptr)) {
		IPAERR_RL("unexpected NULL rules\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* alloc param with same payload size as user payload */
	param = memdup_user((const void __user *)uptr, usr_pyld_sz);
	if (IS_ERR(param)) {
		retval = -EFAULT;
		goto free_param_kptr;
	}
	/* alloc kernel pointer with actual payload size */
	kptr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!kptr) {
		retval = -ENOMEM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy(kptr + i * sizeof(struct ipa_rt_rule_add_i),
			(void *)param + i *
			((struct ipa_ioc_add_rt_rule_after_v2 *)
			header)->rule_add_size,
			((struct ipa_ioc_add_rt_rule_after_v2 *)
			header)->rule_add_size);
	/* modify the rule pointer to the kernel pointer */
	((struct ipa_ioc_add_rt_rule_after_v2 *)header)->rules = (u64)kptr;
	if (ipa3_add_rt_rule_after_v2(
		(struct ipa_ioc_add_rt_rule_after_v2 *)header)) {
		IPAERR_RL("ipa3_add_rt_rule_after_v2 fails\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy((void *)param + i *
			((struct ipa_ioc_add_rt_rule_after_v2 *)
			header)->rule_add_size,
			kptr + i * sizeof(struct ipa_rt_rule_add_i),
			((struct ipa_ioc_add_rt_rule_after_v2 *)
			header)->rule_add_size);
	if (copy_to_user((void __user *)uptr, param, usr_pyld_sz)) {
		IPAERR_RL("copy_to_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}

free_param_kptr:
	if (!IS_ERR(param))
		kfree(param);
	kfree(kptr);
	return retval;
}
static int ipa3_ioctl_mdfy_rt_rule_v2(unsigned long arg)
{
	int retval = 0;
	int i;
	u8 header[128] = { 0 };
	int pre_entry;
	u32 usr_pyld_sz;
	u32 pyld_sz;
	u64 uptr = 0;
	u8 *param = NULL;
	u8 *kptr = NULL;

	if (copy_from_user(header, (const void __user *)arg,
		sizeof(struct ipa_ioc_mdfy_rt_rule_v2))) {
		IPAERR_RL("copy_from_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}
	pre_entry = ((struct ipa_ioc_mdfy_rt_rule_v2 *)header)->num_rules;
	if (unlikely(((struct ipa_ioc_mdfy_rt_rule_v2 *)
		header)->rule_mdfy_size >
		sizeof(struct ipa_rt_rule_mdfy_i))) {
		IPAERR_RL("unexpected rule_mdfy_size %d\n",
			((struct ipa_ioc_mdfy_rt_rule_v2 *)
			header)->rule_mdfy_size);
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* user payload size */
	usr_pyld_sz = ((struct ipa_ioc_mdfy_rt_rule_v2 *)
		header)->rule_mdfy_size * pre_entry;
	/* actual payload structure size in kernel */
	pyld_sz = sizeof(struct ipa_rt_rule_mdfy_i) * pre_entry;
	uptr = ((struct ipa_ioc_mdfy_rt_rule_v2 *)header)->rules;
	if (unlikely(!uptr)) {
		IPAERR_RL("unexpected NULL rules\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* alloc param with same payload size as user payload */
	param = memdup_user((const void __user *)uptr, usr_pyld_sz);
	if (IS_ERR(param)) {
		retval = -EFAULT;
		goto free_param_kptr;
	}
	/* alloc kernel pointer with actual payload size */
	kptr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!kptr) {
		retval = -ENOMEM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy(kptr + i * sizeof(struct ipa_rt_rule_mdfy_i),
			(void *)param + i *
			((struct ipa_ioc_mdfy_rt_rule_v2 *)
			header)->rule_mdfy_size,
			((struct ipa_ioc_mdfy_rt_rule_v2 *)
			header)->rule_mdfy_size);
	/* modify the rule pointer to the kernel pointer */
	((struct ipa_ioc_mdfy_rt_rule_v2 *)header)->rules = (u64)kptr;
	if (ipa3_mdfy_rt_rule_v2((struct ipa_ioc_mdfy_rt_rule_v2 *)
		header)) {
		IPAERR_RL("ipa3_mdfy_rt_rule_v2 fails\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy((void *)param + i *
			((struct ipa_ioc_mdfy_rt_rule_v2 *)
			header)->rule_mdfy_size,
			kptr + i * sizeof(struct ipa_rt_rule_mdfy_i),
			((struct ipa_ioc_mdfy_rt_rule_v2 *)
			header)->rule_mdfy_size);
	if (copy_to_user((void __user *)uptr, param, usr_pyld_sz)) {
		IPAERR_RL("copy_to_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}

free_param_kptr:
	if (!IS_ERR(param))
		kfree(param);
	kfree(kptr);
	return retval;
}
static int ipa3_ioctl_add_flt_rule_v2(unsigned long arg)
{
	int retval = 0;
	int i;
	u8 header[128] = { 0 };
	int pre_entry;
	u32 usr_pyld_sz;
	u32 pyld_sz;
	u64 uptr = 0;
	u8 *param = NULL;
	u8 *kptr = NULL;

	if (copy_from_user(header, (const void __user *)arg,
		sizeof(struct ipa_ioc_add_flt_rule_v2))) {
		IPAERR_RL("copy_from_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}
	pre_entry = ((struct ipa_ioc_add_flt_rule_v2 *)header)->num_rules;
	if (unlikely(((struct ipa_ioc_add_flt_rule_v2 *)
		header)->flt_rule_size >
		sizeof(struct ipa_flt_rule_add_i))) {
		IPAERR_RL("unexpected flt_rule_size %d\n",
			((struct ipa_ioc_add_flt_rule_v2 *)
			header)->flt_rule_size);
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* user payload size */
	usr_pyld_sz = ((struct ipa_ioc_add_flt_rule_v2 *)
		header)->flt_rule_size * pre_entry;
	/* actual payload structure size in kernel */
	pyld_sz = sizeof(struct ipa_flt_rule_add_i) * pre_entry;
	uptr = ((struct ipa_ioc_add_flt_rule_v2 *)header)->rules;
	if (unlikely(!uptr)) {
		IPAERR_RL("unexpected NULL rules\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* alloc param with same payload size as user payload */
	param = memdup_user((const void __user *)uptr, usr_pyld_sz);
	if (IS_ERR(param)) {
		retval = -EFAULT;
		goto free_param_kptr;
	}
	/* alloc kernel pointer with actual payload size */
	kptr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!kptr) {
		retval = -ENOMEM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy(kptr + i * sizeof(struct ipa_flt_rule_add_i),
			(void *)param + i *
			((struct ipa_ioc_add_flt_rule_v2 *)
			header)->flt_rule_size,
			((struct ipa_ioc_add_flt_rule_v2 *)
			header)->flt_rule_size);
	/* modify the rule pointer to the kernel pointer */
	((struct ipa_ioc_add_flt_rule_v2 *)header)->rules = (u64)kptr;
	if (ipa3_add_flt_rule_usr_v2((struct ipa_ioc_add_flt_rule_v2 *)
		header, true)) {
		IPAERR_RL("ipa3_add_flt_rule_usr_v2 fails\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy((void *)param + i *
			((struct ipa_ioc_add_flt_rule_v2 *)
			header)->flt_rule_size,
			kptr + i * sizeof(struct ipa_flt_rule_add_i),
			((struct ipa_ioc_add_flt_rule_v2 *)
			header)->flt_rule_size);
	if (copy_to_user((void __user *)uptr, param, usr_pyld_sz)) {
		IPAERR_RL("copy_to_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}

free_param_kptr:
	if (!IS_ERR(param))
		kfree(param);
	kfree(kptr);
	return retval;
}
static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg)
{
	int retval = 0;
	int i;
	u8 header[128] = { 0 };
	int pre_entry;
	u32 usr_pyld_sz;
	u32 pyld_sz;
	u64 uptr = 0;
	u8 *param = NULL;
	u8 *kptr = NULL;

	if (copy_from_user(header, (const void __user *)arg,
		sizeof(struct ipa_ioc_add_flt_rule_after_v2))) {
		IPAERR_RL("copy_from_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}
	pre_entry = ((struct ipa_ioc_add_flt_rule_after_v2 *)
		header)->num_rules;
	if (unlikely(((struct ipa_ioc_add_flt_rule_after_v2 *)
		header)->flt_rule_size >
		sizeof(struct ipa_flt_rule_add_i))) {
		IPAERR_RL("unexpected flt_rule_size %d\n",
			((struct ipa_ioc_add_flt_rule_after_v2 *)
			header)->flt_rule_size);
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* user payload size */
	usr_pyld_sz = ((struct ipa_ioc_add_flt_rule_after_v2 *)
		header)->flt_rule_size * pre_entry;
	/* actual payload structure size in kernel */
	pyld_sz = sizeof(struct ipa_flt_rule_add_i) * pre_entry;
	uptr = ((struct ipa_ioc_add_flt_rule_after_v2 *)header)->rules;
	if (unlikely(!uptr)) {
		IPAERR_RL("unexpected NULL rules\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* alloc param with same payload size as user payload */
	param = memdup_user((const void __user *)uptr, usr_pyld_sz);
	if (IS_ERR(param)) {
		retval = -EFAULT;
		goto free_param_kptr;
	}
	/* alloc kernel pointer with actual payload size */
	kptr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!kptr) {
		retval = -ENOMEM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy(kptr + i * sizeof(struct ipa_flt_rule_add_i),
			(void *)param + i *
			((struct ipa_ioc_add_flt_rule_after_v2 *)
			header)->flt_rule_size,
			((struct ipa_ioc_add_flt_rule_after_v2 *)
			header)->flt_rule_size);
	/* modify the rule pointer to the kernel pointer */
	((struct ipa_ioc_add_flt_rule_after_v2 *)header)->rules = (u64)kptr;
	if (ipa3_add_flt_rule_after_v2(
		(struct ipa_ioc_add_flt_rule_after_v2 *)header)) {
		IPAERR_RL("ipa3_add_flt_rule_after_v2 fails\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy((void *)param + i *
			((struct ipa_ioc_add_flt_rule_after_v2 *)
			header)->flt_rule_size,
			kptr + i * sizeof(struct ipa_flt_rule_add_i),
			((struct ipa_ioc_add_flt_rule_after_v2 *)
			header)->flt_rule_size);
	if (copy_to_user((void __user *)uptr, param, usr_pyld_sz)) {
		IPAERR_RL("copy_to_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}

free_param_kptr:
	if (!IS_ERR(param))
		kfree(param);
	kfree(kptr);
	return retval;
}
static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg)
{
	int retval = 0;
	int i;
	u8 header[128] = { 0 };
	int pre_entry;
	u32 usr_pyld_sz;
	u32 pyld_sz;
	u64 uptr = 0;
	u8 *param = NULL;
	u8 *kptr = NULL;

	if (copy_from_user(header, (const void __user *)arg,
		sizeof(struct ipa_ioc_mdfy_flt_rule_v2))) {
		IPAERR_RL("copy_from_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}
	pre_entry = ((struct ipa_ioc_mdfy_flt_rule_v2 *)header)->num_rules;
	if (unlikely(((struct ipa_ioc_mdfy_flt_rule_v2 *)
		header)->rule_mdfy_size >
		sizeof(struct ipa_flt_rule_mdfy_i))) {
		IPAERR_RL("unexpected rule_mdfy_size %d\n",
			((struct ipa_ioc_mdfy_flt_rule_v2 *)
			header)->rule_mdfy_size);
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* user payload size */
	usr_pyld_sz = ((struct ipa_ioc_mdfy_flt_rule_v2 *)
		header)->rule_mdfy_size * pre_entry;
	/* actual payload structure size in kernel */
	pyld_sz = sizeof(struct ipa_flt_rule_mdfy_i) * pre_entry;
	uptr = ((struct ipa_ioc_mdfy_flt_rule_v2 *)header)->rules;
	if (unlikely(!uptr)) {
		IPAERR_RL("unexpected NULL rules\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* alloc param with same payload size as user payload */
	param = memdup_user((const void __user *)uptr, usr_pyld_sz);
	if (IS_ERR(param)) {
		retval = -EFAULT;
		goto free_param_kptr;
	}
	/* alloc kernel pointer with actual payload size */
	kptr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!kptr) {
		retval = -ENOMEM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy(kptr + i * sizeof(struct ipa_flt_rule_mdfy_i),
			(void *)param + i *
			((struct ipa_ioc_mdfy_flt_rule_v2 *)
			header)->rule_mdfy_size,
			((struct ipa_ioc_mdfy_flt_rule_v2 *)
			header)->rule_mdfy_size);
	/*
	 * modify the rule pointer to the kernel pointer; the header holds
	 * an ipa_ioc_mdfy_flt_rule_v2 here (the original cast to
	 * ipa_ioc_add_flt_rule_after_v2 was a copy-paste slip)
	 */
	((struct ipa_ioc_mdfy_flt_rule_v2 *)header)->rules = (u64)kptr;
	if (ipa3_mdfy_flt_rule_v2(
		(struct ipa_ioc_mdfy_flt_rule_v2 *)header)) {
		IPAERR_RL("ipa3_mdfy_flt_rule_v2 fails\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy((void *)param + i *
			((struct ipa_ioc_mdfy_flt_rule_v2 *)
			header)->rule_mdfy_size,
			kptr + i * sizeof(struct ipa_flt_rule_mdfy_i),
			((struct ipa_ioc_mdfy_flt_rule_v2 *)
			header)->rule_mdfy_size);
	if (copy_to_user((void __user *)uptr, param, usr_pyld_sz)) {
		IPAERR_RL("copy_to_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}

free_param_kptr:
	if (!IS_ERR(param))
		kfree(param);
	kfree(kptr);
	return retval;
}
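
/*
 * A minimal user-space sketch of driving one of the v2 handlers above.
 * This is illustrative only: the struct and field names mirror the casts
 * used in the handlers, but the per-rule uapi struct name and the
 * /dev/ipa node path are assumptions, not confirmed by this file.
 *
 *	struct ipa_ioc_add_rt_rule_v2 req;
 *	struct ipa_rt_rule_add_v2 rules[2];
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(rules, 0, sizeof(rules));
 *	req.num_rules = 2;
 *	req.rule_add_size = sizeof(rules[0]);
 *	req.rules = (uint64_t)(uintptr_t)rules;
 *	fd = open("/dev/ipa", O_RDWR);
 *	if (fd >= 0 && !ioctl(fd, IPA_IOC_ADD_RT_RULE_V2, &req))
 *		use the handles written back into rules[];
 *
 * The kernel rejects a rule_add_size larger than its own struct and
 * accepts anything smaller, so a binary built against an older (smaller)
 * rule struct keeps working.
 */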
static int ipa3_ioctl_fnr_counter_alloc(unsigned long arg)
{
	int retval = 0;
	u8 header[128] = { 0 };

	if (copy_from_user(header, (const void __user *)arg,
		sizeof(struct ipa_ioc_flt_rt_counter_alloc))) {
		IPAERR("copy_from_user fails\n");
		return -EFAULT;
	}
	if (((struct ipa_ioc_flt_rt_counter_alloc *)
		header)->hw_counter.num_counters >
		IPA_FLT_RT_HW_COUNTER ||
		((struct ipa_ioc_flt_rt_counter_alloc *)
		header)->sw_counter.num_counters >
		IPA_FLT_RT_SW_COUNTER) {
		IPAERR("failed: wrong sw/hw num_counters\n");
		return -EPERM;
	}
	if (((struct ipa_ioc_flt_rt_counter_alloc *)
		header)->hw_counter.num_counters == 0 &&
		((struct ipa_ioc_flt_rt_counter_alloc *)
		header)->sw_counter.num_counters == 0) {
		IPAERR("failed: both sw/hw num_counters 0\n");
		return -EPERM;
	}
	retval = ipa3_alloc_counter_id(
		(struct ipa_ioc_flt_rt_counter_alloc *)header);
	if (retval < 0) {
		IPAERR("ipa3_alloc_counter_id failed\n");
		return retval;
	}
	if (copy_to_user((void __user *)arg, header,
		sizeof(struct ipa_ioc_flt_rt_counter_alloc))) {
		IPAERR("copy_to_user fails\n");
		ipa3_counter_remove_hdl(
			((struct ipa_ioc_flt_rt_counter_alloc *)
			header)->hdl);
		return -EFAULT;
	}
	return 0;
}
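
/*
 * The query below treats [start_id, end_id] as an inclusive range, so it
 * fetches end_id - start_id + 1 entries. Note that if copy_to_user()
 * fails in the allocator above, the freshly allocated handle is removed
 * again so counters are not leaked. stats_size plays the same
 * forward/backward-compatibility role that rule_add_size plays for the
 * rule ioctls.
 */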
static int ipa3_ioctl_fnr_counter_query(unsigned long arg)
{
	int retval = 0;
	int i;
	u8 header[128] = { 0 };
	int pre_entry;
	u32 usr_pyld_sz;
	u32 pyld_sz;
	u64 uptr = 0;
	u8 *param = NULL;
	u8 *kptr = NULL;

	if (copy_from_user(header, (const void __user *)arg,
		sizeof(struct ipa_ioc_flt_rt_query))) {
		IPAERR_RL("copy_from_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}
	pre_entry = ((struct ipa_ioc_flt_rt_query *)header)->end_id -
		((struct ipa_ioc_flt_rt_query *)header)->start_id + 1;
	if (pre_entry <= 0 || pre_entry > IPA_MAX_FLT_RT_CNT_INDEX) {
		IPAERR("IPA_IOC_FNR_COUNTER_QUERY failed: num %d\n",
			pre_entry);
		retval = -EPERM;
		goto free_param_kptr;
	}
	if (((struct ipa_ioc_flt_rt_query *)header)->stats_size >
		sizeof(struct ipa_flt_rt_stats)) {
		IPAERR_RL("unexpected stats_size %d\n",
			((struct ipa_ioc_flt_rt_query *)header)->stats_size);
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* user payload size */
	usr_pyld_sz = ((struct ipa_ioc_flt_rt_query *)
		header)->stats_size * pre_entry;
	/* actual payload structure size in kernel */
	pyld_sz = sizeof(struct ipa_flt_rt_stats) * pre_entry;
	uptr = ((struct ipa_ioc_flt_rt_query *)header)->stats;
	if (unlikely(!uptr)) {
		IPAERR_RL("unexpected NULL stats\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	/* alloc param with same payload size as user payload */
	param = memdup_user((const void __user *)uptr, usr_pyld_sz);
	if (IS_ERR(param)) {
		retval = -EFAULT;
		goto free_param_kptr;
	}
	/* alloc kernel pointer with actual payload size */
	kptr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!kptr) {
		retval = -ENOMEM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy(kptr + i * sizeof(struct ipa_flt_rt_stats),
			(void *)param + i *
			((struct ipa_ioc_flt_rt_query *)
			header)->stats_size,
			((struct ipa_ioc_flt_rt_query *)
			header)->stats_size);
	/* modify the stats pointer to the kernel pointer */
	((struct ipa_ioc_flt_rt_query *)header)->stats = (u64)kptr;
	retval = ipa_get_flt_rt_stats(
		(struct ipa_ioc_flt_rt_query *)header);
	if (retval < 0) {
		IPAERR("ipa_get_flt_rt_stats failed\n");
		retval = -EPERM;
		goto free_param_kptr;
	}
	for (i = 0; i < pre_entry; i++)
		memcpy((void *)param + i *
			((struct ipa_ioc_flt_rt_query *)
			header)->stats_size,
			kptr + i * sizeof(struct ipa_flt_rt_stats),
			((struct ipa_ioc_flt_rt_query *)
			header)->stats_size);
	if (copy_to_user((void __user *)uptr, param, usr_pyld_sz)) {
		IPAERR_RL("copy_to_user fails\n");
		retval = -EFAULT;
		goto free_param_kptr;
	}

free_param_kptr:
	if (!IS_ERR(param))
		kfree(param);
	kfree(kptr);
	return retval;
}
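
/*
 * Top-level ioctl dispatcher. Fixed-size commands are handled in place on
 * the 128-byte header buffer; variable-size v1 commands duplicate their
 * whole payload into param, which is freed exactly once after the switch;
 * the *_v2 and counter commands delegate to the helpers above, which
 * manage their own buffers. The IPA_ACTIVE_CLIENTS vote taken before the
 * switch must be dropped on every exit path.
 */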
static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	u32 pyld_sz;
	u8 header[128] = { 0 };
	u8 *param = NULL;
	bool is_vlan_mode;
	struct ipa_ioc_nat_alloc_mem nat_mem;
	struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
	struct ipa_ioc_v4_nat_init nat_init;
	struct ipa_ioc_ipv6ct_init ipv6ct_init;
	struct ipa_ioc_v4_nat_del nat_del;
	struct ipa_ioc_nat_ipv6ct_table_del table_del;
	struct ipa_ioc_nat_pdn_entry mdfy_pdn;
	struct ipa_ioc_nat_dma_cmd *table_dma_cmd;
	struct ipa_ioc_get_vlan_mode vlan_mode;
	struct ipa_ioc_wigig_fst_switch fst_switch;
	size_t sz;
	int pre_entry;
	int hdl;

	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
		return -ENOTTY;
	if (!ipa3_is_ready()) {
		IPAERR("IPA not ready, waiting for init completion\n");
		wait_for_completion(&ipa3_ctx->init_completion_obj);
	}
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	switch (cmd) {
	case IPA_IOC_ALLOC_NAT_MEM:
		if (copy_from_user(&nat_mem, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_alloc_mem))) {
			retval = -EFAULT;
			break;
		}
		/* null terminate the string */
		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
		if (ipa3_allocate_nat_device(&nat_mem)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, &nat_mem,
			sizeof(struct ipa_ioc_nat_alloc_mem))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ALLOC_NAT_TABLE:
		if (copy_from_user(&table_alloc, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_allocate_nat_table(&table_alloc)) {
			retval = -EFAULT;
			break;
		}
		if (table_alloc.offset &&
			copy_to_user((void __user *)arg, &table_alloc,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ALLOC_IPV6CT_TABLE:
		if (copy_from_user(&table_alloc, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_allocate_ipv6ct_table(&table_alloc)) {
			retval = -EFAULT;
			break;
		}
		if (table_alloc.offset &&
			copy_to_user((void __user *)arg, &table_alloc,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_V4_INIT_NAT:
		if (copy_from_user(&nat_init, (const void __user *)arg,
			sizeof(struct ipa_ioc_v4_nat_init))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_init_cmd(&nat_init)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_INIT_IPV6CT_TABLE:
		if (copy_from_user(&ipv6ct_init, (const void __user *)arg,
			sizeof(struct ipa_ioc_ipv6ct_init))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_ipv6ct_init_cmd(&ipv6ct_init)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_TABLE_DMA_CMD:
		table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)header;
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_dma_cmd))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = table_dma_cmd->entries;
		pyld_sz = sizeof(struct ipa_ioc_nat_dma_cmd) +
			pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)param;
		/* add check in case user-space module compromised */
		if (unlikely(table_dma_cmd->entries != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				table_dma_cmd->entries, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_table_dma_cmd(table_dma_cmd)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_V4_DEL_NAT:
		if (copy_from_user(&nat_del, (const void __user *)arg,
			sizeof(struct ipa_ioc_v4_nat_del))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_del_cmd(&nat_del)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_NAT_TABLE:
		if (copy_from_user(&table_del, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_nat_table(&table_del)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_IPV6CT_TABLE:
		if (copy_from_user(&table_del, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_ipv6ct_table(&table_del)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_NAT_MODIFY_PDN:
		if (copy_from_user(&mdfy_pdn, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_pdn_entry))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) {
			retval = -EFAULT;
			break;
		}
		break;
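	/*
	 * For the variable-length commands (IPA_IOC_TABLE_DMA_CMD above and
	 * the rule/header commands below), the element count is read twice:
	 * once from the stack copy of the header and once from the
	 * memdup_user() duplicate. Re-checking that the two counts match
	 * defends against user space racing to change the count between the
	 * two fetches (a classic double-fetch/TOCTOU hole).
	 */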
	case IPA_IOC_ADD_HDR:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_hdr))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
		pyld_sz = sizeof(struct ipa_ioc_add_hdr) +
			pre_entry * sizeof(struct ipa_hdr_add);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_hdr_usr((struct ipa_ioc_add_hdr *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_HDR:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_del_hdr))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_del_hdr *)header)->num_hdls;
		pyld_sz = sizeof(struct ipa_ioc_del_hdr) +
			pre_entry * sizeof(struct ipa_hdr_del);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_hdr *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
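	/*
	 * Unlike the *_v2 ioctls handled by the helpers above, the v1 rule
	 * commands below carry their arrays inline: the payload is the ioctl
	 * struct followed immediately by num_rules (or num_hdls) fixed-size
	 * entries, so a single memdup_user() of pyld_sz captures everything.
	 */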
	case IPA_IOC_ADD_RT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
		pyld_sz = sizeof(struct ipa_ioc_add_rt_rule) +
			pre_entry * sizeof(struct ipa_rt_rule_add);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_rt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_RT_RULE_EXT:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_rt_rule_ext))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
		pyld_sz = sizeof(struct ipa_ioc_add_rt_rule_ext) +
			pre_entry * sizeof(struct ipa_rt_rule_add_ext);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(
			((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules
			!= pre_entry)) {
			IPAERR(" prevent memory corruption(%d not match %d)\n",
				((struct ipa_ioc_add_rt_rule_ext *)param)->
				num_rules,
				pre_entry);
			retval = -EINVAL;
			break;
		}
		if (ipa3_add_rt_rule_ext(
			(struct ipa_ioc_add_rt_rule_ext *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_RT_RULE_AFTER:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_rt_rule_after))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
		pyld_sz = sizeof(struct ipa_ioc_add_rt_rule_after) +
			pre_entry * sizeof(struct ipa_rt_rule_add);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
			num_rules != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_rt_rule_after *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_rt_rule_after(
			(struct ipa_ioc_add_rt_rule_after *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_MDFY_RT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_mdfy_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
		pyld_sz = sizeof(struct ipa_ioc_mdfy_rt_rule) +
			pre_entry * sizeof(struct ipa_rt_rule_mdfy);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_mdfy_rt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_RT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_del_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
		pyld_sz = sizeof(struct ipa_ioc_del_rt_rule) +
			pre_entry * sizeof(struct ipa_rt_rule_del);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_FLT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
		pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
			pre_entry * sizeof(struct ipa_flt_rule_add);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_flt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_FLT_RULE_AFTER:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_flt_rule_after))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_add_flt_rule_after *)header)->
			num_rules;
		pyld_sz = sizeof(struct ipa_ioc_add_flt_rule_after) +
			pre_entry * sizeof(struct ipa_flt_rule_add);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
			num_rules != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_flt_rule_after *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_flt_rule_after(
			(struct ipa_ioc_add_flt_rule_after *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_FLT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_del_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
		pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
			pre_entry * sizeof(struct ipa_flt_rule_del);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_flt_rule *)param)->
				num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_MDFY_FLT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_mdfy_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
		pyld_sz = sizeof(struct ipa_ioc_mdfy_flt_rule) +
			pre_entry * sizeof(struct ipa_flt_rule_mdfy);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_mdfy_flt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_COMMIT_HDR:
		retval = ipa3_commit_hdr();
		break;
	case IPA_IOC_RESET_HDR:
		retval = ipa3_reset_hdr(false);
		break;
	case IPA_IOC_COMMIT_RT:
		retval = ipa3_commit_rt(arg);
		break;
	case IPA_IOC_RESET_RT:
		retval = ipa3_reset_rt(arg, false);
		break;
	case IPA_IOC_COMMIT_FLT:
		retval = ipa3_commit_flt(arg);
		break;
	case IPA_IOC_RESET_FLT:
		retval = ipa3_reset_flt(arg, false);
		break;
	case IPA_IOC_GET_RT_TBL:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_get_rt_tbl))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_get_rt_tbl))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PUT_RT_TBL:
		retval = ipa3_put_rt_tbl(arg);
		break;
	case IPA_IOC_GET_HDR:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_get_hdr))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_get_hdr))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PUT_HDR:
		retval = ipa3_put_hdr(arg);
		break;
	case IPA_IOC_SET_FLT:
		retval = ipa3_cfg_filter(arg);
		break;
	case IPA_IOC_COPY_HDR:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_copy_hdr))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_copy_hdr))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_query_intf))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
			retval = -1;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_query_intf))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_TX_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_tx_props);
		if (copy_from_user(header, (const void __user *)arg, sz)) {
			retval = -EFAULT;
			break;
		}
		if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
			> IPA_NUM_PROPS_MAX) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_query_intf_tx_props *)
			header)->num_tx_props;
		pyld_sz = sz + pre_entry *
			sizeof(struct ipa_ioc_tx_intf_prop);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
			param)->num_tx_props != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_query_intf_tx_props *)
				param)->num_tx_props, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_intf_tx_props(
			(struct ipa_ioc_query_intf_tx_props *)param)) {
			retval = -1;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_RX_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_rx_props);
		if (copy_from_user(header, (const void __user *)arg, sz)) {
			retval = -EFAULT;
			break;
		}
		if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
			> IPA_NUM_PROPS_MAX) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_query_intf_rx_props *)
			header)->num_rx_props;
		pyld_sz = sz + pre_entry *
			sizeof(struct ipa_ioc_rx_intf_prop);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
			param)->num_rx_props != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_query_intf_rx_props *)
				param)->num_rx_props, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_intf_rx_props(
			(struct ipa_ioc_query_intf_rx_props *)param)) {
			retval = -1;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_EXT_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_ext_props);
		if (copy_from_user(header, (const void __user *)arg, sz)) {
			retval = -EFAULT;
			break;
		}
		if (((struct ipa_ioc_query_intf_ext_props *)
			header)->num_ext_props > IPA_NUM_PROPS_MAX) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_query_intf_ext_props *)
			header)->num_ext_props;
		pyld_sz = sz + pre_entry *
			sizeof(struct ipa_ioc_ext_intf_prop);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
			param)->num_ext_props != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_query_intf_ext_props *)
				param)->num_ext_props, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_intf_ext_props(
			(struct ipa_ioc_query_intf_ext_props *)param)) {
			retval = -1;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PULL_MSG:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_msg_meta))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_msg_meta *)header)->msg_len;
		pyld_sz = sizeof(struct ipa_msg_meta) + pre_entry;
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_msg_meta *)param)->msg_len,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_pull_msg((struct ipa_msg_meta *)param,
			(char *)param + sizeof(struct ipa_msg_meta),
			((struct ipa_msg_meta *)param)->msg_len) !=
			((struct ipa_msg_meta *)param)->msg_len) {
			retval = -1;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_RM_ADD_DEPENDENCY:
		/* IPA RM is deprecated because IPA PM is used */
		IPAERR("using obsolete command: IPA_IOC_RM_ADD_DEPENDENCY");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return -EINVAL;
	case IPA_IOC_RM_DEL_DEPENDENCY:
		/* IPA RM is deprecated because IPA PM is used */
		IPAERR("using obsolete command: IPA_IOC_RM_DEL_DEPENDENCY");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return -EINVAL;
	case IPA_IOC_GENERATE_FLT_EQ:
	{
		struct ipa_ioc_generate_flt_eq flt_eq;

		if (copy_from_user(&flt_eq, (const void __user *)arg,
			sizeof(struct ipa_ioc_generate_flt_eq))) {
			retval = -EFAULT;
			break;
		}
		if (ipahal_flt_generate_equation(flt_eq.ip,
			&flt_eq.attrib, &flt_eq.eq_attrib)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, &flt_eq,
			sizeof(struct ipa_ioc_generate_flt_eq))) {
			retval = -EFAULT;
			break;
		}
		break;
	}
	case IPA_IOC_QUERY_EP_MAPPING:
	{
		retval = ipa3_get_ep_mapping(arg);
		break;
	}
	case IPA_IOC_QUERY_RT_TBL_INDEX:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_rt_index(
			(struct ipa_ioc_get_rt_tbl_indx *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_WRITE_QMAPID:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_write_qmapid))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_write_qmapid))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true);
		if (retval) {
			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
			break;
		}
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true);
		if (retval) {
			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
			break;
		}
		break;
	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
		retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT, false);
		if (retval) {
			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
			break;
		}
		break;
	case IPA_IOC_ADD_HDR_PROC_CTX:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = ((struct ipa_ioc_add_hdr_proc_ctx *)
			header)->num_proc_ctxs;
		pyld_sz = sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
			pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
			param)->num_proc_ctxs != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_hdr_proc_ctx *)
				param)->num_proc_ctxs, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_hdr_proc_ctx(
			(struct ipa_ioc_add_hdr_proc_ctx *)param, true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_HDR_PROC_CTX:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
		pyld_sz = sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
			pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
		param = memdup_user((const void __user *)arg, pyld_sz);
		if (IS_ERR(param)) {
			retval = PTR_ERR(param);
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
			param)->num_hdls != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_hdr_proc_ctx *)param)->
				num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_hdr_proc_ctx_by_user(
			(struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_GET_HW_VERSION:
		pyld_sz = sizeof(enum ipa_hw_type);
		param = kmemdup(&ipa3_ctx->ipa_hw_type, pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_GET_VLAN_MODE:
		if (copy_from_user(&vlan_mode, (const void __user *)arg,
			sizeof(struct ipa_ioc_get_vlan_mode))) {
			retval = -EFAULT;
			break;
		}
		retval = ipa3_is_vlan_mode(vlan_mode.iface, &is_vlan_mode);
		if (retval)
			break;
		vlan_mode.is_vlan_mode = is_vlan_mode;
		if (copy_to_user((void __user *)arg, &vlan_mode,
			sizeof(struct ipa_ioc_get_vlan_mode))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_VLAN_IFACE:
		if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_VLAN_IFACE:
		if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_BRIDGE_VLAN_MAPPING:
		if (ipa3_send_vlan_l2tp_msg(arg, ADD_BRIDGE_VLAN_MAPPING)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_BRIDGE_VLAN_MAPPING:
		if (ipa3_send_vlan_l2tp_msg(arg, DEL_BRIDGE_VLAN_MAPPING)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
		if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
		if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
			retval = -EFAULT;
			break;
		}
		break;
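	/*
	 * Note on the cleanup path below: retval is overwritten by each
	 * successive step, so only the status of ipa3_clean_modem_rule()
	 * is actually reported to the caller; earlier failures are
	 * silently discarded.
	 */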
	case IPA_IOC_CLEANUP:
		/* route and filter rules will also be cleaned */
		IPADBG("Got IPA_IOC_CLEANUP\n");
		retval = ipa3_reset_hdr(true);
		memset(&nat_del, 0, sizeof(nat_del));
		nat_del.table_index = 0;
		retval = ipa3_nat_del_cmd(&nat_del);
		retval = ipa3_clean_modem_rule();
		ipa3_counter_id_remove_all();
		break;
	case IPA_IOC_QUERY_WLAN_CLIENT:
		IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n");
		retval = ipa3_resend_wlan_msg();
		break;
	case IPA_IOC_GSB_CONNECT:
		IPADBG("Got IPA_IOC_GSB_CONNECT\n");
		if (ipa3_send_gsb_msg(arg, IPA_GSB_CONNECT)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_GSB_DISCONNECT:
		IPADBG("Got IPA_IOC_GSB_DISCONNECT\n");
		if (ipa3_send_gsb_msg(arg, IPA_GSB_DISCONNECT)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_RT_RULE_V2:
		retval = ipa3_ioctl_add_rt_rule_v2(arg);
		break;
	case IPA_IOC_ADD_RT_RULE_EXT_V2:
		retval = ipa3_ioctl_add_rt_rule_ext_v2(arg);
		break;
	case IPA_IOC_ADD_RT_RULE_AFTER_V2:
		retval = ipa3_ioctl_add_rt_rule_after_v2(arg);
		break;
	case IPA_IOC_MDFY_RT_RULE_V2:
		retval = ipa3_ioctl_mdfy_rt_rule_v2(arg);
		break;
	case IPA_IOC_ADD_FLT_RULE_V2:
		retval = ipa3_ioctl_add_flt_rule_v2(arg);
		break;
	case IPA_IOC_ADD_FLT_RULE_AFTER_V2:
		retval = ipa3_ioctl_add_flt_rule_after_v2(arg);
		break;
	case IPA_IOC_MDFY_FLT_RULE_V2:
		retval = ipa3_ioctl_mdfy_flt_rule_v2(arg);
		break;
	case IPA_IOC_FNR_COUNTER_ALLOC:
		retval = ipa3_ioctl_fnr_counter_alloc(arg);
		break;
	case IPA_IOC_FNR_COUNTER_DEALLOC:
		hdl = (int)arg;
		if (hdl < 0) {
			IPAERR("IPA_FNR_COUNTER_DEALLOC failed: hdl %d\n",
				hdl);
			retval = -EPERM;
			break;
		}
		ipa3_counter_remove_hdl(hdl);
		break;
	case IPA_IOC_FNR_COUNTER_QUERY:
		retval = ipa3_ioctl_fnr_counter_query(arg);
		break;
	case IPA_IOC_WIGIG_FST_SWITCH:
		IPADBG("Got IPA_IOC_WIGIG_FST_SWITCH\n");
		if (copy_from_user(&fst_switch, (const void __user *)arg,
			sizeof(struct ipa_ioc_wigig_fst_switch))) {
			retval = -EFAULT;
			break;
		}
		retval = ipa_wigig_send_msg(WIGIG_FST_SWITCH,
			fst_switch.netdev_name,
			fst_switch.client_mac_addr,
			IPA_CLIENT_MAX,
			fst_switch.to_wigig);
		break;
	default:
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return -ENOTTY;
	}
	if (!IS_ERR(param))
		kfree(param);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return retval;
}
/**
 * ipa3_setup_dflt_rt_tables() - Setup default routing tables
 *
 * Return codes:
 * 0: success
 * -ENOMEM: failed to allocate memory
 * -EPERM: failed to add the tables
 */
int ipa3_setup_dflt_rt_tables(void)
{
	struct ipa_ioc_add_rt_rule *rt_rule;
	struct ipa_rt_rule_add *rt_rule_entry;

	rt_rule =
		kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
			sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
	if (!rt_rule)
		return -ENOMEM;

	/* setup a default v4 route to point to Apps */
	rt_rule->num_rules = 1;
	rt_rule->commit = 1;
	rt_rule->ip = IPA_IP_v4;
	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
		IPA_RESOURCE_NAME_MAX);

	rt_rule_entry = &rt_rule->rules[0];
	rt_rule_entry->at_rear = 1;
	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
	rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
	rt_rule_entry->rule.retain_hdr = 1;

	if (ipa3_add_rt_rule(rt_rule)) {
		IPAERR("fail to add dflt v4 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}
	IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;

	/* setup a default v6 route to point to A5 */
	rt_rule->ip = IPA_IP_v6;
	if (ipa3_add_rt_rule(rt_rule)) {
		IPAERR("fail to add dflt v6 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}
	IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;

	/*
	 * because these tables are the very first to be added, they will both
	 * have the same index (0) which is essential for programming the
	 * "route" end-point config
	 */
	kfree(rt_rule);
	return 0;
}
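/*
 * ipa3_setup_exception_path() - Install the basic LAN RX exception header
 * and program the route register so that exception packets are passed to
 * the Apps LAN consumer pipe.
 */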
static int ipa3_setup_exception_path(void)
{
	struct ipa_ioc_add_hdr *hdr;
	struct ipa_hdr_add *hdr_entry;
	struct ipahal_reg_route route = { 0 };
	int ret;

	/* install the basic exception header */
	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
		sizeof(struct ipa_hdr_add), GFP_KERNEL);
	if (!hdr)
		return -ENOMEM;

	hdr->num_hdrs = 1;
	hdr->commit = 1;
	hdr_entry = &hdr->hdr[0];
	strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
	hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;

	if (ipa3_add_hdr(hdr)) {
		IPAERR("fail to add exception hdr\n");
		ret = -EPERM;
		goto bail;
	}
	if (hdr_entry->status) {
		IPAERR("fail to add exception hdr\n");
		ret = -EPERM;
		goto bail;
	}
	ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;

	/* set the route register to pass exception packets to Apps */
	route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
	route.route_frag_def_pipe = ipa3_get_ep_mapping(
		IPA_CLIENT_APPS_LAN_CONS);
	route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
	route.route_def_retain_hdr = 1;

	if (ipa3_cfg_route(&route)) {
		IPAERR("fail to cfg exception route\n");
		ret = -EPERM;
		goto bail;
	}

	ret = 0;
bail:
	kfree(hdr);
	return ret;
}
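/*
 * ipa3_init_smem_region() - Zero out a region of IPA local SRAM by DMA-ing
 * a zeroed buffer to it with a DMA_SHARED_MEM immediate command.
 */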
static int ipa3_init_smem_region(int memory_region_size,
	int memory_region_offset)
{
	struct ipahal_imm_cmd_dma_shared_mem cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipa3_desc desc;
	struct ipa_mem_buffer mem;
	int rc;

	if (memory_region_size == 0)
		return 0;

	memset(&desc, 0, sizeof(desc));
	memset(&cmd, 0, sizeof(cmd));
	memset(&mem, 0, sizeof(mem));

	mem.size = memory_region_size;
	mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size,
		&mem.phys_base, GFP_KERNEL);
	if (!mem.base) {
		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	cmd.is_read = false;
	cmd.skip_pipeline_clear = false;
	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	cmd.size = mem.size;
	cmd.system_addr = mem.phys_base;
	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		memory_region_offset;
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct dma_shared_mem imm cmd\n");
		/* free the DMA buffer on this error path as well */
		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		return -ENOMEM;
	}

	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
	rc = ipa3_send_cmd(1, &desc);
	if (rc) {
		IPAERR("failed to send immediate command (error %d)\n", rc);
		rc = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);
	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
		mem.phys_base);

	return rc;
}
/**
 * ipa3_init_q6_smem() - Initialize Q6 general memory and
 * header memory regions in IPA.
 *
 * Return codes:
 * 0: success
 * -ENOMEM: failed to allocate dma memory
 * -EFAULT: failed to send IPA command to initialize the memory
 */
int ipa3_init_q6_smem(void)
{
	int rc;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
		IPA_MEM_PART(modem_ofst));
	if (rc) {
		IPAERR("failed to initialize Modem RAM memory\n");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return rc;
	}

	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
		IPA_MEM_PART(modem_hdr_ofst));
	if (rc) {
		IPAERR("failed to initialize Modem HDRs RAM memory\n");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return rc;
	}

	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
	if (rc) {
		IPAERR("failed to initialize Modem proc ctx RAM memory\n");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return rc;
	}

	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
		IPA_MEM_PART(modem_comp_decomp_ofst));
	if (rc) {
		IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return rc;
	}
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	return rc;
}
static void ipa3_destroy_imm(void *user1, int user2)
{
	ipahal_destroy_imm_cmd(user1);
}
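/*
 * ipa3_q6_pipe_delay() - Set or clear the endpoint DELAY bit on all
 * Q6 (modem) producer pipes.
 */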
static void ipa3_q6_pipe_delay(bool delay)
{
	int client_idx;
	int ep_idx;
	struct ipa_ep_cfg_ctrl ep_ctrl;

	memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
	ep_ctrl.ipa_ep_delay = delay;

	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
			ep_idx = ipa3_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;

			ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
				ep_idx, &ep_ctrl);
		}
	}
}
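/*
 * ipa3_q6_avoid_holb() - Suspend Q6 consumer pipes (pre-IPAv4 only) and
 * enable HOLB drop with an immediate timer so they cannot block the IPA.
 */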
static void ipa3_q6_avoid_holb(void)
{
	int ep_idx;
	int client_idx;
	struct ipa_ep_cfg_ctrl ep_suspend;
	struct ipa_ep_cfg_holb ep_holb;

	memset(&ep_suspend, 0, sizeof(ep_suspend));
	memset(&ep_holb, 0, sizeof(ep_holb));

	ep_suspend.ipa_ep_suspend = true;
	ep_holb.tmr_val = 0;
	ep_holb.en = 1;

	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2)
		ipa3_cal_ep_holb_scale_base_val(ep_holb.tmr_val, &ep_holb);

	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
			ep_idx = ipa3_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;

			/* from IPA 4.0 pipe suspend is not supported */
			if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
				ipahal_write_reg_n_fields(
					IPA_ENDP_INIT_CTRL_n,
					ep_idx, &ep_suspend);

			/*
			 * ipa3_cfg_ep_holb is not used here because we are
			 * setting HOLB on Q6 pipes, and from APPS perspective
			 * they are not valid, therefore, the above function
			 * will fail.
			 */
			ipahal_write_reg_n_fields(
				IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
				ep_idx, &ep_holb);
			ipahal_write_reg_n_fields(
				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
				ep_idx, &ep_holb);

			/* IPA4.5 issue requires HOLB_EN to be written twice */
			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
				ipahal_write_reg_n_fields(
					IPA_ENDP_INIT_HOL_BLOCK_EN_n,
					ep_idx, &ep_holb);
		}
	}
}
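/*
 * ipa3_halt_q6_gsi_channels() - Halt the GSI channels of all Q6 consumer
 * pipes, and of Q6 producer pipes as well when @prod is true, retrying
 * while GSI reports the channel as busy.
 */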
static void ipa3_halt_q6_gsi_channels(bool prod)
{
	int ep_idx;
	int client_idx;
	const struct ipa_gsi_ep_config *gsi_ep_cfg;
	int i;
	int ret;
	int code = 0;

	/* if prod flag is true, then we halt the producer channels also */
	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		if (IPA_CLIENT_IS_Q6_CONS(client_idx)
			|| (IPA_CLIENT_IS_Q6_PROD(client_idx) && prod)) {
			ep_idx = ipa3_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;

			gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
			if (!gsi_ep_cfg) {
				IPAERR("failed to get GSI config\n");
				ipa_assert();
				return;
			}

			ret = gsi_halt_channel_ee(
				gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
				&code);
			for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY &&
				ret == -GSI_STATUS_AGAIN; i++) {
				IPADBG(
				"ch %d ee %d with code %d is busy, try again\n",
					gsi_ep_cfg->ipa_gsi_chan_num,
					gsi_ep_cfg->ee,
					code);
				usleep_range(IPA_GSI_CHANNEL_HALT_MIN_SLEEP,
					IPA_GSI_CHANNEL_HALT_MAX_SLEEP);
				ret = gsi_halt_channel_ee(
					gsi_ep_cfg->ipa_gsi_chan_num,
					gsi_ep_cfg->ee, &code);
			}
			if (ret == GSI_STATUS_SUCCESS)
				IPADBG("halted gsi ch %d ee %d with code %d\n",
					gsi_ep_cfg->ipa_gsi_chan_num,
					gsi_ep_cfg->ee,
					code);
			else
				IPAERR("failed to halt ch %d ee %d code %d\n",
					gsi_ep_cfg->ipa_gsi_chan_num,
					gsi_ep_cfg->ee,
					code);
		}
	}
}
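/*
 * ipa3_q6_clean_q6_flt_tbls() - Reset the modem-owned filter table headers
 * of the given IP family and rule type to an empty table image, for every
 * filtering pipe that the AP did not configure.
 */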
static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
	enum ipa_rule_type rlt)
{
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
	struct ipahal_imm_cmd_pyld **cmd_pyld;
	int retval = 0;
	int pipe_idx;
	int flt_idx = 0;
	int num_cmds = 0;
	int index;
	u32 lcl_addr_mem_part;
	u32 lcl_hdr_sz;
	struct ipa_mem_buffer mem;

	IPADBG("Entry\n");

	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
		return -EINVAL;
	}

	/*
	 * No SRAM memory is allocated for hash tables, so the hash table
	 * clean operation is not supported.
	 */
	if (rlt == IPA_RULE_HASHABLE && ipa3_ctx->ipa_fltrt_not_hashable) {
		IPADBG("Clean hashable rules not supported\n");
		return retval;
	}

	/* there are as many filtering tables as there are filtering pipes */
	desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
		GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
		sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
	if (!cmd_pyld) {
		retval = -ENOMEM;
		goto free_desc;
	}

	if (ip == IPA_IP_v4) {
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
		}
	} else {
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
		}
	}

	retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
		0, &mem, true);
	if (retval) {
		IPAERR("failed to generate flt single tbl empty img\n");
		goto free_cmd_pyld;
	}

	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
		if (!ipa_is_ep_support_flt(pipe_idx))
			continue;

		/*
		 * Iterating over all the filtering pipes which are either
		 * invalid but connected or connected but not configured by AP.
		 */
		if (!ipa3_ctx->ep[pipe_idx].valid ||
			ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {

			if (num_cmds >= ipa3_ctx->ep_flt_num) {
				IPAERR("number of commands is out of range\n");
				retval = -ENOBUFS;
				goto free_empty_img;
			}

			cmd.is_read = false;
			cmd.skip_pipeline_clear = false;
			cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			cmd.size = mem.size;
			cmd.system_addr = mem.phys_base;
			cmd.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				lcl_addr_mem_part +
				ipahal_get_hw_tbl_hdr_width() +
				flt_idx * ipahal_get_hw_tbl_hdr_width();
			cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
			if (!cmd_pyld[num_cmds]) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				retval = -ENOMEM;
				goto free_empty_img;
			}
			ipa3_init_imm_cmd_desc(&desc[num_cmds],
				cmd_pyld[num_cmds]);
			++num_cmds;
		}

		++flt_idx;
	}

	IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
	retval = ipa3_send_cmd(num_cmds, desc);
	if (retval) {
		IPAERR("failed to send immediate command (err %d)\n", retval);
		retval = -EFAULT;
	}

free_empty_img:
	ipahal_free_dma_mem(&mem);
free_cmd_pyld:
	for (index = 0; index < num_cmds; index++)
		ipahal_destroy_imm_cmd(cmd_pyld[index]);
	kfree(cmd_pyld);
free_desc:
	kfree(desc);
	return retval;
}
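/*
 * ipa3_q6_clean_q6_rt_tbls() - Reset the modem-owned routing table headers
 * of the given IP family and rule type to an empty table image.
 */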
static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
	enum ipa_rule_type rlt)
{
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
	int retval = 0;
	u32 modem_rt_index_lo;
	u32 modem_rt_index_hi;
	u32 lcl_addr_mem_part;
	u32 lcl_hdr_sz;
	struct ipa_mem_buffer mem;

	IPADBG("Entry\n");

	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
		return -EINVAL;
	}

	/*
	 * No SRAM memory is allocated for hash tables, so the hash table
	 * clean operation is not supported.
	 */
	if (rlt == IPA_RULE_HASHABLE && ipa3_ctx->ipa_fltrt_not_hashable) {
		IPADBG("Clean hashable rules not supported\n");
		return retval;
	}

	if (ip == IPA_IP_v4) {
		modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
		modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
		}
	} else {
		modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
		modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
		}
	}

	retval = ipahal_rt_generate_empty_img(
		modem_rt_index_hi - modem_rt_index_lo + 1,
		lcl_hdr_sz, lcl_hdr_sz, &mem, true);
	if (retval) {
		IPAERR("fail generate empty rt img\n");
		return -ENOMEM;
	}

	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		/* retval would otherwise still be 0 on this error path */
		retval = -ENOMEM;
		goto free_empty_img;
	}

	cmd.is_read = false;
	cmd.skip_pipeline_clear = false;
	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	cmd.size = mem.size;
	cmd.system_addr = mem.phys_base;
	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		lcl_addr_mem_part +
		modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct dma_shared_mem imm cmd\n");
		retval = -ENOMEM;
		goto free_desc;
	}
	ipa3_init_imm_cmd_desc(desc, cmd_pyld);

	IPADBG("Sending 1 descriptor for rt tbl clearing\n");
	retval = ipa3_send_cmd(1, desc);
	if (retval) {
		IPAERR("failed to send immediate command (err %d)\n", retval);
		retval = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);
free_desc:
	kfree(desc);
free_empty_img:
	ipahal_free_dma_mem(&mem);
	return retval;
}
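/*
 * ipa3_q6_clean_q6_tables() - Clean all modem-owned filter and routing
 * tables (v4/v6, hashable/non-hashable) and flush the rules cache.
 */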
static int ipa3_q6_clean_q6_tables(void)
{
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	int retval = 0;
	struct ipahal_reg_fltrt_hash_flush flush;
	struct ipahal_reg_valmask valmask;

	IPADBG("Entry\n");

	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
		return -EFAULT;
	}

	/*
	 * No SRAM memory is allocated for hash tables, so the hash table
	 * flush operation is not supported.
	 */
	if (ipa3_ctx->ipa_fltrt_not_hashable)
		return retval;

	/* Flush rules cache */
	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	flush.v4_flt = true;
	flush.v4_rt = true;
	flush.v6_flt = true;
	flush.v6_rt = true;
	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
	reg_write_cmd.skip_pipeline_clear = false;
	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
	reg_write_cmd.value = valmask.val;
	reg_write_cmd.value_mask = valmask.mask;
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&reg_write_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct register_write imm cmd\n");
		retval = -EFAULT;
		goto bail_desc;
	}
	ipa3_init_imm_cmd_desc(desc, cmd_pyld);

	IPADBG("Sending 1 descriptor for tbls flush\n");
	retval = ipa3_send_cmd(1, desc);
	if (retval) {
		IPAERR("failed to send immediate command (err %d)\n", retval);
		retval = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

bail_desc:
	kfree(desc);
	IPADBG("Done - retval = %d\n", retval);
	return retval;
}
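/*
 * ipa3_q6_set_ex_path_to_apps() - Disable status generation on modem
 * controlled producer pipes so exception traffic reaches the Apps, then
 * run the IPA TAG process to flush the pipeline.
 */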
static int ipa3_q6_set_ex_path_to_apps(void)
{
	int ep_idx;
	int client_idx;
	struct ipa3_desc *desc;
	int num_descs = 0;
	int index;
	struct ipahal_imm_cmd_register_write reg_write;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	int retval;

	desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
		GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* Set the exception path to AP */
	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		ep_idx = ipa3_get_ep_mapping(client_idx);
		if (ep_idx == -1)
			continue;

		/* disable statuses for all modem controlled prod pipes */
		if (!IPA_CLIENT_IS_TEST(client_idx) &&
			(IPA_CLIENT_IS_Q6_PROD(client_idx) ||
			(IPA_CLIENT_IS_PROD(client_idx) &&
			ipa3_ctx->ep[ep_idx].valid &&
			ipa3_ctx->ep[ep_idx].skip_ep_cfg) ||
			(ipa3_ctx->ep[ep_idx].client == IPA_CLIENT_APPS_WAN_PROD
			&& ipa3_ctx->modem_cfg_emb_pipe_flt))) {
			ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);

			ipa3_ctx->ep[ep_idx].status.status_en = false;
			reg_write.skip_pipeline_clear = false;
			reg_write.pipeline_clear_options =
				IPAHAL_HPS_CLEAR;
			reg_write.offset =
				ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
					ep_idx);
			reg_write.value = 0;
			reg_write.value_mask = ~0;
			cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
			if (!cmd_pyld) {
				IPAERR("fail construct register_write cmd\n");
				ipa_assert();
				/* free descriptors on this error path too */
				kfree(desc);
				return -ENOMEM;
			}

			ipa3_init_imm_cmd_desc(&desc[num_descs], cmd_pyld);
			desc[num_descs].callback = ipa3_destroy_imm;
			desc[num_descs].user1 = cmd_pyld;
			++num_descs;
		}
	}

	/* Will wait 500msecs for IPA tag process completion */
	retval = ipa3_tag_process(desc, num_descs,
		msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
	if (retval) {
		IPAERR("TAG process failed! (error %d)\n", retval);
		/* For timeout error ipa3_destroy_imm cb will destroy user1 */
		if (retval != -ETIME) {
			for (index = 0; index < num_descs; index++)
				if (desc[index].callback)
					desc[index].callback(desc[index].user1,
						desc[index].user2);
			retval = -EINVAL;
		}
	}

	kfree(desc);
	return retval;
}
/*
 * ipa3_update_ssr_state() - Update the current SSR state
 * @is_ssr: [in] Current SSR state
 */
void ipa3_update_ssr_state(bool is_ssr)
{
	if (is_ssr)
		atomic_set(&ipa3_ctx->is_ssr, 1);
	else
		atomic_set(&ipa3_ctx->is_ssr, 0);
}
/**
 * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
 * in IPA HW. This is performed in case of SSR.
 *
 * This is a mandatory procedure; if one of the steps fails, the
 * AP needs to restart.
 */
void ipa3_q6_pre_shutdown_cleanup(void)
{
	IPADBG_LOW("ENTER\n");

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	ipa3_update_ssr_state(true);
	if (!ipa3_ctx->ipa_endp_delay_wa)
		ipa3_q6_pipe_delay(true);
	ipa3_q6_avoid_holb();
	if (ipa3_ctx->ipa_config_is_mhi)
		ipa3_set_reset_client_cons_pipe_sus_holb(true,
			IPA_CLIENT_MHI_CONS);
	if (ipa3_q6_clean_q6_tables()) {
		IPAERR("Failed to clean Q6 tables\n");
		/*
		 * Indicates IPA hardware is stalled, unexpected
		 * hardware state.
		 */
		ipa_assert();
	}
	if (ipa3_q6_set_ex_path_to_apps()) {
		IPAERR("Failed to redirect exceptions to APPS\n");
		/*
		 * Indicates IPA hardware is stalled, unexpected
		 * hardware state.
		 */
		ipa_assert();
	}
	/* Remove delay from Q6 PRODs to avoid pending descriptors
	 * on pipe reset procedure
	 */
	if (!ipa3_ctx->ipa_endp_delay_wa) {
		ipa3_q6_pipe_delay(false);
		ipa3_set_reset_client_prod_pipe_delay(true,
			IPA_CLIENT_USB_PROD);
	} else {
		ipa3_start_stop_client_prod_gsi_chnl(IPA_CLIENT_USB_PROD,
			false);
	}

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
/*
 * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup,
 * check if the GSI channels related to Q6 producer clients are empty.
 *
 * Q6 GSI channel emptiness is needed to guarantee that no descriptors with
 * invalid info are injected into IPA RX from IPA_IF while the modem is
 * restarting.
 */
void ipa3_q6_post_shutdown_cleanup(void)
{
	int client_idx;
	int ep_idx;
	bool prod = false;

	IPADBG_LOW("ENTER\n");

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* Handle the issue where SUSPEND was removed for some reason */
	ipa3_q6_avoid_holb();

	/* halt both prod and cons channels starting at IPAv4 */
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
		prod = true;
		ipa3_halt_q6_gsi_channels(prod);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		IPADBG("Exit without consumer check\n");
		return;
	}

	ipa3_halt_q6_gsi_channels(prod);

	if (!ipa3_ctx->uc_ctx.uc_loaded) {
		IPAERR("uC is not loaded. Skipping\n");
		/* balance the active clients vote taken above */
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return;
	}

	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
			ep_idx = ipa3_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;

			if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
				IPAERR("fail to validate Q6 ch emptiness %d\n",
					client_idx);
				/*
				 * Indicates GSI hardware is stalled, unexpected
				 * hardware state.
				 * The BUG() here was removed to avoid an adb
				 * reboot issue.
				 */
			}
		}

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
/**
 * ipa3_q6_pre_powerup_cleanup() - A cleanup routine for peripheral
 * configuration in IPA HW. This is performed in case of SSR.
 *
 * This is a mandatory procedure; if one of the steps fails, the
 * AP needs to restart.
 */
void ipa3_q6_pre_powerup_cleanup(void)
{
	IPADBG_LOW("ENTER\n");

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (ipa3_ctx->ipa_config_is_mhi)
		ipa3_set_reset_client_prod_pipe_delay(true,
			IPA_CLIENT_MHI_PROD);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
/*
 * ipa3_client_prod_post_shutdown_cleanup() - Set endpoint delay on client
 * producer pipes and start the corresponding GSI channels.
 */
void ipa3_client_prod_post_shutdown_cleanup(void)
{
	IPADBG_LOW("ENTER\n");

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	ipa3_set_reset_client_prod_pipe_delay(true,
		IPA_CLIENT_USB_PROD);
	ipa3_start_stop_client_prod_gsi_chnl(IPA_CLIENT_USB_PROD, true);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
{
	/* Set 4 bytes of CANARY before the offset */
	sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
}
/**
 * _ipa_init_sram_v3() - Initialize IPA local SRAM.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_sram_v3(void)
{
	u32 *ipa_sram_mmio;
	unsigned long phys_addr;

	IPADBG(
	    "ipa_wrapper_base(0x%08X) ipa_reg_base_ofst(0x%08X) IPA_SW_AREA_RAM_DIRECT_ACCESS_n(0x%08X) smem_restricted_bytes(0x%08X) smem_sz(0x%08X)\n",
	    ipa3_ctx->ipa_wrapper_base,
	    ipa3_ctx->ctrl->ipa_reg_base_ofst,
	    ipahal_get_reg_n_ofst(
		IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
		ipa3_ctx->smem_restricted_bytes / 4),
	    ipa3_ctx->smem_restricted_bytes,
	    ipa3_ctx->smem_sz);

	phys_addr = ipa3_ctx->ipa_wrapper_base +
		ipa3_ctx->ctrl->ipa_reg_base_ofst +
		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
			ipa3_ctx->smem_restricted_bytes / 4);

	ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
	if (!ipa_sram_mmio) {
		IPAERR("fail to ioremap IPA SRAM\n");
		return -ENOMEM;
	}

	/* Consult with ipa_i.h on the location of the CANARY values */
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
	if (ipa_get_hw_type() >= IPA_HW_v4_5) {
		ipa3_sram_set_canary(ipa_sram_mmio,
			IPA_MEM_PART(nat_tbl_ofst) - 12);
		ipa3_sram_set_canary(ipa_sram_mmio,
			IPA_MEM_PART(nat_tbl_ofst) - 8);
		ipa3_sram_set_canary(ipa_sram_mmio,
			IPA_MEM_PART(nat_tbl_ofst) - 4);
		ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(nat_tbl_ofst));
	}
	if (ipa_get_hw_type() >= IPA_HW_v4_0) {
		ipa3_sram_set_canary(ipa_sram_mmio,
			IPA_MEM_PART(pdn_config_ofst) - 4);
		ipa3_sram_set_canary(ipa_sram_mmio,
			IPA_MEM_PART(pdn_config_ofst));
		ipa3_sram_set_canary(ipa_sram_mmio,
			IPA_MEM_PART(stats_quota_ofst) - 4);
		ipa3_sram_set_canary(ipa_sram_mmio,
			IPA_MEM_PART(stats_quota_ofst));
	}
	if (ipa_get_hw_type() <= IPA_HW_v3_5 ||
		ipa_get_hw_type() >= IPA_HW_v4_5) {
		ipa3_sram_set_canary(ipa_sram_mmio,
			IPA_MEM_PART(modem_ofst) - 4);
		ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
	}
	ipa3_sram_set_canary(ipa_sram_mmio,
		(ipa_get_hw_type() >= IPA_HW_v3_5) ?
			IPA_MEM_PART(uc_descriptor_ram_ofst) :
			IPA_MEM_PART(end_ofst));

	iounmap(ipa_sram_mmio);

	return 0;
}
/**
 * _ipa_init_hdr_v3_0() - Initialize IPA header block.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_hdr_v3_0(void)
{
	struct ipa3_desc desc;
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_hdr_init_local cmd = {0};
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };

	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
	mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	cmd.hdr_table_addr = mem.phys_base;
	cmd.size_hdr_table = mem.size;
	cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(modem_hdr_ofst);
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail to construct hdr_init_local imm cmd\n");
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		ipahal_destroy_imm_cmd(cmd_pyld);
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);
	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);

	mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
		IPA_MEM_PART(apps_hdr_proc_ctx_size);
	mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	dma_cmd.is_read = false;
	dma_cmd.skip_pipeline_clear = false;
	dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	dma_cmd.system_addr = mem.phys_base;
	dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
	dma_cmd.size = mem.size;
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail to construct dma_shared_mem imm\n");
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -ENOMEM;
	}
	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		ipahal_destroy_imm_cmd(cmd_pyld);
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size,
			mem.base,
			mem.phys_base);
		return -EBUSY;
	}
	ipahal_destroy_imm_cmd(cmd_pyld);

	ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);

	return 0;
}
/**
 * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_rt4_v3(void)
{
	struct ipa3_desc desc;
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	int i;
	int rc = 0;

	for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
		i <= IPA_MEM_PART(v4_modem_rt_index_hi);
		i++)
		ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
	IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);

	rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
		IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
		&mem, false);
	if (rc) {
		IPAERR("fail generate empty v4 rt img\n");
		return rc;
	}

	/*
	 * No SRAM memory is allocated for hash tables, so initializing or
	 * sending commands to the hash tables (filter/routing) is not
	 * supported.
	 */
	if (ipa3_ctx->ipa_fltrt_not_hashable) {
		v4_cmd.hash_rules_addr = 0;
		v4_cmd.hash_rules_size = 0;
		v4_cmd.hash_local_addr = 0;
	} else {
		v4_cmd.hash_rules_addr = mem.phys_base;
		v4_cmd.hash_rules_size = mem.size;
		v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_rt_hash_ofst);
	}

	v4_cmd.nhash_rules_addr = mem.phys_base;
	v4_cmd.nhash_rules_size = mem.size;
	v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(v4_rt_nhash_ofst);
	IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
		v4_cmd.hash_local_addr);
	IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
		v4_cmd.nhash_local_addr);

	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct ip_v4_rt_init imm cmd\n");
		rc = -EPERM;
		goto free_mem;
	}

	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

free_mem:
	ipahal_free_dma_mem(&mem);
	return rc;
}
/**
 * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_rt6_v3(void)
{
	struct ipa3_desc desc;
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	int i;
	int rc = 0;

	for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
		i <= IPA_MEM_PART(v6_modem_rt_index_hi);
		i++)
		ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
	IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);

	rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
		IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
		&mem, false);
	if (rc) {
		IPAERR("fail generate empty v6 rt img\n");
		return rc;
	}

	/*
	 * No SRAM memory is allocated for hash tables, so initializing or
	 * sending commands to the hash tables (filter/routing) is not
	 * supported.
	 */
	if (ipa3_ctx->ipa_fltrt_not_hashable) {
		v6_cmd.hash_rules_addr = 0;
		v6_cmd.hash_rules_size = 0;
		v6_cmd.hash_local_addr = 0;
	} else {
		v6_cmd.hash_rules_addr = mem.phys_base;
		v6_cmd.hash_rules_size = mem.size;
		v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_rt_hash_ofst);
	}

	v6_cmd.nhash_rules_addr = mem.phys_base;
	v6_cmd.nhash_rules_size = mem.size;
	v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(v6_rt_nhash_ofst);
	IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
		v6_cmd.hash_local_addr);
	IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
		v6_cmd.nhash_local_addr);

	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct ip_v6_rt_init imm cmd\n");
		rc = -EPERM;
		goto free_mem;
	}

	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

free_mem:
	ipahal_free_dma_mem(&mem);
	return rc;
}
/**
 * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_flt4_v3(void)
{
	struct ipa3_desc desc;
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	int rc;

	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
		IPA_MEM_PART(v4_flt_hash_size),
		IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
		&mem, false);
	if (rc) {
		IPAERR("fail generate empty v4 flt img\n");
		return rc;
	}

	/*
	 * No SRAM memory is allocated for hash tables, so initializing or
	 * sending commands to the hash tables (filter/routing) is not
	 * supported.
	 */
	if (ipa3_ctx->ipa_fltrt_not_hashable) {
		v4_cmd.hash_rules_addr = 0;
		v4_cmd.hash_rules_size = 0;
		v4_cmd.hash_local_addr = 0;
	} else {
		v4_cmd.hash_rules_addr = mem.phys_base;
		v4_cmd.hash_rules_size = mem.size;
		v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_flt_hash_ofst);
	}

	v4_cmd.nhash_rules_addr = mem.phys_base;
	v4_cmd.nhash_rules_size = mem.size;
	v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(v4_flt_nhash_ofst);
	IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
		v4_cmd.hash_local_addr);
	IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
		v4_cmd.nhash_local_addr);

	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct ip_v4_flt_init imm cmd\n");
		rc = -EPERM;
		goto free_mem;
	}

	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

free_mem:
	ipahal_free_dma_mem(&mem);
	return rc;
}
/**
 * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_flt6_v3(void)
{
	struct ipa3_desc desc;
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	int rc;

	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
		IPA_MEM_PART(v6_flt_hash_size),
		IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
		&mem, false);
	if (rc) {
		IPAERR("fail generate empty v6 flt img\n");
		return rc;
	}

	/*
	 * No SRAM memory is allocated for hash tables, so initializing or
	 * sending commands to the hash tables (filter/routing) is not
	 * supported.
	 */
	if (ipa3_ctx->ipa_fltrt_not_hashable) {
		v6_cmd.hash_rules_addr = 0;
		v6_cmd.hash_rules_size = 0;
		v6_cmd.hash_local_addr = 0;
	} else {
		v6_cmd.hash_rules_addr = mem.phys_base;
		v6_cmd.hash_rules_size = mem.size;
		v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_flt_hash_ofst);
	}

	v6_cmd.nhash_rules_addr = mem.phys_base;
	v6_cmd.nhash_rules_size = mem.size;
	v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(v6_flt_nhash_ofst);
	IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
		v6_cmd.hash_local_addr);
	IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
		v6_cmd.nhash_local_addr);

	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct ip_v6_flt_init imm cmd\n");
		rc = -EPERM;
		goto free_mem;
	}

	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

free_mem:
	ipahal_free_dma_mem(&mem);
	return rc;
}
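/*
 * ipa3_setup_flt_hash_tuple() - Zero the filter hash tuple mask of every
 * AP-owned filtering pipe; modem pipes are skipped.
 */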
static int ipa3_setup_flt_hash_tuple(void)
{
	int pipe_idx;
	struct ipahal_reg_hash_tuple tuple;

	memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));

	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
		if (!ipa_is_ep_support_flt(pipe_idx))
			continue;

		if (ipa_is_modem_pipe(pipe_idx))
			continue;

		if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
			IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
			return -EFAULT;
		}
	}

	return 0;
}
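/*
 * ipa3_setup_rt_hash_tuple() - Zero the routing hash tuple mask of every
 * AP-owned routing table, skipping the modem-owned index ranges.
 */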
static int ipa3_setup_rt_hash_tuple(void)
{
	int tbl_idx;
	struct ipahal_reg_hash_tuple tuple;

	memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));

	for (tbl_idx = 0;
		tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
		IPA_MEM_PART(v4_rt_num_index));
		tbl_idx++) {

		if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
			tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
			continue;

		if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
			tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
			continue;

		if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
			IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
			return -EFAULT;
		}
	}

	return 0;
}
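/*
 * ipa3_setup_apps_pipes() - Set up the Apps system pipes (CMD, LAN_CONS,
 * and LAN_PROD when not in MHI config) and initialize the SRAM, header,
 * routing and filtering blocks.
 */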
static int ipa3_setup_apps_pipes(void)
{
	struct ipa_sys_connect_params sys_in;
	int result = 0;

	if (ipa3_ctx->gsi_ch20_wa) {
		IPADBG("Allocating GSI physical channel 20\n");
		result = ipa_gsi_ch20_wa();
		if (result) {
			IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
			goto fail_ch20_wa;
		}
	}

	/* allocate the common PROD event ring */
	if (ipa3_alloc_common_event_ring()) {
		IPAERR("ipa3_alloc_common_event_ring failed.\n");
		result = -EPERM;
		goto fail_ch20_wa;
	}

	/* CMD OUT (AP->IPA) */
	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
	sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
		IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
		result = -EPERM;
		goto fail_ch20_wa;
	}
	IPADBG("Apps to IPA cmd pipe is connected\n");

	IPADBG("Will initialize SRAM\n");
	ipa3_ctx->ctrl->ipa_init_sram();
	IPADBG("SRAM initialized\n");

	IPADBG("Will initialize HDR\n");
	ipa3_ctx->ctrl->ipa_init_hdr();
	IPADBG("HDR initialized\n");

	IPADBG("Will initialize V4 RT\n");
	ipa3_ctx->ctrl->ipa_init_rt4();
	IPADBG("V4 RT initialized\n");

	IPADBG("Will initialize V6 RT\n");
	ipa3_ctx->ctrl->ipa_init_rt6();
	IPADBG("V6 RT initialized\n");

	IPADBG("Will initialize V4 FLT\n");
	ipa3_ctx->ctrl->ipa_init_flt4();
	IPADBG("V4 FLT initialized\n");

	IPADBG("Will initialize V6 FLT\n");
	ipa3_ctx->ctrl->ipa_init_flt6();
	IPADBG("V6 FLT initialized\n");

	if (!ipa3_ctx->ipa_fltrt_not_hashable) {
		if (ipa3_setup_flt_hash_tuple()) {
			IPAERR(":fail to configure flt hash tuple\n");
			result = -EPERM;
			goto fail_flt_hash_tuple;
		}
		IPADBG("flt hash tuple is configured\n");

		if (ipa3_setup_rt_hash_tuple()) {
			IPAERR(":fail to configure rt hash tuple\n");
			result = -EPERM;
			goto fail_flt_hash_tuple;
		}
		IPADBG("rt hash tuple is configured\n");
	}
	if (ipa3_setup_exception_path()) {
		IPAERR(":fail to setup excp path\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("Exception path was successfully set\n");

	if (ipa3_setup_dflt_rt_tables()) {
		IPAERR(":fail to setup dflt routes\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("default routing was set\n");

	/* LAN IN (IPA->AP) */
	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
	sys_in.notify = ipa3_lan_rx_cb;
	sys_in.priv = NULL;
	sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
	sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;

	/*
	 * ipa_lan_rx_cb() is intended to notify the source EP about a packet
	 * being received on the LAN_CONS via calling the source EP call-back.
	 * There could be a race condition with calling this call-back: another
	 * thread may nullify it - e.g. on EP disconnect.
	 * This lock is intended to protect the access to the source EP
	 * call-back.
	 */
	spin_lock_init(&ipa3_ctx->disconnect_lock);
	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
		IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}

	/* LAN OUT (AP->IPA) */
	if (!ipa3_ctx->ipa_config_is_mhi) {
		memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
		sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
		sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
		sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
		if (ipa3_setup_sys_pipe(&sys_in,
			&ipa3_ctx->clnt_hdl_data_out)) {
			IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
			result = -EPERM;
			goto fail_lan_data_out;
		}
	}

	return 0;

fail_lan_data_out:
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
fail_flt_hash_tuple:
	if (ipa3_ctx->dflt_v6_rt_rule_hdl)
		__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
	if (ipa3_ctx->dflt_v4_rt_rule_hdl)
		__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
	if (ipa3_ctx->excp_hdr_hdl)
		__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
fail_ch20_wa:
	return result;
}
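/*
 * ipa3_teardown_apps_pipes() - Tear down the Apps system pipes and remove
 * the default routing rules and exception header, in reverse setup order.
 */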
static void ipa3_teardown_apps_pipes(void)
{
	if (!ipa3_ctx->ipa_config_is_mhi)
		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
	__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
}
#ifdef CONFIG_COMPAT
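/*
 * compat_ipa3_nat_ipv6ct_alloc_table() - Convert a 32-bit table alloc ioctl
 * argument to the native layout, call @alloc_func, and copy the resulting
 * offset back to user space.
 */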
static long compat_ipa3_nat_ipv6ct_alloc_table(unsigned long arg,
	int (alloc_func)(struct ipa_ioc_nat_ipv6ct_table_alloc *))
{
	long retval;
	struct ipa_ioc_nat_ipv6ct_table_alloc32 table_alloc32;
	struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;

	retval = copy_from_user(&table_alloc32, (const void __user *)arg,
		sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
	if (retval)
		return retval;

	table_alloc.size = (size_t)table_alloc32.size;
	table_alloc.offset = (off_t)table_alloc32.offset;

	retval = alloc_func(&table_alloc);
	if (retval)
		return retval;

	if (table_alloc.offset) {
		table_alloc32.offset = (compat_off_t)table_alloc.offset;
		retval = copy_to_user((void __user *)arg, &table_alloc32,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
	}

	return retval;
}
long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long retval = 0;
	struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
	struct ipa_ioc_nat_alloc_mem nat_mem;

	switch (cmd) {
	case IPA_IOC_ADD_HDR32:
		cmd = IPA_IOC_ADD_HDR;
		break;
	case IPA_IOC_DEL_HDR32:
		cmd = IPA_IOC_DEL_HDR;
		break;
	case IPA_IOC_ADD_RT_RULE32:
		cmd = IPA_IOC_ADD_RT_RULE;
		break;
	case IPA_IOC_DEL_RT_RULE32:
		cmd = IPA_IOC_DEL_RT_RULE;
		break;
	case IPA_IOC_ADD_FLT_RULE32:
		cmd = IPA_IOC_ADD_FLT_RULE;
		break;
	case IPA_IOC_DEL_FLT_RULE32:
		cmd = IPA_IOC_DEL_FLT_RULE;
		break;
	case IPA_IOC_GET_RT_TBL32:
		cmd = IPA_IOC_GET_RT_TBL;
		break;
	case IPA_IOC_COPY_HDR32:
		cmd = IPA_IOC_COPY_HDR;
		break;
	case IPA_IOC_QUERY_INTF32:
		cmd = IPA_IOC_QUERY_INTF;
		break;
	case IPA_IOC_QUERY_INTF_TX_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
		break;
	case IPA_IOC_QUERY_INTF_RX_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
		break;
	case IPA_IOC_QUERY_INTF_EXT_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
		break;
	case IPA_IOC_GET_HDR32:
		cmd = IPA_IOC_GET_HDR;
		break;
	case IPA_IOC_ALLOC_NAT_MEM32:
		retval = copy_from_user(&nat_mem32, (const void __user *)arg,
			sizeof(struct ipa3_ioc_nat_alloc_mem32));
		if (retval)
			return retval;
		memcpy(nat_mem.dev_name, nat_mem32.dev_name,
			IPA_RESOURCE_NAME_MAX);
		nat_mem.size = (size_t)nat_mem32.size;
		nat_mem.offset = (off_t)nat_mem32.offset;

		/* null terminate the string */
		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';

		retval = ipa3_allocate_nat_device(&nat_mem);
		if (retval)
			return retval;
		nat_mem32.offset = (compat_off_t)nat_mem.offset;
		retval = copy_to_user((void __user *)arg, &nat_mem32,
			sizeof(struct ipa3_ioc_nat_alloc_mem32));
		return retval;
	case IPA_IOC_ALLOC_NAT_TABLE32:
		return compat_ipa3_nat_ipv6ct_alloc_table(arg,
			ipa3_allocate_nat_table);
	case IPA_IOC_ALLOC_IPV6CT_TABLE32:
		return compat_ipa3_nat_ipv6ct_alloc_table(arg,
			ipa3_allocate_ipv6ct_table);
	case IPA_IOC_V4_INIT_NAT32:
		cmd = IPA_IOC_V4_INIT_NAT;
		break;
	case IPA_IOC_INIT_IPV6CT_TABLE32:
		cmd = IPA_IOC_INIT_IPV6CT_TABLE;
		break;
	case IPA_IOC_TABLE_DMA_CMD32:
		cmd = IPA_IOC_TABLE_DMA_CMD;
		break;
	case IPA_IOC_V4_DEL_NAT32:
		cmd = IPA_IOC_V4_DEL_NAT;
		break;
	case IPA_IOC_DEL_NAT_TABLE32:
		cmd = IPA_IOC_DEL_NAT_TABLE;
		break;
	case IPA_IOC_DEL_IPV6CT_TABLE32:
		cmd = IPA_IOC_DEL_IPV6CT_TABLE;
		break;
	case IPA_IOC_NAT_MODIFY_PDN32:
		cmd = IPA_IOC_NAT_MODIFY_PDN;
		break;
	case IPA_IOC_GET_NAT_OFFSET32:
		cmd = IPA_IOC_GET_NAT_OFFSET;
		break;
	case IPA_IOC_PULL_MSG32:
		cmd = IPA_IOC_PULL_MSG;
		break;
	case IPA_IOC_RM_ADD_DEPENDENCY32:
		cmd = IPA_IOC_RM_ADD_DEPENDENCY;
		break;
	case IPA_IOC_RM_DEL_DEPENDENCY32:
		cmd = IPA_IOC_RM_DEL_DEPENDENCY;
		break;
	case IPA_IOC_GENERATE_FLT_EQ32:
		cmd = IPA_IOC_GENERATE_FLT_EQ;
		break;
	case IPA_IOC_QUERY_RT_TBL_INDEX32:
		cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
		break;
	case IPA_IOC_WRITE_QMAPID32:
		cmd = IPA_IOC_WRITE_QMAPID;
		break;
	case IPA_IOC_MDFY_FLT_RULE32:
		cmd = IPA_IOC_MDFY_FLT_RULE;
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
		break;
	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
		cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
		break;
	case IPA_IOC_MDFY_RT_RULE32:
		cmd = IPA_IOC_MDFY_RT_RULE;
		break;
	case IPA_IOC_COMMIT_HDR:
	case IPA_IOC_RESET_HDR:
	case IPA_IOC_COMMIT_RT:
	case IPA_IOC_RESET_RT:
	case IPA_IOC_COMMIT_FLT:
	case IPA_IOC_RESET_FLT:
	case IPA_IOC_DUMP:
	case IPA_IOC_PUT_RT_TBL:
	case IPA_IOC_PUT_HDR:
	case IPA_IOC_SET_FLT:
	case IPA_IOC_QUERY_EP_MAPPING:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
static ssize_t ipa3_write(struct file *file, const char __user *buf,
	size_t count, loff_t *ppos);

static const struct file_operations ipa3_drv_fops = {
	.owner = THIS_MODULE,
	.open = ipa3_open,
	.read = ipa3_read,
	.write = ipa3_write,
	.unlocked_ioctl = ipa3_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ipa3_ioctl,
#endif
};
static int ipa3_get_clks(struct device *dev)
{
	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
		IPADBG("not supported in this HW mode\n");
		ipa3_clk = NULL;
		return 0;
	}

	if (ipa3_res.use_bw_vote) {
		IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
		ipa3_clk = NULL;
		return 0;
	}

	ipa3_clk = clk_get(dev, "core_clk");
	if (IS_ERR(ipa3_clk)) {
		if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
			IPAERR("fail to get ipa clk\n");
		return PTR_ERR(ipa3_clk);
	}
	return 0;
}
/**
 * _ipa_enable_clks_v3_0() - Enable IPA clocks.
 */
void _ipa_enable_clks_v3_0(void)
{
	IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
	if (ipa3_clk) {
		IPADBG_LOW("enabling gcc_ipa_clk\n");
		clk_prepare(ipa3_clk);
		clk_enable(ipa3_clk);
		clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
	}
	ipa3_uc_notify_clk_state(true);
}
static unsigned int ipa3_get_bus_vote(void)
{
	unsigned int idx = 1;

	if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs2) {
		idx = 1;
	} else if (ipa3_ctx->curr_ipa_clk_rate ==
		ipa3_ctx->ctrl->ipa_clk_rate_svs) {
		idx = 2;
	} else if (ipa3_ctx->curr_ipa_clk_rate ==
		ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
		idx = 3;
	} else if (ipa3_ctx->curr_ipa_clk_rate ==
		ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
		idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
	} else {
		WARN(1, "unexpected clock rate");
	}
	IPADBG_LOW("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
	return idx;
}
/**
 * ipa3_enable_clks() - Turn on IPA clocks
 *
 * Return codes:
 * None
 */
void ipa3_enable_clks(void)
{
	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
		IPAERR("not supported in this mode\n");
		return;
	}

	IPADBG("enabling IPA clocks and bus voting\n");

	if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
		ipa3_get_bus_vote()))
		WARN(1, "bus scaling failed");
	ipa3_ctx->ctrl->ipa3_enable_clks();
	atomic_set(&ipa3_ctx->ipa_clk_vote, 1);
}
/**
 * _ipa_disable_clks_v3_0() - Disable IPA clocks.
 */
void _ipa_disable_clks_v3_0(void)
{
	ipa3_uc_notify_clk_state(false);
	if (ipa3_clk) {
		IPADBG_LOW("disabling gcc_ipa_clk\n");
		clk_disable_unprepare(ipa3_clk);
	}
}
/**
 * ipa3_disable_clks() - Turn off IPA clocks
 *
 * Return codes:
 * None
 */
void ipa3_disable_clks(void)
{
	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
		IPAERR("not supported in this mode\n");
		return;
	}

	IPADBG("disabling IPA clocks and bus voting\n");

	ipa3_ctx->ctrl->ipa3_disable_clks();

	ipa_pm_set_clock_index(0);

	if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
		WARN(1, "bus scaling failed");
	atomic_set(&ipa3_ctx->ipa_clk_vote, 0);
}
/**
 * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
 *
 * This function is called prior to clock gating when the active client
 * counter is 1. The TAG process ensures that there are no packets inside
 * IPA HW that were not submitted to the IPA client via the transport.
 * During the TAG process all aggregation frames are (force) closed.
 *
 * Return codes:
 * None
 */
static void ipa3_start_tag_process(struct work_struct *work)
{
	int res;

	IPADBG("starting TAG process\n");
	/* close aggregation frames on all pipes */
	res = ipa3_tag_aggr_force_close(-1);
	if (res)
		IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");

	IPADBG("TAG process done\n");
}
/**
 * ipa3_active_clients_log_mod() - Log a modification in the active clients
 * reference count
 *
 * This method logs any modification in the active clients reference count:
 * It logs the modification in the circular history buffer
 * It logs the modification in the hash table - looking for an entry,
 * creating one if needed and deleting one if needed.
 *
 * @id: ipa3_active client logging info struct to hold the log information
 * @inc: a boolean variable to indicate whether the modification is an increase
 * or decrease
 * @int_ctx: a boolean variable to indicate whether this call is being made from
 * an interrupt context and therefore should allocate GFP_ATOMIC memory
 *
 * Method process:
 * - Hash the unique identifier string
 * - Find the hash in the table
 *   1) If found, increase or decrease the reference count
 *   2) If not found, allocate a new hash table entry struct and initialize it
 * - Remove and deallocate unneeded data structures
 * - Log the call in the circular history buffer (unless it is a simple call)
 */
#ifdef CONFIG_IPA_DEBUG
static void ipa3_active_clients_log_mod(
	struct ipa_active_client_logging_info *id,
	bool inc, bool int_ctx)
{
	char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
	unsigned long long t;
	unsigned long nanosec_rem;
	struct ipa3_active_client_htable_entry *hentry;
	struct ipa3_active_client_htable_entry *hfound;
	u32 hkey;
	char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	/* a spinlock is held from here on, so any allocation must be atomic */
	int_ctx = true;
	hfound = NULL;
	memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
	strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
	hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN, 0);
	hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
		hentry, list, hkey) {
		if (!strcmp(hentry->id_string, id->id_string)) {
			hentry->count = hentry->count + (inc ? 1 : -1);
			hfound = hentry;
		}
	}
	if (hfound == NULL) {
		hentry = kzalloc(sizeof(
			struct ipa3_active_client_htable_entry),
			int_ctx ? GFP_ATOMIC : GFP_KERNEL);
		if (hentry == NULL) {
			spin_unlock_irqrestore(
				&ipa3_ctx->ipa3_active_clients_logging.lock,
				flags);
			return;
		}
		hentry->type = id->type;
		strlcpy(hentry->id_string, id->id_string,
			IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
		INIT_HLIST_NODE(&hentry->list);
		hentry->count = inc ? 1 : -1;
		hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
			&hentry->list, hkey);
	} else if (hfound->count == 0) {
		/* the entry just dropped to zero references - reclaim it */
		hash_del(&hfound->list);
		kfree(hfound);
	}

	if (id->type != SIMPLE) {
		t = local_clock();
		nanosec_rem = do_div(t, 1000000000) / 1000;
		snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
			inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
			"[%5lu.%06lu] v %s, %s: %d",
			(unsigned long)t, nanosec_rem,
			id->id_string, id->file, id->line);
		ipa3_active_clients_log_insert(temp_str);
	}
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);
}
#else
static void ipa3_active_clients_log_mod(
	struct ipa_active_client_logging_info *id,
	bool inc, bool int_ctx)
{
}
#endif
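
/*
 * Illustrative sketch (not part of the driver, compiled out): the counting
 * pattern used above in miniature. A fixed-size string key is hashed with
 * jhash() and looked up with hash_for_each_possible(); an entry is created
 * on the first reference and its count adjusted on every call. All names
 * below are hypothetical.
 */
#if 0
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/slab.h>

#define REF_NAME_LEN 40

struct ref_entry {
	struct hlist_node node;
	char name[REF_NAME_LEN];
	int count;
};

static DEFINE_HASHTABLE(ref_table, 4);

static void ref_mod(const char *name, bool inc)
{
	struct ref_entry *e;
	char key[REF_NAME_LEN] = { 0 };
	u32 hkey;

	/* hash a fixed-length, zero-padded copy so equal strings hash equal */
	strlcpy(key, name, REF_NAME_LEN);
	hkey = jhash(key, REF_NAME_LEN, 0);

	hash_for_each_possible(ref_table, e, node, hkey) {
		if (!strcmp(e->name, name)) {
			e->count += inc ? 1 : -1;
			return;
		}
	}

	/* first reference: create the entry */
	e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (!e)
		return;
	strlcpy(e->name, name, REF_NAME_LEN);
	e->count = inc ? 1 : -1;
	hash_add(ref_table, &e->node, hkey);
}
#endif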
void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
	bool int_ctx)
{
	ipa3_active_clients_log_mod(id, false, int_ctx);
}

void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
	bool int_ctx)
{
	ipa3_active_clients_log_mod(id, true, int_ctx);
}
/**
 * ipa3_inc_client_enable_clks() - Increase active clients counter, and
 * enable ipa clocks if necessary
 *
 * Return codes:
 * None
 */
void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
	int ret;

	ipa3_active_clients_log_inc(id, false);
	ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
	if (ret) {
		IPADBG_LOW("active clients = %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
		return;
	}

	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);

	/* somebody might have voted for clocks meanwhile */
	ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
	if (ret) {
		mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
		IPADBG_LOW("active clients = %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
		return;
	}

	ipa3_enable_clks();
	ipa3_suspend_apps_pipes(false);
	atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
	IPADBG_LOW("active clients = %d\n",
		atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
}
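
/*
 * Illustrative sketch (not part of the driver, compiled out): a typical
 * caller votes for IPA clocks around a register access and releases the
 * vote when done. The SIMPLE macros below are the ones used elsewhere in
 * this file; the function name is hypothetical.
 */
#if 0
static void example_read_hw_counter(void)
{
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	/* ... safe to touch IPA HW here, clocks are guaranteed on ... */
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
#endif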
/**
 * ipa3_active_clks_status() - Query the current IPA clock vote
 * status
 */
int ipa3_active_clks_status(void)
{
	return atomic_read(&ipa3_ctx->ipa_clk_vote);
}
/**
 * ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
 * clients if no asynchronous actions should be done. Asynchronous actions are
 * locking a mutex and waking up IPA HW.
 *
 * Return codes: 0 for success
 * -EPERM if an asynchronous action should have been done
 */
int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
	*id)
{
	int ret;

	ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
	if (ret) {
		ipa3_active_clients_log_inc(id, true);
		IPADBG_LOW("active clients = %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
		return 0;
	}

	return -EPERM;
}
static void __ipa3_dec_client_disable_clks(void)
{
	int ret;

	if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) {
		IPAERR("trying to disable clocks with refcnt of 0\n");
		ipa_assert();
		return;
	}

	ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
	if (ret)
		goto bail;

	/* seems like this is the only client holding the clocks */
	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
	    ipa3_ctx->tag_process_before_gating) {
		ipa3_ctx->tag_process_before_gating = false;
		/*
		 * When the TAG process ends, the active clients counter
		 * will be decreased
		 */
		queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
		goto unlock_mutex;
	}

	/* a different context might increase the clock reference meanwhile */
	ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
	if (ret > 0)
		goto unlock_mutex;
	ipa3_suspend_apps_pipes(true);
	ipa3_disable_clks();

unlock_mutex:
	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
bail:
	IPADBG_LOW("active clients = %d\n",
		atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
}
/**
 * ipa3_dec_client_disable_clks() - Decrease active clients counter
 *
 * In case there are no other active clients, this function also starts the
 * TAG process. When the TAG process ends, the IPA clocks will be gated.
 * The tag_process_before_gating flag signals that a TAG process must run
 * before gating, since another client may still have sent data to the IPA.
 *
 * Return codes:
 * None
 */
void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
{
	ipa3_active_clients_log_dec(id, false);
	__ipa3_dec_client_disable_clks();
}

static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work)
{
	__ipa3_dec_client_disable_clks();
}
/**
 * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter
 * if possible without blocking. If this is the last client then the decrease
 * will happen from work queue context.
 *
 * Return codes:
 * None
 */
void ipa3_dec_client_disable_clks_no_block(
	struct ipa_active_client_logging_info *id)
{
	int ret;

	ipa3_active_clients_log_dec(id, true);
	ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
	if (ret) {
		IPADBG_LOW("active clients = %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
		return;
	}

	/* seems like this is the only client holding the clocks */
	queue_work(ipa3_ctx->power_mgmt_wq,
		&ipa_dec_clients_disable_clks_on_wq_work);
}
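
/*
 * Illustrative sketch (not part of the driver, compiled out): from atomic
 * context, try the non-blocking vote first; if it returns -EPERM the HW may
 * be asleep and the work must be deferred to process context. Only the two
 * *_no_block driver functions are real; everything else is hypothetical.
 */
#if 0
static bool example_try_fast_path(struct ipa_active_client_logging_info *log)
{
	if (ipa3_inc_client_enable_clks_no_block(log))
		return false;	/* caller should queue deferred work instead */

	/* ... fast-path HW access, clocks are already voted ... */

	ipa3_dec_client_disable_clks_no_block(log);
	return true;
}
#endif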
/**
 * ipa3_inc_acquire_wakelock() - Increase the wakelock reference counter, and
 * acquire the wakelock when the first reference is taken
 *
 * Return codes:
 * None
 */
void ipa3_inc_acquire_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
	ipa3_ctx->wakelock_ref_cnt.cnt++;
	if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
		__pm_stay_awake(&ipa3_ctx->w_lock);
	IPADBG_LOW("active wakelock ref cnt = %d\n",
		ipa3_ctx->wakelock_ref_cnt.cnt);
	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
}

/**
 * ipa3_dec_release_wakelock() - Decrease the wakelock reference counter
 *
 * If the reference count reaches 0, release the wakelock.
 *
 * Return codes:
 * None
 */
void ipa3_dec_release_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
	ipa3_ctx->wakelock_ref_cnt.cnt--;
	IPADBG_LOW("active wakelock ref cnt = %d\n",
		ipa3_ctx->wakelock_ref_cnt.cnt);
	if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
		__pm_relax(&ipa3_ctx->w_lock);
	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
}
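
/*
 * Illustrative sketch (not part of the driver, compiled out): the wakelock
 * helpers are reference counted, so a producer grabs a reference when it
 * queues deferred RX work and the worker drops it once the work drains.
 * The function names and use of system_wq here are hypothetical.
 */
#if 0
static void example_queue_rx(struct work_struct *rx_work)
{
	ipa3_inc_acquire_wakelock();
	queue_work(system_wq, rx_work);
}

static void example_rx_worker(struct work_struct *work)
{
	/* ... process the queued packets ... */
	ipa3_dec_release_wakelock();
}
#endif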
int ipa3_set_clock_plan_from_pm(int idx)
{
	u32 clk_rate;

	IPADBG_LOW("idx = %d\n", idx);

	if (!ipa3_ctx->enable_clock_scaling) {
		ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
		return 0;
	}

	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
		IPAERR("not supported in this mode\n");
		return 0;
	}

	if (idx <= 0 || idx >= ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases) {
		IPAERR("bad voltage\n");
		return -EINVAL;
	}

	if (idx == 1)
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
	else if (idx == 2)
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
	else if (idx == 3)
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
	else if (idx == 4)
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
	else {
		IPAERR("bad voltage\n");
		WARN_ON(1);
		return -EFAULT;
	}

	if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
		IPADBG_LOW("Same voltage\n");
		return 0;
	}

	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
	ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
		if (ipa3_clk)
			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
			ipa3_get_bus_vote()))
			WARN_ON(1);
	} else {
		IPADBG_LOW("clocks are gated, not setting rate\n");
	}
	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
	IPADBG_LOW("Done\n");

	return 0;
}
int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
	u32 bandwidth_mbps)
{
	enum ipa_voltage_level needed_voltage;
	u32 clk_rate;

	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
		IPAERR("not supported in this mode\n");
		return 0;
	}

	IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
		floor_voltage, bandwidth_mbps);

	if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
		floor_voltage >= IPA_VOLTAGE_MAX) {
		IPAERR("bad voltage\n");
		return -EINVAL;
	}

	if (ipa3_ctx->enable_clock_scaling) {
		IPADBG_LOW("Clock scaling is enabled\n");
		if (bandwidth_mbps >=
			ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
			needed_voltage = IPA_VOLTAGE_TURBO;
		else if (bandwidth_mbps >=
			ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
			needed_voltage = IPA_VOLTAGE_NOMINAL;
		else if (bandwidth_mbps >=
			ipa3_ctx->ctrl->clock_scaling_bw_threshold_svs)
			needed_voltage = IPA_VOLTAGE_SVS;
		else
			needed_voltage = IPA_VOLTAGE_SVS2;
	} else {
		IPADBG_LOW("Clock scaling is disabled\n");
		needed_voltage = IPA_VOLTAGE_NOMINAL;
	}

	needed_voltage = max(needed_voltage, floor_voltage);
	switch (needed_voltage) {
	case IPA_VOLTAGE_SVS2:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
		break;
	case IPA_VOLTAGE_SVS:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
		break;
	case IPA_VOLTAGE_NOMINAL:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
		break;
	case IPA_VOLTAGE_TURBO:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
		break;
	default:
		IPAERR("bad voltage\n");
		WARN_ON(1);
		return -EFAULT;
	}

	if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
		IPADBG_LOW("Same voltage\n");
		return 0;
	}

	/* Hold the mutex to avoid race conditions with ipa3_enable_clks() */
	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
		if (ipa3_clk)
			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
			ipa3_get_bus_vote()))
			WARN_ON(1);
	} else {
		IPADBG_LOW("clocks are gated, not setting rate\n");
	}
	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
	IPADBG_LOW("Done\n");
	return 0;
}
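
/*
 * Illustrative sketch (not part of the driver, compiled out): a client that
 * expects roughly 700 Mbps of aggregate throughput but never wants to run
 * below SVS would request its profile like this; the driver then picks the
 * matching clock rate and bus vote. The bandwidth figure and function name
 * are hypothetical.
 */
#if 0
static int example_request_perf(void)
{
	return ipa3_set_required_perf_profile(IPA_VOLTAGE_SVS, 700);
}
#endif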
static void ipa3_process_irq_schedule_rel(void)
{
	queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
		&ipa3_transport_release_resource_work,
		msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
}
/**
 * ipa3_suspend_handler() - Handles the suspend interrupt:
 * wakes up the suspended peripheral by requesting its consumer
 * @interrupt: Interrupt type
 * @private_data: The client's private data
 * @interrupt_data: Interrupt specific information data
 */
void ipa3_suspend_handler(enum ipa_irq_type interrupt,
	void *private_data,
	void *interrupt_data)
{
	u32 suspend_data =
		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
	u32 bmsk = 1;
	u32 i = 0;
	int res;
	struct ipa_ep_cfg_holb holb_cfg;
	u32 pipe_bitmask = 0;

	IPADBG("interrupt=%d, interrupt_data=%u\n",
		interrupt, suspend_data);
	memset(&holb_cfg, 0, sizeof(holb_cfg));

	/* collect the valid endpoints flagged in the suspend interrupt */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1) {
		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid))
			pipe_bitmask |= bmsk;
	}

	res = ipa_pm_handle_suspend(pipe_bitmask);
	if (res) {
		IPAERR("ipa_pm_handle_suspend failed %d\n", res);
		return;
	}
}
/**
 * ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
 * as it was registered in the IPA init sequence.
 * Return codes:
 * 0: success
 * -EPERM: failed to remove current handler or failed to add original handler
 */
int ipa3_restore_suspend_handler(void)
{
	int result = 0;

	result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
	if (result) {
		IPAERR("remove handler for suspend interrupt failed\n");
		return -EPERM;
	}

	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
		ipa3_suspend_handler, false, NULL);
	if (result) {
		IPAERR("register handler for suspend interrupt failed\n");
		return -EPERM;
	}

	IPADBG("suspend handler successfully restored\n");
	return 0;
}
static int ipa3_apps_cons_release_resource(void)
{
	return 0;
}

static int ipa3_apps_cons_request_resource(void)
{
	return 0;
}

static void ipa3_transport_release_resource(struct work_struct *work)
{
	mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
	/* check whether we still need to decrease the client vote */
	if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
		if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
			IPADBG("EOT pending, re-scheduling\n");
			ipa3_process_irq_schedule_rel();
		} else {
			atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
			ipa3_dec_release_wakelock();
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
		}
	}
	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
	mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
}
int ipa3_create_apps_resource(void)
{
	struct ipa_rm_create_params apps_cons_create_params;
	struct ipa_rm_perf_profile profile;
	int result = 0;

	memset(&apps_cons_create_params, 0,
		sizeof(apps_cons_create_params));
	apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
	apps_cons_create_params.request_resource =
		ipa3_apps_cons_request_resource;
	apps_cons_create_params.release_resource =
		ipa3_apps_cons_release_resource;
	result = ipa_rm_create_resource(&apps_cons_create_params);
	if (result) {
		IPAERR("ipa_rm_create_resource failed\n");
		return result;
	}

	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);

	return result;
}
/**
 * ipa3_init_interrupts() - Register to IPA IRQs
 *
 * Return codes: 0 in success, negative in failure
 *
 */
int ipa3_init_interrupts(void)
{
	int result;

	/* register IPA IRQ handler */
	result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
		&ipa3_ctx->master_pdev->dev);
	if (result) {
		IPAERR("ipa interrupts initialization failed\n");
		return -ENODEV;
	}

	/* add handler for suspend interrupt */
	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
		ipa3_suspend_handler, false, NULL);
	if (result) {
		IPAERR("register handler for suspend interrupt failed\n");
		result = -ENODEV;
		goto fail_add_interrupt_handler;
	}

	return 0;

fail_add_interrupt_handler:
	ipa3_interrupts_destroy(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev);
	return result;
}
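
/*
 * Illustrative sketch (not part of the driver, compiled out): any IPA IRQ
 * can be hooked with the same add/remove pair used above. The handler
 * signature matches ipa3_suspend_handler(); the handler body and the choice
 * of IRQ here are hypothetical.
 */
#if 0
static void example_irq_handler(enum ipa_irq_type interrupt,
	void *private_data, void *interrupt_data)
{
	IPADBG("got IPA interrupt %d\n", interrupt);
}

static int example_hook_irq(void)
{
	return ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
		example_irq_handler, false, NULL);
}
#endif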
/**
 * ipa3_destroy_flt_tbl_idrs() - destroy the idr structures for flt tables
 * The idr structure kept per filtering table is used for rule id generation
 * per filtering rule.
 */
static void ipa3_destroy_flt_tbl_idrs(void)
{
	int i;
	struct ipa3_flt_tbl *flt_tbl;

	idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
	idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]);

	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
		flt_tbl->rule_ids = NULL;

		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
		flt_tbl->rule_ids = NULL;
	}
}
static void ipa3_freeze_clock_vote_and_notify_modem(void)
{
	int res;
	struct ipa_active_client_logging_info log_info;

	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
		IPADBG("Ignore smp2p on APQ platform\n");
		return;
	}

	if (ipa3_ctx->smp2p_info.res_sent)
		return;

	if (IS_ERR(ipa3_ctx->smp2p_info.smem_state)) {
		IPAERR("fail to get smp2p clk resp bit %ld\n",
			PTR_ERR(ipa3_ctx->smp2p_info.smem_state));
		return;
	}

	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
	res = ipa3_inc_client_enable_clks_no_block(&log_info);
	if (res)
		ipa3_ctx->smp2p_info.ipa_clk_on = false;
	else
		ipa3_ctx->smp2p_info.ipa_clk_on = true;

	qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state,
		IPA_SMP2P_SMEM_STATE_MASK,
		((ipa3_ctx->smp2p_info.ipa_clk_on <<
			IPA_SMP2P_OUT_CLK_VOTE_IDX) |
			(1 << IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX)));

	ipa3_ctx->smp2p_info.res_sent = true;
	IPADBG("IPA clocks are %s\n",
		ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF");
}
void ipa3_reset_freeze_vote(void)
{
	if (!ipa3_ctx->smp2p_info.res_sent)
		return;

	if (ipa3_ctx->smp2p_info.ipa_clk_on)
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE");

	qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state,
		IPA_SMP2P_SMEM_STATE_MASK,
		((0 << IPA_SMP2P_OUT_CLK_VOTE_IDX) |
			(0 << IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX)));

	ipa3_ctx->smp2p_info.res_sent = false;
	ipa3_ctx->smp2p_info.ipa_clk_on = false;
}
static int ipa3_panic_notifier(struct notifier_block *this,
	unsigned long event, void *ptr)
{
	int res;

	ipa3_freeze_clock_vote_and_notify_modem();

	IPADBG("Calling uC panic handler\n");
	res = ipa3_uc_panic_notifier(this, event, ptr);
	if (res)
		IPAERR("uC panic handler failed %d\n", res);

	if (atomic_read(&ipa3_ctx->ipa_clk_vote)) {
		ipahal_print_all_regs(false);
		ipa_save_registers();
		ipa_wigig_save_regs();
	}

	return NOTIFY_DONE;
}

static struct notifier_block ipa3_panic_blk = {
	.notifier_call = ipa3_panic_notifier,
	/* IPA panic handler needs to run before modem shuts down */
	.priority = INT_MAX,
};

static void ipa3_register_panic_hdlr(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
		&ipa3_panic_blk);
}
static void ipa3_trigger_ipa_ready_cbs(void)
{
	struct ipa3_ready_cb_info *info;

	mutex_lock(&ipa3_ctx->lock);

	/* Call all the CBs */
	list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
		if (info->ready_cb)
			info->ready_cb(info->user_data);

	mutex_unlock(&ipa3_ctx->lock);
}

static void ipa3_uc_is_loaded(void)
{
	IPADBG("\n");
	complete_all(&ipa3_ctx->uc_loaded_completion_obj);
}
static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
{
	enum gsi_ver gsi_ver;

	switch (ipa_hw_type) {
	case IPA_HW_v3_0:
	case IPA_HW_v3_1:
		gsi_ver = GSI_VER_1_0;
		break;
	case IPA_HW_v3_5:
		gsi_ver = GSI_VER_1_2;
		break;
	case IPA_HW_v3_5_1:
		gsi_ver = GSI_VER_1_3;
		break;
	case IPA_HW_v4_0:
	case IPA_HW_v4_1:
		gsi_ver = GSI_VER_2_0;
		break;
	case IPA_HW_v4_2:
		gsi_ver = GSI_VER_2_2;
		break;
	case IPA_HW_v4_5:
		gsi_ver = GSI_VER_2_5;
		break;
	case IPA_HW_v4_7:
		gsi_ver = GSI_VER_2_7;
		break;
	default:
		IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
		WARN_ON(1);
		gsi_ver = GSI_VER_ERR;
	}

	IPADBG("GSI version %d\n", gsi_ver);

	return gsi_ver;
}
static int ipa3_gsi_pre_fw_load_init(void)
{
	int result;

	result = gsi_configure_regs(
		ipa3_res.ipa_mem_base,
		ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));

	if (result) {
		IPAERR("Failed to configure GSI registers\n");
		return -EINVAL;
	}

	return 0;
}
static int ipa3_alloc_gsi_channel(void)
{
	const struct ipa_gsi_ep_config *gsi_ep_cfg;
	enum ipa_client_type type;
	int code = 0;
	int ret = 0;
	int i;

	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		type = ipa3_get_client_by_pipe(i);
		gsi_ep_cfg = ipa3_get_gsi_ep_info(type);
		IPADBG("for ep %d client is %d\n", i, type);
		if (!gsi_ep_cfg)
			continue;

		ret = gsi_alloc_channel_ee(gsi_ep_cfg->ipa_gsi_chan_num,
			gsi_ep_cfg->ee, &code);
		if (ret == GSI_STATUS_SUCCESS) {
			IPADBG("alloc gsi ch %d ee %d with code %d\n",
				gsi_ep_cfg->ipa_gsi_chan_num,
				gsi_ep_cfg->ee,
				code);
		} else {
			IPAERR("failed to alloc ch %d ee %d code %d\n",
				gsi_ep_cfg->ipa_gsi_chan_num,
				gsi_ep_cfg->ee,
				code);
			return ret;
		}
	}
	return ret;
}
/**
 * ipa3_post_init() - Initialize the IPA Driver (Part II).
 * This part contains all initialization which requires interaction with
 * IPA HW (via GSI).
 *
 * @resource_p: contain platform specific values from DTS file
 * @ipa_dev: The basic device structure representing the IPA driver
 *
 * Function initialization process:
 * - Initialize endpoints bitmaps
 * - Initialize resource groups min and max values
 * - Initialize filtering lists heads and idr
 * - Initialize interrupts
 * - Register GSI
 * - Setup APPS pipes
 * - Initialize tethering bridge
 * - Initialize IPA debugfs
 * - Initialize IPA uC interface
 * - Initialize WDI interface
 * - Initialize USB interface
 * - Register for panic handler
 * - Trigger IPA ready callbacks (to all subscribers)
 * - Trigger IPA completion object (to all who wait on it)
 */
static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
	struct device *ipa_dev)
{
	int result;
	struct gsi_per_props gsi_props;
	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
	struct ipa3_flt_tbl *flt_tbl;
	int i;
	struct idr *idr;

	if (ipa3_ctx == NULL) {
		IPADBG("IPA driver hasn't been initialized\n");
		return -ENXIO;
	}

	/* Prevent consequent calls from trying to load the FW again. */
	if (ipa3_ctx->ipa_initialization_complete)
		return 0;

	IPADBG("active clients = %d\n",
		atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
	/* move proxy vote for modem on ipa3_post_init */
	if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
		ipa3_proxy_clk_vote();

	/* The following will retrieve and save the gsi fw version */
	ipa_save_gsi_ver();

	if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
		ipa3_ctx->pdev)) {
		IPAERR("fail to init ipahal\n");
		result = -EFAULT;
		goto fail_ipahal;
	}

	result = ipa3_init_hw();
	if (result) {
		IPAERR(":error initializing HW\n");
		result = -ENODEV;
		goto fail_init_hw;
	}
	IPADBG("IPA HW initialization sequence completed\n");

	ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
	IPADBG("IPA Pipes num %u\n", ipa3_ctx->ipa_num_pipes);
	if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
		IPAERR("IPA has more pipes than supported; has %d, max %d\n",
			ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
		result = -ENODEV;
		goto fail_init_hw;
	}

	ipa3_ctx->ctrl->ipa_sram_read_settings();
	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
		ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);

	IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
		ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
		ipa3_ctx->ip4_rt_tbl_nhash_lcl);

	IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
		ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);

	IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
		ipa3_ctx->ip4_flt_tbl_hash_lcl,
		ipa3_ctx->ip4_flt_tbl_nhash_lcl);

	IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
		ipa3_ctx->ip6_flt_tbl_hash_lcl,
		ipa3_ctx->ip6_flt_tbl_nhash_lcl);

	if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
		IPAERR("SW expects more core memory, needed %d, avail %d\n",
			ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
		result = -ENOMEM;
		goto fail_init_hw;
	}

	result = ipa3_allocate_dma_task_for_gsi();
	if (result) {
		IPAERR("failed to allocate dma task\n");
		goto fail_dma_task;
	}

	if (ipa3_nat_ipv6ct_init_devices()) {
		IPAERR("unable to init NAT and IPv6CT devices\n");
		result = -ENODEV;
		goto fail_nat_ipv6ct_init_dev;
	}

	result = ipa3_alloc_pkt_init();
	if (result) {
		IPAERR("Failed to alloc pkt_init payload\n");
		result = -ENODEV;
		goto fail_alloc_pkt_init;
	}

	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
		ipa3_enable_dcd();

	/*
	 * The indication of whether we are working in MHI config or non-MHI
	 * config is given in ipa3_write, which runs before ipa3_post_init;
	 * i.e. from this point it is safe to use the ipa3_ep_mapping array
	 * and the correct entry will be returned from
	 * ipa3_get_hw_type_index()
	 */
	ipa_init_ep_flt_bitmap();
	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);

	/* Assign resource limitation to each group */
	ipa3_set_resorce_groups_min_max_limits();

	idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
	idr_init(idr);
	idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
	idr_init(idr);

	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
		flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4];

		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
		flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6];
	}

	result = ipa3_init_interrupts();
	if (result) {
		IPAERR("ipa initialization of interrupts failed\n");
		result = -ENODEV;
		goto fail_init_interrupts;
	}

	/*
	 * Disable prefetch for USB or MHI on IPA v3.5/v3.5.1.
	 * This is to allow MBIM to work.
	 */
	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
		&& ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
		(!ipa3_ctx->ipa_config_is_mhi))
		ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);

	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
		&& ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
		(ipa3_ctx->ipa_config_is_mhi))
		ipa3_disable_prefetch(IPA_CLIENT_MHI_CONS);

	memset(&gsi_props, 0, sizeof(gsi_props));
	gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
	gsi_props.ee = resource_p->ee;
	gsi_props.intr = GSI_INTR_IRQ;
	gsi_props.phys_addr = resource_p->transport_mem_base;
	gsi_props.size = resource_p->transport_mem_size;
	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
		gsi_props.irq = resource_p->emulator_irq;
		gsi_props.emulator_intcntrlr_client_isr = ipa3_get_isr();
		gsi_props.emulator_intcntrlr_addr =
			resource_p->emulator_intcntrlr_mem_base;
		gsi_props.emulator_intcntrlr_size =
			resource_p->emulator_intcntrlr_mem_size;
	} else {
		gsi_props.irq = resource_p->transport_irq;
	}
	gsi_props.notify_cb = ipa_gsi_notify_cb;
	gsi_props.req_clk_cb = NULL;
	gsi_props.rel_clk_cb = NULL;
	gsi_props.clk_status_cb = ipa3_active_clks_status;

	if (ipa3_ctx->ipa_config_is_mhi) {
		gsi_props.mhi_er_id_limits_valid = true;
		gsi_props.mhi_er_id_limits[0] = resource_p->mhi_evid_limits[0];
		gsi_props.mhi_er_id_limits[1] = resource_p->mhi_evid_limits[1];
	}

	result = gsi_register_device(&gsi_props,
		&ipa3_ctx->gsi_dev_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR(":gsi register error - %d\n", result);
		result = -ENODEV;
		goto fail_register_device;
	}
	IPADBG("IPA gsi is registered\n");

	/*
	 * GSI 2.2 requires all EE GSI channels to be allocated during
	 * device bootup.
	 */
	if (ipa3_get_gsi_ver(resource_p->ipa_hw_type) == GSI_VER_2_2) {
		result = ipa3_alloc_gsi_channel();
		if (result) {
			IPAERR("Failed to alloc the GSI channels\n");
			result = -ENODEV;
			goto fail_alloc_gsi_channel;
		}
	}

	/* setup the AP-IPA pipes */
	if (ipa3_setup_apps_pipes()) {
		IPAERR(":failed to setup IPA-Apps pipes\n");
		result = -ENODEV;
		goto fail_setup_apps_pipes;
	}
	IPADBG("IPA GPI pipes were connected\n");

	if (ipa3_ctx->use_ipa_teth_bridge) {
		/* Initialize the tethering bridge driver */
		result = ipa3_teth_bridge_driver_init();
		if (result) {
			IPAERR(":teth_bridge init failed (%d)\n", -result);
			result = -ENODEV;
			goto fail_teth_bridge_driver_init;
		}
		IPADBG("teth_bridge initialized\n");
	}

	result = ipa3_uc_interface_init();
	if (result)
		IPAERR(":ipa Uc interface init failed (%d)\n", -result);
	else
		IPADBG(":ipa Uc interface init ok\n");

	uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
	ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);

	result = ipa3_wdi_init();
	if (result)
		IPAERR(":wdi init failed (%d)\n", -result);
	else
		IPADBG(":wdi init ok\n");

	result = ipa3_wigig_init_i();
	if (result)
		IPAERR(":wigig init failed (%d)\n", -result);
	else
		IPADBG(":wigig init ok\n");

	result = ipa3_ntn_init();
	if (result)
		IPAERR(":ntn init failed (%d)\n", -result);
	else
		IPADBG(":ntn init ok\n");

	result = ipa_hw_stats_init();
	if (result)
		IPAERR("fail to init stats %d\n", result);
	else
		IPADBG(":stats init ok\n");

	ipa3_register_panic_hdlr();

	ipa3_debugfs_init();

	mutex_lock(&ipa3_ctx->lock);
	ipa3_ctx->ipa_initialization_complete = true;
	mutex_unlock(&ipa3_ctx->lock);

	ipa3_trigger_ipa_ready_cbs();
	complete_all(&ipa3_ctx->init_completion_obj);
	pr_info("IPA driver initialization was successful.\n");

	return 0;

fail_teth_bridge_driver_init:
	ipa3_teardown_apps_pipes();
fail_alloc_gsi_channel:
fail_setup_apps_pipes:
	gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
fail_register_device:
	ipa3_destroy_flt_tbl_idrs();
fail_init_interrupts:
	ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
	ipa3_interrupts_destroy(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev);
fail_alloc_pkt_init:
	ipa3_nat_ipv6ct_destroy_devices();
fail_nat_ipv6ct_init_dev:
	ipa3_free_dma_task_for_gsi();
fail_dma_task:
fail_init_hw:
	ipahal_destroy();
fail_ipahal:
	ipa3_proxy_clk_unvote();

	return result;
}
static int ipa3_manual_load_ipa_fws(void)
{
	int result;
	const struct firmware *fw;
	const char *path = IPA_FWS_PATH;

	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
		switch (ipa3_get_emulation_type()) {
		case IPA_HW_v3_5_1:
			path = IPA_FWS_PATH_3_5_1;
			break;
		case IPA_HW_v4_0:
			path = IPA_FWS_PATH_4_0;
			break;
		case IPA_HW_v4_5:
			path = IPA_FWS_PATH_4_5;
			break;
		default:
			break;
		}
	}

	IPADBG("Manual FW loading (%s) process initiated\n", path);

	result = request_firmware(&fw, path, ipa3_ctx->cdev.dev);
	if (result < 0) {
		IPAERR("request_firmware failed, error %d\n", result);
		return result;
	}

	IPADBG("FWs are available for loading\n");

	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
		result = emulator_load_fws(fw,
			ipa3_res.transport_mem_base,
			ipa3_res.transport_mem_size,
			ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
	} else {
		result = ipa3_load_fws(fw, ipa3_res.transport_mem_base,
			ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
	}
	if (result) {
		IPAERR("Manual IPA FWs loading has failed\n");
		release_firmware(fw);
		return result;
	}

	result = gsi_enable_fw(ipa3_res.transport_mem_base,
		ipa3_res.transport_mem_size,
		ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
	if (result) {
		IPAERR("Failed to enable GSI FW\n");
		release_firmware(fw);
		return result;
	}

	release_firmware(fw);

	IPADBG("Manual FW loading process is complete\n");
	return 0;
}
static int ipa3_pil_load_ipa_fws(const char *sub_sys)
{
	void *subsystem_get_retval = NULL;

	IPADBG("PIL FW loading process initiated sub_sys=%s\n",
		sub_sys);

	subsystem_get_retval = subsystem_get(sub_sys);
	if (IS_ERR_OR_NULL(subsystem_get_retval)) {
		IPAERR("Unable to PIL load FW for sub_sys=%s\n", sub_sys);
		return -EINVAL;
	}

	IPADBG("PIL FW loading process is complete sub_sys=%s\n", sub_sys);
	return 0;
}
static void ipa3_load_ipa_fw(struct work_struct *work)
{
	int result;

	IPADBG("Entry\n");

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	result = ipa3_attach_to_smmu();
	if (result) {
		IPAERR("IPA attach to smmu failed %d\n", result);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return;
	}

	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
	    ((ipa3_ctx->platform_type != IPA_PLAT_TYPE_MDM) ||
	    (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)))
		result = ipa3_pil_load_ipa_fws(IPA_SUBSYSTEM_NAME);
	else
		result = ipa3_manual_load_ipa_fws();

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	if (result) {
		IPAERR("IPA FW loading process has failed result=%d\n",
			result);
		return;
	}
	pr_info("IPA FW loaded successfully\n");

	result = ipa3_post_init(&ipa3_res, ipa3_ctx->cdev.dev);
	if (result) {
		IPAERR("IPA post init failed %d\n", result);
		return;
	}

	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ &&
	    ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL &&
	    ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {

		IPADBG("Loading IPA uC via PIL\n");

		/* Unvoting will happen when uC loaded event received. */
		ipa3_proxy_clk_vote();

		result = ipa3_pil_load_ipa_fws(IPA_UC_SUBSYSTEM_NAME);
		if (result) {
			IPAERR("IPA uC loading process has failed result=%d\n",
				result);
			return;
		}
		IPADBG("IPA uC PIL loading succeeded\n");
	}
}
static ssize_t ipa3_write(struct file *file, const char __user *buf,
	size_t count, loff_t *ppos)
{
	unsigned long missing;
	char dbg_buff[32] = { 0 };

	if (count >= sizeof(dbg_buff))
		return -EFAULT;

	missing = copy_from_user(dbg_buff, buf, count);

	if (missing) {
		IPAERR("Unable to copy data from user\n");
		return -EFAULT;
	}

	if (count > 0)
		dbg_buff[count] = '\0';

	IPADBG("user input string %s\n", dbg_buff);

	/* Prevent consequent calls from trying to load the FW again. */
	if (ipa3_is_ready())
		return count;

	/* Check MHI configuration on MDM devices */
	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) {

		if (strnstr(dbg_buff, "vlan", strlen(dbg_buff))) {
			if (strnstr(dbg_buff, "eth", strlen(dbg_buff)))
				ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_EMAC] =
					true;
			if (strnstr(dbg_buff, "rndis", strlen(dbg_buff)))
				ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_RNDIS] =
					true;
			if (strnstr(dbg_buff, "ecm", strlen(dbg_buff)))
				ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_ECM] =
					true;

			/*
			 * when vlan mode is passed to our dev we expect
			 * another write
			 */
			return count;
		}

		/* trim ending newline character if any */
		if (count && (dbg_buff[count - 1] == '\n'))
			dbg_buff[count - 1] = '\0';

		/*
		 * This logic enforces MHI mode based on userspace input.
		 * Note that MHI mode could already have been determined by
		 * previous logic.
		 */
		if (!strcasecmp(dbg_buff, "MHI")) {
			ipa3_ctx->ipa_config_is_mhi = true;
		} else if (strcmp(dbg_buff, "1")) {
			IPAERR("got invalid string %s not loading FW\n",
				dbg_buff);
			return count;
		}
		pr_info("IPA is loading with %sMHI configuration\n",
			ipa3_ctx->ipa_config_is_mhi ? "" : "non ");
	}

	/* Prevent multiple calls from trying to load the FW again. */
	if (ipa3_ctx->fw_loaded) {
		IPAERR("not loading FW again\n");
		return count;
	}

	/* Schedule WQ to load ipa-fws */
	ipa3_ctx->fw_loaded = true;

	queue_work(ipa3_ctx->transport_power_mgmt_wq,
		&ipa3_fw_loading_work);

	IPADBG("Scheduled a work to load IPA FW\n");
	return count;
}
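
/*
 * Usage note (illustrative, not part of the driver): ipa3_write() is the
 * .write handler of ipa3_drv_fops above, so userspace kicks off FW loading
 * by writing to the IPA char device - roughly "echo 1 > /dev/<ipa-cdev>"
 * for the default configuration, or "echo MHI > /dev/<ipa-cdev>" on MDM to
 * force MHI mode. The exact device node name depends on how the cdev is
 * registered elsewhere in this file.
 */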
/**
 * ipa3_tz_unlock_reg - Unlocks memory regions so that they become accessible
 * from AP.
 * @reg_info - Pointer to array of memory regions to unlock
 * @num_regs - Number of elements in the array
 *
 * Converts the input array of regions to a struct that TZ understands and
 * issues an SCM call.
 * Also flushes the memory cache to DDR in order to make sure that TZ sees the
 * correct data structure.
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs)
{
	int i, size, ret;
	struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
	struct tz_smmu_ipa_protect_region_s cmd_buf;
	struct scm_desc desc = {0};

	if (reg_info == NULL || num_regs == 0) {
		IPAERR("Bad parameters\n");
		return -EFAULT;
	}

	size = num_regs * sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
	ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
	if (ipa_tz_unlock_vec == NULL)
		return -ENOMEM;

	for (i = 0; i < num_regs; i++) {
		/* XORing out the low 12 bits aligns the address to a 4K page */
		ipa_tz_unlock_vec[i].input_addr = reg_info[i].reg_addr ^
			(reg_info[i].reg_addr & 0xFFF);
		ipa_tz_unlock_vec[i].output_addr = reg_info[i].reg_addr ^
			(reg_info[i].reg_addr & 0xFFF);
		ipa_tz_unlock_vec[i].size = reg_info[i].size;
		ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
	}

	/* pass physical address of command buffer */
	cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
	cmd_buf.size_bytes = size;

	desc.args[0] = virt_to_phys((void *)ipa_tz_unlock_vec);
	desc.args[1] = size;
	desc.arginfo = SCM_ARGS(2);
	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
		TZ_MEM_PROTECT_REGION_ID), &desc);
	if (ret) {
		IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
		kfree(ipa_tz_unlock_vec);
		return -EFAULT;
	}
	kfree(ipa_tz_unlock_vec);
	return 0;
}
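
/*
 * Illustrative sketch (not part of the driver, compiled out): unlocking a
 * single register region for AP access. The struct fields match the ones
 * consumed by ipa3_tz_unlock_reg() above; the address and size values are
 * hypothetical placeholders, real values come from the platform.
 */
#if 0
static int example_unlock_region(void)
{
	struct ipa_tz_unlock_reg_info region = {
		.reg_addr = 0x01e40000,	/* hypothetical physical address */
		.size = 0x1000,		/* one 4K page */
	};

	return ipa3_tz_unlock_reg(&region, 1);
}
#endif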
static int ipa3_alloc_pkt_init(void)
{
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_ip_packet_init cmd = {0};
	int i;

	/* constructed once up-front to learn the opcode and payload length */
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
		&cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct IMM cmd\n");
		return -ENOMEM;
	}
	ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode;

	mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
		&mem.phys_base, GFP_KERNEL);
	if (!mem.base) {
		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
		ipahal_destroy_imm_cmd(cmd_pyld);
		return -ENOMEM;
	}
	ipahal_destroy_imm_cmd(cmd_pyld);

	memset(mem.base, 0, mem.size);
	/* build one IP_PACKET_INIT payload per destination pipe */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		cmd.destination_pipe_index = i;
		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
			&cmd, false);
		if (!cmd_pyld) {
			IPAERR("failed to construct IMM cmd\n");
			dma_free_coherent(ipa3_ctx->pdev,
				mem.size,
				mem.base,
				mem.phys_base);
			return -ENOMEM;
		}
		memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
			cmd_pyld->len);
		ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
		ipahal_destroy_imm_cmd(cmd_pyld);
	}
	return 0;
}
/*
 * SCM call to check if secure dump is allowed.
 *
 * Returns true if secure dump is allowed, false otherwise.
 */
#define TZ_UTIL_GET_SEC_DUMP_STATE	0x10
static bool ipa_is_mem_dump_allowed(void)
{
	struct scm_desc desc = {0};
	int ret = 0;

	desc.args[0] = 0;
	desc.arginfo = 0;

	ret = scm_call2(
		SCM_SIP_FNID(SCM_SVC_UTIL, TZ_UTIL_GET_SEC_DUMP_STATE),
		&desc);
	if (ret) {
		IPAERR("SCM DUMP_STATE call failed\n");
		return false;
	}

	return (desc.ret[0] == 1);
}
  5429. /**
  5430. * ipa3_pre_init() - Initialize the IPA Driver.
  5431. * This part contains all initialization which doesn't require IPA HW, such
  5432. * as structure allocations and initializations, register writes, etc.
  5433. *
  5434. * @resource_p: contain platform specific values from DST file
  5435. * @pdev: The platform device structure representing the IPA driver
  5436. *
  5437. * Function initialization process:
  5438. * Allocate memory for the driver context data struct
  5439. * Initializing the ipa3_ctx with :
  5440. * 1)parsed values from the dts file
  5441. * 2)parameters passed to the module initialization
  5442. * 3)read HW values(such as core memory size)
  5443. * Map IPA core registers to CPU memory
  5444. * Restart IPA core(HW reset)
  5445. * Initialize the look-aside caches(kmem_cache/slab) for filter,
  5446. * routing and IPA-tree
  5447. * Create memory pool with 4 objects for DMA operations(each object
  5448. * is 512Bytes long), this object will be use for tx(A5->IPA)
  5449. * Initialize lists head(routing, hdr, system pipes)
  5450. * Initialize mutexes (for ipa_ctx and NAT memory mutexes)
  5451. * Initialize spinlocks (for list related to A5<->IPA pipes)
  5452. * Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
  5453. * Initialize Red-Black-Tree(s) for handles of header,routing rule,
  5454. * routing table ,filtering rule
  5455. * Initialize the filter block by committing IPV4 and IPV6 default rules
  5456. * Create empty routing table in system memory(no committing)
  5457. * Create a char-device for IPA
  5458. * Initialize IPA PM (power manager)
  5459. * Configure GSI registers (in GSI case)
  5460. */
static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
	struct platform_device *ipa_pdev)
{
	int result = 0;
	int i, j;
	struct ipa3_rt_tbl_set *rset;
	struct ipa_active_client_logging_info log_info;
	struct cdev *cdev;

	IPADBG("IPA Driver initialization started\n");

	ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
	if (!ipa3_ctx) {
		result = -ENOMEM;
		goto fail_mem_ctx;
	}

	ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
	if (ipa3_ctx->logbuf == NULL)
		IPADBG("failed to create IPC log, continue...\n");

	/* ipa3_ctx->pdev and ipa3_ctx->uc_pdev will be set in the smmu probes */
	ipa3_ctx->master_pdev = ipa_pdev;
	for (i = 0; i < IPA_SMMU_CB_MAX; i++)
		ipa3_ctx->s1_bypass_arr[i] = true;

	/* initialize the gsi protocol info for uC debug stats */
	for (i = 0; i < IPA_HW_PROTOCOL_MAX; i++) {
		ipa3_ctx->gsi_info[i].protocol = i;
		/* initialize all to be not started */
		for (j = 0; j < MAX_CH_STATS_SUPPORTED; j++)
			ipa3_ctx->gsi_info[i].ch_id_info[j].ch_id = 0xFF;
	}
	ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
	ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
	ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
	ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
	ipa3_ctx->platform_type = resource_p->platform_type;
	ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
	ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
	ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
	ipa3_ctx->ipa_wdi2_over_gsi = resource_p->ipa_wdi2_over_gsi;
	ipa3_ctx->ipa_wdi3_over_gsi = resource_p->ipa_wdi3_over_gsi;
	ipa3_ctx->ipa_fltrt_not_hashable = resource_p->ipa_fltrt_not_hashable;
	ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
	ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
	ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
	ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
	ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
	ipa3_ctx->ee = resource_p->ee;
	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
	ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie;
	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
	ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
	ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
	ipa3_ctx->mhi_evid_limits[1] = resource_p->mhi_evid_limits[1];
	ipa3_ctx->entire_ipa_block_size = resource_p->entire_ipa_block_size;
	ipa3_ctx->do_register_collection_on_crash =
		resource_p->do_register_collection_on_crash;
	ipa3_ctx->do_testbus_collection_on_crash =
		resource_p->do_testbus_collection_on_crash;
	ipa3_ctx->do_non_tn_collection_on_crash =
		resource_p->do_non_tn_collection_on_crash;
	ipa3_ctx->secure_debug_check_action =
		resource_p->secure_debug_check_action;
	ipa3_ctx->do_ram_collection_on_crash =
		resource_p->do_ram_collection_on_crash;
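	/* Decide the secure-debug state: either query TZ via SCM or
	 * honor the override requested in the device tree.
	 */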
	if (ipa3_ctx->secure_debug_check_action == USE_SCM) {
		if (ipa_is_mem_dump_allowed())
			ipa3_ctx->sd_state = SD_ENABLED;
		else
			ipa3_ctx->sd_state = SD_DISABLED;
	} else {
		if (ipa3_ctx->secure_debug_check_action == OVERRIDE_SCM_TRUE)
			ipa3_ctx->sd_state = SD_ENABLED;
		else
			/* secure_debug_check_action == OVERRIDE_SCM_FALSE */
			ipa3_ctx->sd_state = SD_DISABLED;
	}

	if (ipa3_ctx->sd_state == SD_ENABLED) {
		/* secure debug is enabled. */
		IPADBG("secure debug enabled\n");
	} else {
		/* secure debug is disabled. */
		IPADBG("secure debug disabled\n");
		ipa3_ctx->do_testbus_collection_on_crash = false;
	}
	ipa3_ctx->ipa_endp_delay_wa = resource_p->ipa_endp_delay_wa;

	WARN(ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL,
		"Non-NORMAL IPA HW mode, is this an emulation platform?");

	if (resource_p->ipa_tz_unlock_reg) {
		ipa3_ctx->ipa_tz_unlock_reg_num =
			resource_p->ipa_tz_unlock_reg_num;
		ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
			ipa3_ctx->ipa_tz_unlock_reg_num,
			sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
			GFP_KERNEL);
		if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
			result = -ENOMEM;
			goto fail_tz_unlock_reg;
		}
		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
			ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
				resource_p->ipa_tz_unlock_reg[i].reg_addr;
			ipa3_ctx->ipa_tz_unlock_reg[i].size =
				resource_p->ipa_tz_unlock_reg[i].size;
		}

		/* unlock registers for uc */
		result = ipa3_tz_unlock_reg(ipa3_ctx->ipa_tz_unlock_reg,
			ipa3_ctx->ipa_tz_unlock_reg_num);
		if (result)
			IPAERR("Failed to unlock memory region using TZ\n");
	}
	/* default aggregation parameters */
	ipa3_ctx->aggregation_type = IPA_MBIM_16;
	ipa3_ctx->aggregation_byte_limit = 1;
	ipa3_ctx->aggregation_time_limit = 0;

	ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
	if (!ipa3_ctx->ctrl) {
		result = -ENOMEM;
		goto fail_mem_ctrl;
	}

	result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
		ipa3_ctx->ipa_hw_type);
	if (result) {
		IPAERR("fail to static bind IPA ctrl\n");
		result = -EFAULT;
		goto fail_bind;
	}

	result = ipa3_init_mem_partition(ipa3_ctx->ipa_hw_type);
	if (result) {
		IPAERR(":ipa3_init_mem_partition failed\n");
		result = -ENODEV;
		goto fail_init_mem_partition;
	}

	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL &&
	    ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
		ipa3_ctx->ctrl->msm_bus_data_ptr =
			msm_bus_cl_get_pdata(ipa3_ctx->master_pdev);
		if (ipa3_ctx->ctrl->msm_bus_data_ptr == NULL) {
			IPAERR("failed to get bus scaling\n");
			result = -ENODEV;
			goto fail_bus_reg;
		}
		IPADBG("Use bus scaling info from device tree #usecases=%d\n",
			ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases);

		/* get BUS handle */
		ipa3_ctx->ipa_bus_hdl =
			msm_bus_scale_register_client(
				ipa3_ctx->ctrl->msm_bus_data_ptr);
		if (!ipa3_ctx->ipa_bus_hdl) {
			IPAERR("fail to register with bus mgr!\n");
			ipa3_ctx->ctrl->msm_bus_data_ptr = NULL;
			result = -EPROBE_DEFER;
			goto fail_bus_reg;
		}
	}
	/* get IPA clocks */
	result = ipa3_get_clks(&ipa3_ctx->master_pdev->dev);
	if (result)
		goto fail_clk;

	/* init active_clients_log after getting ipa-clk */
	result = ipa3_active_clients_log_init();
	if (result)
		goto fail_init_active_client;

	/* enable clock scaling */
	ipa3_ctx->enable_clock_scaling = 1;
	/* vote for svs2 on bootup */
	ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
	/* enable NAPI chaining */
	ipa3_ctx->enable_napi_chain = 1;

	/* enable IPA clocks explicitly to allow the initialization */
	ipa3_enable_clks();

	/* setup IPA register access */
	IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
		ipa3_ctx->ctrl->ipa_reg_base_ofst);
	ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
		ipa3_ctx->ctrl->ipa_reg_base_ofst,
		resource_p->ipa_mem_size);
	if (!ipa3_ctx->mmio) {
		IPAERR(":ipa-base ioremap err\n");
		result = -EFAULT;
		goto fail_remap;
	}
	IPADBG(
		"base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n",
		resource_p->ipa_mem_base,
		ipa3_ctx->ctrl->ipa_reg_base_ofst,
		resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst,
		ipa3_ctx->mmio,
		resource_p->ipa_mem_size);

	/*
	 * Set up access for register collection/dump on crash
	 */
	if (ipa_reg_save_init(IPA_MEM_INIT_VAL) != 0) {
		result = -EFAULT;
		goto fail_gsi_map;
	}
	/*
	 * Since we now know where the transport's registers live,
	 * let's set up access to them. This is done since subsequent
	 * functions, that deal with the transport, require the
	 * access.
	 */
	if (gsi_map_base(
		ipa3_res.transport_mem_base,
		ipa3_res.transport_mem_size) != 0) {
		IPAERR("Allocation of gsi base failed\n");
		result = -EFAULT;
		goto fail_gsi_map;
	}

	mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);

	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
	ipa3_active_clients_log_inc(&log_info, false);
	ipa3_ctx->q6_proxy_clk_vote_valid = true;
	ipa3_ctx->q6_proxy_clk_vote_cnt = 1;

	/* set the proxy vote count to 1 */
	atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
	/* Create workqueues for power management */
	ipa3_ctx->power_mgmt_wq =
		create_singlethread_workqueue("ipa_power_mgmt");
	if (!ipa3_ctx->power_mgmt_wq) {
		IPAERR("failed to create power mgmt wq\n");
		result = -ENOMEM;
		goto fail_init_hw;
	}

	ipa3_ctx->transport_power_mgmt_wq =
		create_singlethread_workqueue("transport_power_mgmt");
	if (!ipa3_ctx->transport_power_mgmt_wq) {
		IPAERR("failed to create transport power mgmt wq\n");
		result = -ENOMEM;
		goto fail_create_transport_wq;
	}

	mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);

	/* init the lookaside cache */
	ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
		sizeof(struct ipa3_flt_entry), 0, 0, NULL);
	if (!ipa3_ctx->flt_rule_cache) {
		IPAERR(":ipa flt cache create failed\n");
		result = -ENOMEM;
		goto fail_flt_rule_cache;
	}
	ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
		sizeof(struct ipa3_rt_entry), 0, 0, NULL);
	if (!ipa3_ctx->rt_rule_cache) {
		IPAERR(":ipa rt cache create failed\n");
		result = -ENOMEM;
		goto fail_rt_rule_cache;
	}
	ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
		sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
	if (!ipa3_ctx->hdr_cache) {
		IPAERR(":ipa hdr cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_cache;
	}
	ipa3_ctx->hdr_offset_cache =
		kmem_cache_create("IPA_HDR_OFFSET",
			sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
	if (!ipa3_ctx->hdr_offset_cache) {
		IPAERR(":ipa hdr off cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_offset_cache;
	}
	ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
		sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
	if (!ipa3_ctx->hdr_proc_ctx_cache) {
		IPAERR(":ipa hdr proc ctx cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_proc_ctx_cache;
	}
	ipa3_ctx->hdr_proc_ctx_offset_cache =
		kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
			sizeof(struct ipa3_hdr_proc_ctx_offset_entry),
			0, 0, NULL);
	if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
		IPAERR(":ipa hdr proc ctx off cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_proc_ctx_offset_cache;
	}
	ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
		sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
	if (!ipa3_ctx->rt_tbl_cache) {
		IPAERR(":ipa rt tbl cache create failed\n");
		result = -ENOMEM;
		goto fail_rt_tbl_cache;
	}
	ipa3_ctx->tx_pkt_wrapper_cache =
		kmem_cache_create("IPA_TX_PKT_WRAPPER",
			sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
	if (!ipa3_ctx->tx_pkt_wrapper_cache) {
		IPAERR(":ipa tx pkt wrapper cache create failed\n");
		result = -ENOMEM;
		goto fail_tx_pkt_wrapper_cache;
	}
	ipa3_ctx->rx_pkt_wrapper_cache =
		kmem_cache_create("IPA_RX_PKT_WRAPPER",
			sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
	if (!ipa3_ctx->rx_pkt_wrapper_cache) {
		IPAERR(":ipa rx pkt wrapper cache create failed\n");
		result = -ENOMEM;
		goto fail_rx_pkt_wrapper_cache;
	}
	/* init the various list heads */
	INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
	}
	INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
		INIT_LIST_HEAD(
			&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
		INIT_LIST_HEAD(
			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i]);
	}
	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
	idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
	idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);

	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
	idr_init(&rset->rule_ids);
	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
	idr_init(&rset->rule_ids);

	idr_init(&ipa3_ctx->flt_rt_counters.hdl);
	spin_lock_init(&ipa3_ctx->flt_rt_counters.hdl_lock);
	memset(&ipa3_ctx->flt_rt_counters.used_hw, 0,
		sizeof(ipa3_ctx->flt_rt_counters.used_hw));
	memset(&ipa3_ctx->flt_rt_counters.used_sw, 0,
		sizeof(ipa3_ctx->flt_rt_counters.used_sw));

	INIT_LIST_HEAD(&ipa3_ctx->intf_list);
	INIT_LIST_HEAD(&ipa3_ctx->msg_list);
	INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
	init_waitqueue_head(&ipa3_ctx->msg_waitq);
	mutex_init(&ipa3_ctx->msg_lock);

	/* store wlan client-connect-msg-list */
	INIT_LIST_HEAD(&ipa3_ctx->msg_wlan_client_list);
	mutex_init(&ipa3_ctx->msg_wlan_client_lock);

	mutex_init(&ipa3_ctx->lock);
	mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
	mutex_init(&ipa3_ctx->ipa_cne_evt_lock);

	idr_init(&ipa3_ctx->ipa_idr);
	spin_lock_init(&ipa3_ctx->idr_lock);

	/* wlan related members */
	memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
	spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
	spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
	INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
	ipa3_ctx->cdev.class = class_create(THIS_MODULE, DRV_NAME);

	result = alloc_chrdev_region(&ipa3_ctx->cdev.dev_num, 0, 1, DRV_NAME);
	if (result) {
		IPAERR("alloc_chrdev_region err\n");
		result = -ENODEV;
		goto fail_alloc_chrdev_region;
	}

	ipa3_ctx->cdev.dev = device_create(ipa3_ctx->cdev.class, NULL,
		ipa3_ctx->cdev.dev_num, ipa3_ctx, DRV_NAME);
	if (IS_ERR(ipa3_ctx->cdev.dev)) {
		IPAERR(":device_create err.\n");
		result = -ENODEV;
		goto fail_device_create;
	}

	/* Create a wakeup source. */
	wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
	spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);

	/* Initialize Power Management framework */
	result = ipa_pm_init(&ipa3_res.pm_init);
	if (result) {
		IPAERR("IPA PM initialization failed (%d)\n", -result);
		result = -ENODEV;
		goto fail_ipa_pm_init;
	}
	IPADBG("IPA power manager initialized\n");

	INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);

	init_completion(&ipa3_ctx->init_completion_obj);
	init_completion(&ipa3_ctx->uc_loaded_completion_obj);

	result = ipa3_dma_setup();
	if (result) {
		IPAERR("Failed to setup IPA DMA\n");
		result = -ENODEV;
		goto fail_ipa_dma_setup;
	}

	/*
	 * We can't register the GSI driver yet, as it expects
	 * the GSI FW to be up and running before the registration.
	 *
	 * For IPA3.0 and the emulation system, the GSI configuration
	 * is done by the GSI driver.
	 *
	 * For IPA3.1 (and on), the GSI configuration is done by TZ.
	 */
	if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0 ||
	    ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
		result = ipa3_gsi_pre_fw_load_init();
		if (result) {
			IPAERR("gsi pre FW loading config failed\n");
			result = -ENODEV;
			goto fail_gsi_pre_fw_load_init;
		}
	}
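	/* Register the IPA character device so user space can reach the
	 * driver through /dev.
	 */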
	cdev = &ipa3_ctx->cdev.cdev;
	cdev_init(cdev, &ipa3_drv_fops);
	cdev->owner = THIS_MODULE;
	cdev->ops = &ipa3_drv_fops;	/* from LDD3 */

	result = cdev_add(cdev, ipa3_ctx->cdev.dev_num, 1);
	if (result) {
		IPAERR(":cdev_add err=%d\n", -result);
		result = -ENODEV;
		goto fail_cdev_add;
	}
	IPADBG("ipa cdev added successfully. major:%d minor:%d\n",
		MAJOR(ipa3_ctx->cdev.dev_num),
		MINOR(ipa3_ctx->cdev.dev_num));

	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1) {
		result = ipa_odl_init();
		if (result) {
			IPADBG("Error: ODL init failed\n");
			result = -ENODEV;
			goto fail_cdev_add;
		}
	}

	/*
	 * For IPA 4.0, offline charging is not needed and we need to prevent
	 * power collapse until the IPA uC is loaded.
	 */
	/* proxy vote for modem is added in ipa3_post_init() phase */
	if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
		ipa3_proxy_clk_unvote();
	return 0;
fail_cdev_add:
fail_gsi_pre_fw_load_init:
	ipa3_dma_shutdown();
fail_ipa_dma_setup:
	ipa_pm_destroy();
fail_ipa_pm_init:
	device_destroy(ipa3_ctx->cdev.class, ipa3_ctx->cdev.dev_num);
fail_device_create:
	unregister_chrdev_region(ipa3_ctx->cdev.dev_num, 1);
fail_alloc_chrdev_region:
	idr_destroy(&ipa3_ctx->ipa_idr);
	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
	idr_destroy(&rset->rule_ids);
	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
	idr_destroy(&rset->rule_ids);
	idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
	idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
fail_rx_pkt_wrapper_cache:
	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
fail_tx_pkt_wrapper_cache:
	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
fail_rt_tbl_cache:
	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
fail_hdr_proc_ctx_offset_cache:
	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
fail_hdr_proc_ctx_cache:
	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
fail_hdr_offset_cache:
	kmem_cache_destroy(ipa3_ctx->hdr_cache);
fail_hdr_cache:
	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
fail_rt_rule_cache:
	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
fail_flt_rule_cache:
	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
fail_create_transport_wq:
	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
fail_init_hw:
	gsi_unmap_base();
fail_gsi_map:
	if (ipa3_ctx->reg_collection_base)
		iounmap(ipa3_ctx->reg_collection_base);
	iounmap(ipa3_ctx->mmio);
fail_remap:
	ipa3_disable_clks();
	ipa3_active_clients_log_destroy();
fail_init_active_client:
	if (ipa3_clk)
		clk_put(ipa3_clk);
	ipa3_clk = NULL;
fail_clk:
	if (ipa3_ctx->ipa_bus_hdl)
		msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
fail_bus_reg:
	if (ipa3_ctx->ctrl->msm_bus_data_ptr)
		msm_bus_cl_clear_pdata(ipa3_ctx->ctrl->msm_bus_data_ptr);
fail_init_mem_partition:
fail_bind:
	kfree(ipa3_ctx->ctrl);
fail_mem_ctrl:
	kfree(ipa3_ctx->ipa_tz_unlock_reg);
fail_tz_unlock_reg:
	if (ipa3_ctx->logbuf)
		ipc_log_context_destroy(ipa3_ctx->logbuf);
	kfree(ipa3_ctx);
	ipa3_ctx = NULL;
fail_mem_ctx:
	return result;
}
static int get_ipa_dts_pm_info(struct platform_device *pdev,
	struct ipa3_plat_drv_res *ipa_drv_res)
{
	int result;
	int i, j;

	result = of_property_read_u32(pdev->dev.of_node,
		"qcom,msm-bus,num-cases",
		&ipa_drv_res->pm_init.threshold_size);
	/* the "no vote" bus cases are not counted as thresholds */
	ipa_drv_res->pm_init.threshold_size -= 2;
	if (result || ipa_drv_res->pm_init.threshold_size >
		IPA_PM_THRESHOLD_MAX) {
		IPAERR("invalid property qcom,msm-bus,num-cases %d\n",
			ipa_drv_res->pm_init.threshold_size);
		return -EFAULT;
	}

	result = of_property_read_u32_array(pdev->dev.of_node,
		"qcom,throughput-threshold",
		ipa_drv_res->pm_init.default_threshold,
		ipa_drv_res->pm_init.threshold_size);
	if (result) {
		IPAERR("failed to read qcom,throughput-threshold\n");
		return -EFAULT;
	}

	result = of_property_count_strings(pdev->dev.of_node,
		"qcom,scaling-exceptions");
	if (result < 0) {
		IPADBG("no exception list for ipa pm\n");
		result = 0;
	}

	if (result % (ipa_drv_res->pm_init.threshold_size + 1)) {
		IPAERR("failed to read qcom,scaling-exceptions\n");
		return -EFAULT;
	}

	ipa_drv_res->pm_init.exception_size = result /
		(ipa_drv_res->pm_init.threshold_size + 1);
	if (ipa_drv_res->pm_init.exception_size >=
		IPA_PM_EXCEPTION_MAX) {
		IPAERR("exception list larger than max %d\n",
			ipa_drv_res->pm_init.exception_size);
		return -EFAULT;
	}
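	/* Each exception entry in the DT is one usecase string followed by
	 * threshold_size threshold strings, hence the (threshold_size + 1)
	 * stride below.
	 */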
	for (i = 0; i < ipa_drv_res->pm_init.exception_size; i++) {
		struct ipa_pm_exception *ex = ipa_drv_res->pm_init.exceptions;

		result = of_property_read_string_index(pdev->dev.of_node,
			"qcom,scaling-exceptions",
			i * (ipa_drv_res->pm_init.threshold_size + 1),
			&ex[i].usecase);
		if (result) {
			IPAERR("failed to read qcom,scaling-exceptions\n");
			return -EFAULT;
		}

		for (j = 0; j < ipa_drv_res->pm_init.threshold_size; j++) {
			const char *str;

			result = of_property_read_string_index(
				pdev->dev.of_node,
				"qcom,scaling-exceptions",
				i * (ipa_drv_res->pm_init.threshold_size + 1)
					+ j + 1,
				&str);
			if (result) {
				IPAERR("failed to read qcom,scaling-exceptions\n");
				return -EFAULT;
			}

			if (kstrtou32(str, 0, &ex[i].threshold[j])) {
				IPAERR("error str=%s\n", str);
				return -EFAULT;
			}
		}
	}

	return 0;
}
static int get_ipa_dts_configuration(struct platform_device *pdev,
	struct ipa3_plat_drv_res *ipa_drv_res)
{
	int i, result, pos;
	struct resource *resource;
	u32 *ipa_tz_unlock_reg;
	int elem_num;
	u32 mhi_evid_limits[2];

	/* initialize ipa3_res */
	ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
	ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
	ipa_drv_res->ipa_hw_type = 0;
	ipa_drv_res->ipa3_hw_mode = 0;
	ipa_drv_res->platform_type = 0;
	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
	ipa_drv_res->ipa_wdi2 = false;
	ipa_drv_res->ipa_wdi2_over_gsi = false;
	ipa_drv_res->ipa_wdi3_over_gsi = false;
	ipa_drv_res->ipa_mhi_dynamic_config = false;
	ipa_drv_res->use_64_bit_dma_mask = false;
	ipa_drv_res->use_bw_vote = false;
	ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
	ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
	ipa_drv_res->apply_rg10_wa = false;
	ipa_drv_res->gsi_ch20_wa = false;
	ipa_drv_res->ipa_tz_unlock_reg_num = 0;
	ipa_drv_res->ipa_tz_unlock_reg = NULL;
	ipa_drv_res->mhi_evid_limits[0] = IPA_MHI_GSI_EVENT_RING_ID_START;
	ipa_drv_res->mhi_evid_limits[1] = IPA_MHI_GSI_EVENT_RING_ID_END;
	ipa_drv_res->ipa_fltrt_not_hashable = false;
	ipa_drv_res->ipa_endp_delay_wa = false;

	/* Get IPA HW Version */
	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
		&ipa_drv_res->ipa_hw_type);
	if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
		IPAERR(":get resource failed for ipa-hw-ver\n");
		return -ENODEV;
	}
	IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);

	if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
		IPAERR(":IPA version below 3.0 not supported\n");
		return -ENODEV;
	}

	if (ipa_drv_res->ipa_hw_type >= IPA_HW_MAX) {
		IPAERR(":IPA version is greater than the MAX\n");
		return -ENODEV;
	}
	/* Get IPA HW mode */
	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
		&ipa_drv_res->ipa3_hw_mode);
	if (result)
		IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
	else
		IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
			ipa_drv_res->ipa3_hw_mode);

	/* Get Platform Type */
	result = of_property_read_u32(pdev->dev.of_node, "qcom,platform-type",
		&ipa_drv_res->platform_type);
	if (result)
		IPADBG("using default (IPA_PLAT_TYPE_MDM) for platform-type\n");
	else
		IPADBG(": found ipa_drv_res->platform_type = %d",
			ipa_drv_res->platform_type);

	/* Get IPA WAN / LAN RX pool size */
	result = of_property_read_u32(pdev->dev.of_node,
		"qcom,wan-rx-ring-size",
		&ipa_drv_res->wan_rx_ring_size);
	if (result)
		IPADBG("using default for wan-rx-ring-size = %u\n",
			ipa_drv_res->wan_rx_ring_size);
	else
		IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
			ipa_drv_res->wan_rx_ring_size);

	result = of_property_read_u32(pdev->dev.of_node,
		"qcom,lan-rx-ring-size",
		&ipa_drv_res->lan_rx_ring_size);
	if (result)
		IPADBG("using default for lan-rx-ring-size = %u\n",
			ipa_drv_res->lan_rx_ring_size);
	else
		IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
			ipa_drv_res->lan_rx_ring_size);
	ipa_drv_res->use_ipa_teth_bridge =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,use-ipa-tethering-bridge");
	IPADBG(": using ipa teth bridge = %s",
		ipa_drv_res->use_ipa_teth_bridge
		? "True" : "False");

	ipa_drv_res->ipa_mhi_dynamic_config =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,use-ipa-in-mhi-mode");
	IPADBG(": ipa_mhi_dynamic_config (%s)\n",
		ipa_drv_res->ipa_mhi_dynamic_config
		? "True" : "False");

	ipa_drv_res->modem_cfg_emb_pipe_flt =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,modem-cfg-emb-pipe-flt");
	IPADBG(": modem configure embedded pipe filtering = %s\n",
		ipa_drv_res->modem_cfg_emb_pipe_flt
		? "True" : "False");

	ipa_drv_res->ipa_wdi2_over_gsi =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-wdi2_over_gsi");
	IPADBG(": WDI-2.0 over gsi = %s\n",
		ipa_drv_res->ipa_wdi2_over_gsi
		? "True" : "False");

	ipa_drv_res->ipa_endp_delay_wa =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-endp-delay-wa");
	IPADBG(": endpoint delay wa = %s\n",
		ipa_drv_res->ipa_endp_delay_wa
		? "True" : "False");

	ipa_drv_res->ipa_wdi3_over_gsi =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-wdi3-over-gsi");
	IPADBG(": WDI-3.0 over gsi = %s\n",
		ipa_drv_res->ipa_wdi3_over_gsi
		? "True" : "False");

	ipa_drv_res->ipa_wdi2 =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-wdi2");
	IPADBG(": WDI-2.0 = %s\n",
		ipa_drv_res->ipa_wdi2
		? "True" : "False");

	ipa_drv_res->ipa_fltrt_not_hashable =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-fltrt-not-hashable");
	IPADBG(": IPA filter/route rule not hashable = %s\n",
		ipa_drv_res->ipa_fltrt_not_hashable
		? "True" : "False");

	ipa_drv_res->use_64_bit_dma_mask =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,use-64-bit-dma-mask");
	IPADBG(": use_64_bit_dma_mask = %s\n",
		ipa_drv_res->use_64_bit_dma_mask
		? "True" : "False");

	ipa_drv_res->use_bw_vote =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,bandwidth-vote-for-ipa");
	IPADBG(": use_bw_vote = %s\n",
		ipa_drv_res->use_bw_vote
		? "True" : "False");

	ipa_drv_res->skip_uc_pipe_reset =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,skip-uc-pipe-reset");
	IPADBG(": skip uC pipe reset = %s\n",
		ipa_drv_res->skip_uc_pipe_reset
		? "True" : "False");

	ipa_drv_res->tethered_flow_control =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,tethered-flow-control");
	IPADBG(": Use apps based flow control = %s\n",
		ipa_drv_res->tethered_flow_control
		? "True" : "False");
	/* Get IPA wrapper address */
	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
		"ipa-base");
	if (!resource) {
		IPAERR(":get resource failed for ipa-base!\n");
		return -ENODEV;
	}
	ipa_drv_res->ipa_mem_base = resource->start;
	ipa_drv_res->ipa_mem_size = resource_size(resource);
	IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
		ipa_drv_res->ipa_mem_base,
		ipa_drv_res->ipa_mem_size);

	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;

	/* Get IPA GSI address */
	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
		"gsi-base");
	if (!resource) {
		IPAERR(":get resource failed for gsi-base\n");
		return -ENODEV;
	}
	ipa_drv_res->transport_mem_base = resource->start;
	ipa_drv_res->transport_mem_size = resource_size(resource);
	IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
		ipa_drv_res->transport_mem_base,
		ipa_drv_res->transport_mem_size);

	/* Get IPA GSI IRQ number */
	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
		"gsi-irq");
	if (!resource) {
		IPAERR(":get resource failed for gsi-irq\n");
		return -ENODEV;
	}
	ipa_drv_res->transport_irq = resource->start;
	IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);

	/* Get IPA pipe mem start ofst */
	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
		"ipa-pipe-mem");
	if (!resource) {
		IPADBG(":not using pipe memory - resource does not exist\n");
	} else {
		ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
		ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
		IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
			ipa_drv_res->ipa_pipe_mem_start_ofst,
			ipa_drv_res->ipa_pipe_mem_size);
	}

	/* Get IPA IRQ number */
	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
		"ipa-irq");
	if (!resource) {
		IPAERR(":get resource failed for ipa-irq\n");
		return -ENODEV;
	}
	ipa_drv_res->ipa_irq = resource->start;
	IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);

	result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
		&ipa_drv_res->ee);
	if (result)
		ipa_drv_res->ee = 0;
	IPADBG(":ee = %u\n", ipa_drv_res->ee);

	ipa_drv_res->apply_rg10_wa =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,use-rg10-limitation-mitigation");
	IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
		ipa_drv_res->apply_rg10_wa
		? "True" : "False");

	ipa_drv_res->gsi_ch20_wa =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,do-not-use-ch-gsi-20");
	IPADBG(": GSI CH 20 WA is = %s\n",
		ipa_drv_res->gsi_ch20_wa
		? "Needed" : "Not needed");
	elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
		"qcom,mhi-event-ring-id-limits", sizeof(u32));
	if (elem_num == 2) {
		if (of_property_read_u32_array(pdev->dev.of_node,
			"qcom,mhi-event-ring-id-limits", mhi_evid_limits, 2)) {
			IPAERR("failed to read mhi event ring id limits\n");
			return -EFAULT;
		}
		if (mhi_evid_limits[0] > mhi_evid_limits[1]) {
			IPAERR("mhi event ring id low limit > high limit\n");
			return -EFAULT;
		}
		ipa_drv_res->mhi_evid_limits[0] = mhi_evid_limits[0];
		ipa_drv_res->mhi_evid_limits[1] = mhi_evid_limits[1];
		IPADBG(": mhi-event-ring-id-limits start=%u end=%u\n",
			mhi_evid_limits[0], mhi_evid_limits[1]);
	} else {
		if (elem_num > 0) {
			IPAERR("Invalid mhi event ring id limits number %d\n",
				elem_num);
			return -EINVAL;
		}
		IPADBG("use default mhi evt ring id limits start=%u end=%u\n",
			ipa_drv_res->mhi_evid_limits[0],
			ipa_drv_res->mhi_evid_limits[1]);
	}
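	/* Each TZ unlock entry in the DT is a pair of u32 cells:
	 * a register address followed by a region size.
	 */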
	elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
		"qcom,ipa-tz-unlock-reg", sizeof(u32));
	if (elem_num > 0 && elem_num % 2 == 0) {
		ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;

		ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
		if (ipa_tz_unlock_reg == NULL)
			return -ENOMEM;

		ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
			ipa_drv_res->ipa_tz_unlock_reg_num,
			sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
			GFP_KERNEL);
		if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
			kfree(ipa_tz_unlock_reg);
			return -ENOMEM;
		}

		if (of_property_read_u32_array(pdev->dev.of_node,
			"qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
			elem_num)) {
			IPAERR("failed to read register addresses\n");
			kfree(ipa_tz_unlock_reg);
			kfree(ipa_drv_res->ipa_tz_unlock_reg);
			ipa_drv_res->ipa_tz_unlock_reg = NULL;
			return -EFAULT;
		}

		pos = 0;
		for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
			ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
				ipa_tz_unlock_reg[pos++];
			ipa_drv_res->ipa_tz_unlock_reg[i].size =
				ipa_tz_unlock_reg[pos++];
			IPADBG("tz unlock reg %d: addr 0x%pa size %llu\n", i,
				&ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
				ipa_drv_res->ipa_tz_unlock_reg[i].size);
		}
		kfree(ipa_tz_unlock_reg);
	}
	/* get IPA PM related information */
	result = get_ipa_dts_pm_info(pdev, ipa_drv_res);
	if (result) {
		IPAERR("failed to get pm info from dts %d\n", result);
		return result;
	}

	ipa_drv_res->wdi_over_pcie =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,wlan-ce-db-over-pcie");
	IPADBG("Is wdi_over_pcie ? (%s)\n",
		ipa_drv_res->wdi_over_pcie ? "Yes" : "No");

	/*
	 * If we're on the emulator, get its interrupt controller's mem
	 * start and size
	 */
	if (ipa_drv_res->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
		resource = platform_get_resource_byname(
			pdev, IORESOURCE_MEM, "intctrl-base");
		if (!resource) {
			IPAERR(":Can't find intctrl-base resource\n");
			return -ENODEV;
		}
		ipa_drv_res->emulator_intcntrlr_mem_base =
			resource->start;
		ipa_drv_res->emulator_intcntrlr_mem_size =
			resource_size(resource);
		IPADBG(":using intctrl-base at 0x%x of size 0x%x\n",
			ipa_drv_res->emulator_intcntrlr_mem_base,
			ipa_drv_res->emulator_intcntrlr_mem_size);
	}

	ipa_drv_res->entire_ipa_block_size = 0x100000;
	result = of_property_read_u32(pdev->dev.of_node,
		"qcom,entire-ipa-block-size",
		&ipa_drv_res->entire_ipa_block_size);
	IPADBG(": entire_ipa_block_size = %d\n",
		ipa_drv_res->entire_ipa_block_size);

	/*
	 * We'll read register-collection-on-crash here, but log it
	 * later below because its value may change based on other
	 * subsequent dtsi reads.
	 */
	ipa_drv_res->do_register_collection_on_crash =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,register-collection-on-crash");

	/* We'll read testbus-collection-on-crash here... */
	ipa_drv_res->do_testbus_collection_on_crash =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,testbus-collection-on-crash");
	IPADBG(": doing testbus collection on crash = %u\n",
		ipa_drv_res->do_testbus_collection_on_crash);

	/* We'll read non-tn-collection-on-crash here... */
	ipa_drv_res->do_non_tn_collection_on_crash =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,non-tn-collection-on-crash");
	IPADBG(": doing non-tn collection on crash = %u\n",
		ipa_drv_res->do_non_tn_collection_on_crash);

	/* We'll read ram-collection-on-crash here... */
	ipa_drv_res->do_ram_collection_on_crash =
		of_property_read_bool(
			pdev->dev.of_node,
			"qcom,ram-collection-on-crash");
	IPADBG(": doing ram collection on crash = %u\n",
		ipa_drv_res->do_ram_collection_on_crash);

	if (ipa_drv_res->do_testbus_collection_on_crash ||
	    ipa_drv_res->do_non_tn_collection_on_crash ||
	    ipa_drv_res->do_ram_collection_on_crash)
		ipa_drv_res->do_register_collection_on_crash = true;

	IPADBG(": doing register collection on crash = %u\n",
		ipa_drv_res->do_register_collection_on_crash);

	result = of_property_read_u32(
		pdev->dev.of_node,
		"qcom,secure-debug-check-action",
		&ipa_drv_res->secure_debug_check_action);
	if (result ||
	    (ipa_drv_res->secure_debug_check_action != 0 &&
	     ipa_drv_res->secure_debug_check_action != 1 &&
	     ipa_drv_res->secure_debug_check_action != 2))
		ipa_drv_res->secure_debug_check_action = USE_SCM;

	IPADBG(": secure-debug-check-action = %d\n",
		ipa_drv_res->secure_debug_check_action);

	return 0;
}
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
	int fast = 0;
	int bypass = 0;
	u32 add_map_size;
	const u32 *add_map;
	int i;
	u32 iova_ap_mapping[2];

	IPADBG("WLAN CB PROBE dev=%pK\n", dev);

	if (!smmu_info.present[IPA_SMMU_CB_WLAN]) {
		IPAERR("WLAN SMMU is disabled\n");
		return 0;
	}

	IPADBG("WLAN CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);
	cb->iommu_domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
		IPAERR("could not get iommu domain\n");
		return -EINVAL;
	}
	IPADBG("WLAN CB PROBE mapping retrieved\n");

	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
		"dma-coherent");
	cb->dev = dev;
	cb->valid = true;

	cb->va_start = cb->va_end = cb->va_size = 0;
	if (of_property_read_u32_array(
		dev->of_node, "qcom,iommu-dma-addr-pool",
		iova_ap_mapping, 2) == 0) {
		cb->va_start = iova_ap_mapping[0];
		cb->va_size = iova_ap_mapping[1];
		cb->va_end = cb->va_start + cb->va_size;
	}

	IPADBG("WLAN CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
		dev, cb->va_start, cb->va_size);

	/*
	 * Prior to these calls to iommu_domain_get_attr(), these
	 * attributes were set in this function from dtsi values owned
	 * by this driver: if corresponding ipa driver owned values
	 * were found in the dtsi, they were read and set here.
	 *
	 * Now the developer sets them through iommu owned dtsi
	 * settings instead; the calls below simply check whether they
	 * have been set there, and the logic further below acts
	 * accordingly.
	 */
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);
	IPADBG("WLAN CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
		dev, bypass, fast);
	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = (bypass != 0);

	/* map ipa-uc ram */
	add_map = of_get_property(dev->of_node,
		"qcom,additional-mapping", &add_map_size);
	if (add_map) {
		/* mapping size is an array of 3-tuples of u32 */
		if (add_map_size % (3 * sizeof(u32))) {
			IPAERR("wrong additional mapping format\n");
			cb->valid = false;
			return -EFAULT;
		}

		/* iterate over each entry of the additional mapping array */
		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
			u32 iova = be32_to_cpu(add_map[i]);
			u32 pa = be32_to_cpu(add_map[i + 1]);
			u32 size = be32_to_cpu(add_map[i + 2]);
			unsigned long iova_p;
			phys_addr_t pa_p;
			u32 size_p;
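			/* round the region out to page boundaries before
			 * creating the IOMMU mapping
			 */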
			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
				iova_p, pa_p, size_p);
			IPADBG_LOW("mapping 0x%lx to 0x%pa size %d\n",
				iova_p, &pa_p, size_p);
			ipa3_iommu_map(cb->iommu_domain,
				iova_p, pa_p, size_p,
				IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
		}
	}
	return 0;
}
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
	int bypass = 0;
	int fast = 0;
	u32 iova_ap_mapping[2];

	IPADBG("UC CB PROBE dev=%pK\n", dev);

	if (!smmu_info.present[IPA_SMMU_CB_UC]) {
		IPAERR("UC SMMU is disabled\n");
		return 0;
	}

	if (smmu_info.use_64_bit_dma_mask) {
		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
		    dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
			IPAERR("DMA set 64bit mask failed\n");
			return -EOPNOTSUPP;
		}
	} else {
		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
			IPAERR("DMA set 32bit mask failed\n");
			return -EOPNOTSUPP;
		}
	}

	IPADBG("UC CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);
	cb->iommu_domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
		IPAERR("could not get iommu domain\n");
		return -EINVAL;
	}
	IPADBG("UC CB PROBE mapping retrieved\n");

	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
		"dma-coherent");
	cb->dev = dev;
	cb->valid = true;

	cb->va_start = cb->va_end = cb->va_size = 0;
	if (of_property_read_u32_array(
		dev->of_node, "qcom,iommu-dma-addr-pool",
		iova_ap_mapping, 2) == 0) {
		cb->va_start = iova_ap_mapping[0];
		cb->va_size = iova_ap_mapping[1];
		cb->va_end = cb->va_start + cb->va_size;
	}

	IPADBG("UC CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
		dev, cb->va_start, cb->va_size);

	/*
	 * See the comment in ipa_smmu_wlan_cb_probe(): these attributes
	 * are now set through iommu owned dtsi settings; the calls below
	 * only check whether they've been set there.
	 */
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);
	IPADBG("UC CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
		dev, bypass, fast);
	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = (bypass != 0);

	ipa3_ctx->uc_pdev = dev;

	return 0;
}
static int ipa_smmu_ap_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
	int fast = 0;
	int bypass = 0;
	u32 add_map_size;
	const u32 *add_map;
	void *smem_addr;
	size_t smem_size;
	u32 ipa_smem_size = 0;
	int ret;
	int i;
	unsigned long iova_p;
	phys_addr_t pa_p;
	u32 size_p;
	phys_addr_t iova;
	phys_addr_t pa;
	u32 iova_ap_mapping[2];

	IPADBG("AP CB PROBE dev=%pK\n", dev);

	if (!smmu_info.present[IPA_SMMU_CB_AP]) {
		IPAERR("AP SMMU is disabled\n");
		return 0;
	}

	if (smmu_info.use_64_bit_dma_mask) {
		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
		    dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
			IPAERR("DMA set 64bit mask failed\n");
			return -EOPNOTSUPP;
		}
	} else {
		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
			IPAERR("DMA set 32bit mask failed\n");
			return -EOPNOTSUPP;
		}
	}

	IPADBG("AP CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);
	cb->iommu_domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
		IPAERR("could not get iommu domain\n");
		return -EINVAL;
	}
	IPADBG("AP CB PROBE mapping retrieved\n");

	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
		"dma-coherent");
	cb->dev = dev;
	cb->valid = true;

	cb->va_start = cb->va_end = cb->va_size = 0;
	if (of_property_read_u32_array(
		dev->of_node, "qcom,iommu-dma-addr-pool",
		iova_ap_mapping, 2) == 0) {
		cb->va_start = iova_ap_mapping[0];
		cb->va_size = iova_ap_mapping[1];
		cb->va_end = cb->va_start + cb->va_size;
	}

	IPADBG("AP CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
		dev, cb->va_start, cb->va_size);

	/*
	 * See the comment in ipa_smmu_wlan_cb_probe(): these attributes
	 * are now set through iommu owned dtsi settings; the calls below
	 * only check whether they've been set there.
	 */
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);
	IPADBG("AP CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
		dev, bypass, fast);
	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = (bypass != 0);
	add_map = of_get_property(dev->of_node,
		"qcom,additional-mapping", &add_map_size);
	if (add_map) {
		/* mapping size is an array of 3-tuples of u32 */
		if (add_map_size % (3 * sizeof(u32))) {
			IPAERR("wrong additional mapping format\n");
			cb->valid = false;
			return -EFAULT;
		}

		/* iterate over each entry of the additional mapping array */
		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
			u32 iova = be32_to_cpu(add_map[i]);
			u32 pa = be32_to_cpu(add_map[i + 1]);
			u32 size = be32_to_cpu(add_map[i + 2]);
			unsigned long iova_p;
			phys_addr_t pa_p;
			u32 size_p;

			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
				iova_p, pa_p, size_p);
			IPADBG_LOW("mapping 0x%lx to 0x%pa size %d\n",
				iova_p, &pa_p, size_p);
			ipa3_iommu_map(cb->iommu_domain,
				iova_p, pa_p, size_p,
				IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
		}
	}

	ret = of_property_read_u32(dev->of_node, "qcom,ipa-q6-smem-size",
		&ipa_smem_size);
	if (ret) {
		IPADBG("ipa q6 smem size (default) = %u\n", IPA_SMEM_SIZE);
		ipa_smem_size = IPA_SMEM_SIZE;
	} else {
		IPADBG("ipa q6 smem size = %u\n", ipa_smem_size);
	}
	if (ipa3_ctx->platform_type != IPA_PLAT_TYPE_APQ) {
		/* map SMEM memory for IPA table accesses */
		ret = qcom_smem_alloc(SMEM_MODEM,
			SMEM_IPA_FILTER_TABLE,
			ipa_smem_size);
		if (ret < 0 && ret != -EEXIST) {
			IPAERR("unable to allocate smem MODEM entry\n");
			cb->valid = false;
			return -EFAULT;
		}
		smem_addr = qcom_smem_get(SMEM_MODEM,
			SMEM_IPA_FILTER_TABLE,
			&smem_size);
		if (IS_ERR(smem_addr)) {
			IPAERR("unable to acquire smem MODEM entry\n");
			cb->valid = false;
			return -EFAULT;
		}
		if (smem_size != ipa_smem_size)
			IPAERR("unexpected read q6 smem size %zu %u\n",
				smem_size, ipa_smem_size);
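		/* the SMEM region is identity-mapped: use its physical
		 * address as the IOVA
		 */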
		iova = qcom_smem_virt_to_phys(smem_addr);
		pa = iova;

		IPA_SMMU_ROUND_TO_PAGE(iova, pa, ipa_smem_size,
			iova_p, pa_p, size_p);
		IPADBG("mapping 0x%lx to 0x%pa size %d\n",
			iova_p, &pa_p, size_p);
		ipa3_iommu_map(cb->iommu_domain,
			iova_p, pa_p, size_p,
			IOMMU_READ | IOMMU_WRITE);
	}

	smmu_info.present[IPA_SMMU_CB_AP] = true;
	ipa3_ctx->pdev = dev;
	cb->next_addr = cb->va_end;

	return 0;
}
static int ipa_smmu_11ad_cb_probe(struct device *dev)
{
	int bypass = 0;
	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
	u32 iova_ap_mapping[2];

	IPADBG("11AD CB probe: dev=%pK\n", dev);

	if (!smmu_info.present[IPA_SMMU_CB_11AD]) {
		IPAERR("11AD SMMU is disabled\n");
		return 0;
	}

	cb->iommu_domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
		IPAERR("could not get iommu domain\n");
		return -EINVAL;
	}

	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
		"dma-coherent");
	cb->dev = dev;
	cb->valid = true;

	cb->va_start = cb->va_end = cb->va_size = 0;
	if (of_property_read_u32_array(
		dev->of_node, "qcom,iommu-dma-addr-pool",
		iova_ap_mapping, 2) == 0) {
		cb->va_start = iova_ap_mapping[0];
		cb->va_size = iova_ap_mapping[1];
		cb->va_end = cb->va_start + cb->va_size;
	}

	IPADBG("11AD CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
		dev, cb->va_start, cb->va_size);

	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
	IPADBG("11AD CB PROBE dev=%pK DOMAIN ATTRS bypass=%d\n",
		dev, bypass);
	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] = (bypass != 0);

	if (of_property_read_bool(dev->of_node, "qcom,shared-cb")) {
		IPADBG("11AD using shared CB\n");
		cb->shared = true;
	}

	return 0;
}
static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type)
{
	switch (cb_type) {
	case IPA_SMMU_CB_AP:
		return ipa_smmu_ap_cb_probe(dev);
	case IPA_SMMU_CB_WLAN:
		return ipa_smmu_wlan_cb_probe(dev);
	case IPA_SMMU_CB_UC:
		return ipa_smmu_uc_cb_probe(dev);
	case IPA_SMMU_CB_11AD:
		return ipa_smmu_11ad_cb_probe(dev);
	case IPA_SMMU_CB_MAX:
		IPAERR("Invalid cb_type\n");
	}
	return 0;
}

static int ipa3_attach_to_smmu(void)
{
	struct ipa_smmu_cb_ctx *cb;
	int i, result;
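	/* Default both pdevs to the master device; the individual CB
	 * probes override them when SMMU is active.
	 */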
	ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev;
	ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;

	if (smmu_info.arm_smmu) {
		IPADBG("smmu is enabled\n");
		for (i = 0; i < IPA_SMMU_CB_MAX; i++) {
			cb = ipa3_get_smmu_ctx(i);
			result = ipa_smmu_cb_probe(cb->dev, i);
			if (result)
				IPAERR("probe failed for cb %d\n", i);
		}
	} else {
		IPADBG("smmu is disabled\n");
	}
	return 0;
}
static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
{
	ipa3_freeze_clock_vote_and_notify_modem();
	return IRQ_HANDLED;
}

static int ipa3_smp2p_probe(struct device *dev)
{
	struct device_node *node = dev->of_node;
	int res;
	int irq = 0;

	if (ipa3_ctx == NULL) {
		IPAERR("ipa3_ctx was not initialized\n");
		return -EPROBE_DEFER;
	}
	IPADBG("node->name=%s\n", node->name);

	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
		IPADBG("Ignore smp2p on APQ platform\n");
		return 0;
	}

	if (strcmp("qcom,smp2p_map_ipa_1_out", node->name) == 0) {
		if (of_find_property(node, "qcom,smem-states", NULL)) {
			ipa3_ctx->smp2p_info.smem_state =
				qcom_smem_state_get(dev, "ipa-smp2p-out",
					&ipa3_ctx->smp2p_info.smem_bit);
			if (IS_ERR(ipa3_ctx->smp2p_info.smem_state)) {
				IPAERR("fail to get smp2p clk resp bit %ld\n",
					PTR_ERR(ipa3_ctx->smp2p_info.smem_state));
				return PTR_ERR(ipa3_ctx->smp2p_info.smem_state);
			}
			IPADBG("smem_bit=%d\n", ipa3_ctx->smp2p_info.smem_bit);
		}
	} else if (strcmp("qcom,smp2p_map_ipa_1_in", node->name) == 0) {
		res = irq = of_irq_get_byname(node, "ipa-smp2p-in");
		if (res < 0) {
			IPADBG("of_irq_get_byname returned %d\n", irq);
			return res;
		}
		ipa3_ctx->smp2p_info.in_base_id = irq;
		IPADBG("smp2p irq#=%d\n", irq);
		res = devm_request_threaded_irq(dev, irq, NULL,
			(irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			"ipa_smp2p_clk_vote", dev);
		if (res) {
			IPAERR("fail to register smp2p irq=%d\n", irq);
			return -ENODEV;
		}
	}
	return 0;
}

int ipa3_plat_drv_probe(struct platform_device *pdev_p,
	struct ipa_api_controller *api_ctrl,
	const struct of_device_id *pdrv_match)
{
	int result;
	struct device *dev = &pdev_p->dev;
	struct ipa_smmu_cb_ctx *cb;

	IPADBG("IPA driver probing started\n");
	IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) {
		if (ipa3_ctx == NULL) {
			IPAERR("ipa3_ctx was not initialized\n");
			return -EPROBE_DEFER;
		}
		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
		cb->dev = dev;
		smmu_info.present[IPA_SMMU_CB_AP] = true;
		return 0;
	}

	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb")) {
		if (ipa3_ctx == NULL) {
			IPAERR("ipa3_ctx was not initialized\n");
			return -EPROBE_DEFER;
		}
		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
		cb->dev = dev;
		smmu_info.present[IPA_SMMU_CB_WLAN] = true;
		return 0;
	}

	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb")) {
		if (ipa3_ctx == NULL) {
			IPAERR("ipa3_ctx was not initialized\n");
			return -EPROBE_DEFER;
		}
		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
		cb->dev = dev;
		smmu_info.present[IPA_SMMU_CB_UC] = true;
		return 0;
	}

	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-11ad-cb")) {
		if (ipa3_ctx == NULL) {
			IPAERR("ipa3_ctx was not initialized\n");
			return -EPROBE_DEFER;
		}
		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
		cb->dev = dev;
		smmu_info.present[IPA_SMMU_CB_11AD] = true;
		return 0;
	}

	if (of_device_is_compatible(dev->of_node,
	    "qcom,smp2p-map-ipa-1-out"))
		return ipa3_smp2p_probe(dev);
	if (of_device_is_compatible(dev->of_node,
	    "qcom,smp2p-map-ipa-1-in"))
		return ipa3_smp2p_probe(dev);

	result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
	if (result) {
		IPAERR("IPA dts parsing failed\n");
		return result;
	}

	result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
	if (result) {
		IPAERR("IPA API binding failed\n");
		return result;
	}
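	/* With SMMU enabled, DMA masks are set per context bank in the CB
	 * probes; without it, set them on the platform device directly.
	 */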
  6885. if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
  6886. if (of_property_read_bool(pdev_p->dev.of_node,
  6887. "qcom,use-64-bit-dma-mask"))
  6888. smmu_info.use_64_bit_dma_mask = true;
  6889. smmu_info.arm_smmu = true;
  6890. } else {
  6891. if (of_property_read_bool(pdev_p->dev.of_node,
  6892. "qcom,use-64-bit-dma-mask")) {
  6893. if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
  6894. dma_set_coherent_mask(&pdev_p->dev,
  6895. DMA_BIT_MASK(64))) {
  6896. IPAERR("DMA set 64bit mask failed\n");
  6897. return -EOPNOTSUPP;
  6898. }
  6899. } else {
  6900. if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
  6901. dma_set_coherent_mask(&pdev_p->dev,
  6902. DMA_BIT_MASK(32))) {
  6903. IPAERR("DMA set 32bit mask failed\n");
  6904. return -EOPNOTSUPP;
  6905. }
  6906. }
  6907. }
  6908. /* Proceed to real initialization */
  6909. result = ipa3_pre_init(&ipa3_res, pdev_p);
  6910. if (result) {
  6911. IPAERR("ipa3_init failed\n");
  6912. return result;
  6913. }
  6914. result = of_platform_populate(pdev_p->dev.of_node,
  6915. pdrv_match, NULL, &pdev_p->dev);
  6916. if (result) {
  6917. IPAERR("failed to populate platform\n");
  6918. return result;
  6919. }
  6920. return result;
  6921. }
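
/*
 * Illustrative only: ipa3_plat_drv_probe() is typically reached from a
 * thin platform_driver probe wrapper in the API layer. The names below
 * (ipa_generic_probe, api_ctrl, ipa_plat_drv_match) are a sketch, not
 * the driver's actual registration code.
 *
 *	static int ipa_generic_probe(struct platform_device *pdev)
 *	{
 *		return ipa3_plat_drv_probe(pdev, &api_ctrl,
 *			ipa_plat_drv_match);
 *	}
 *
 *	static struct platform_driver ipa_plat_drv = {
 *		.probe = ipa_generic_probe,
 *		.driver = {
 *			.name = "ipa",
 *			.of_match_table = ipa_plat_drv_match,
 *		},
 *	};
 */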
/**
 * ipa3_ap_suspend() - suspend callback for runtime_pm
 * @dev: pointer to device
 *
 * This callback will be invoked by the runtime_pm framework when an AP suspend
 * operation is invoked, usually by pressing a suspend button.
 *
 * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
 * This will postpone the suspend operation until IPA is no longer used by AP.
 */
int ipa3_ap_suspend(struct device *dev)
{
	int i;

	IPADBG("Enter...\n");

	/* In case there is a tx/rx handler in polling mode, fail to suspend */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (ipa3_ctx->ep[i].sys &&
			atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
			IPAERR("EP %d is in polling state, do not suspend\n",
				i);
			return -EAGAIN;
		}
	}

	ipa_pm_deactivate_all_deferred();

	IPADBG("Exit\n");

	return 0;
}
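
/*
 * Illustrative only: a sketch of how these two callbacks could be wired
 * into a dev_pm_ops table. The choice of the *_noirq fields here is an
 * assumption, not necessarily what the driver actually registers.
 *
 *	static const struct dev_pm_ops ipa_pm_ops = {
 *		.suspend_noirq = ipa3_ap_suspend,
 *		.resume_noirq = ipa3_ap_resume,
 *	};
 */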
/**
 * ipa3_ap_resume() - resume callback for runtime_pm
 * @dev: pointer to device
 *
 * This callback will be invoked by the runtime_pm framework when an AP resume
 * operation is invoked.
 *
 * Always returns 0 since resume should always succeed.
 */
int ipa3_ap_resume(struct device *dev)
{
	return 0;
}

struct ipa3_context *ipa3_get_ctx(void)
{
	return ipa3_ctx;
}
static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
{
	/*
	 * These values are reported by hardware. Any error indicates
	 * an unexpected hardware state.
	 */
	switch (notify->evt_id) {
	case GSI_PER_EVT_GLOB_ERROR:
		IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
		IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
		break;
	case GSI_PER_EVT_GLOB_GP1:
		IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
		ipa_assert();
		break;
	case GSI_PER_EVT_GLOB_GP2:
		IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
		ipa_assert();
		break;
	case GSI_PER_EVT_GLOB_GP3:
		IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
		ipa_assert();
		break;
	case GSI_PER_EVT_GENERAL_BREAK_POINT:
		IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
		break;
	case GSI_PER_EVT_GENERAL_BUS_ERROR:
		IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
		ipa_assert();
		break;
	case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
		IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
		ipa_assert();
		break;
	case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
		IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
		ipa_assert();
		break;
	default:
		IPAERR("Received unexpected evt: %d\n",
			notify->evt_id);
		ipa_assert();
	}
}
int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
{
	struct ipa3_ready_cb_info *cb_info = NULL;

	/* check whether ipa3_ctx has been initialized */
	if (!ipa3_ctx) {
		IPADBG("IPA driver hasn't been initialized yet\n");
		return -ENXIO;
	}

	mutex_lock(&ipa3_ctx->lock);
	if (ipa3_ctx->ipa_initialization_complete) {
		mutex_unlock(&ipa3_ctx->lock);
		IPADBG("IPA driver already finished initialization\n");
		return -EEXIST;
	}

	cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
	if (!cb_info) {
		mutex_unlock(&ipa3_ctx->lock);
		return -ENOMEM;
	}

	cb_info->ready_cb = ipa_ready_cb;
	cb_info->user_data = user_data;

	list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
	mutex_unlock(&ipa3_ctx->lock);

	return 0;
}
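
/*
 * Illustrative only: a client would typically register for the ready
 * notification and fall back to invoking its callback directly when the
 * driver is already initialized (-EEXIST, per the function above).
 * my_ipa_ready_cb and my_ctx are hypothetical names.
 *
 *	ret = ipa3_register_ipa_ready_cb(my_ipa_ready_cb, my_ctx);
 *	if (ret == -EEXIST)
 *		my_ipa_ready_cb(my_ctx);	// IPA already up, call now
 *	else if (ret)
 *		goto fail;	// -ENXIO or -ENOMEM
 */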
int ipa3_iommu_map(struct iommu_domain *domain,
	unsigned long iova, phys_addr_t paddr, size_t size, int prot)
{
	struct ipa_smmu_cb_ctx *cb = NULL;

	IPADBG_LOW("domain =0x%pK iova 0x%lx\n", domain, iova);
	IPADBG_LOW("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);

	/* make sure the iova does not overlap the CB's own VA range */
	if (domain == ipa3_get_smmu_domain()) {
		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
		if (iova >= cb->va_start && iova < cb->va_end) {
			IPAERR("iommu AP overlap addr 0x%lx\n", iova);
			ipa_assert();
			return -EFAULT;
		}
	} else if (domain == ipa3_get_wlan_smmu_domain()) {
		/* wlan is one time map */
		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
	} else if (domain == ipa3_get_11ad_smmu_domain()) {
		/* 11ad is one time map */
		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
	} else if (domain == ipa3_get_uc_smmu_domain()) {
		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
		if (iova >= cb->va_start && iova < cb->va_end) {
			IPAERR("iommu uC overlap addr 0x%lx\n", iova);
			ipa_assert();
			return -EFAULT;
		}
	} else {
		IPAERR("Unexpected domain 0x%pK\n", domain);
		ipa_assert();
		return -EFAULT;
	}

	if (cb == NULL) {
		IPAERR("Unexpected cb turning NULL for domain 0x%pK\n", domain);
		ipa_assert();
		/* bail out to avoid a NULL dereference below in case the
		 * assert is compiled out
		 */
		return -EFAULT;
	}

	/*
	 * IOMMU_CACHE is needed to make the entries cacheable
	 * if cache coherency is enabled in dtsi.
	 */
	if (cb->is_cache_coherent)
		prot |= IOMMU_CACHE;

	return iommu_map(domain, iova, paddr, size, prot);
}
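
/*
 * Illustrative only: mapping one page into the WLAN CB's domain through
 * the wrapper above; iova and paddr are hypothetical, already-allocated
 * values.
 *
 *	ret = ipa3_iommu_map(ipa3_get_wlan_smmu_domain(),
 *			     iova, paddr, PAGE_SIZE,
 *			     IOMMU_READ | IOMMU_WRITE);
 */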
/**
 * ipa3_get_smmu_params() - Return the ipa3 SMMU-related params.
 * @in: describes the client whose SMMU status is queried
 * @out: filled with the SMMU enablement status for that client
 */
int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
	struct ipa_smmu_out_params *out)
{
	bool is_smmu_enable = false;

	if (out == NULL || in == NULL) {
		IPAERR("bad params for client SMMU out params\n");
		return -EINVAL;
	}

	if (!ipa3_ctx) {
		IPAERR("IPA not yet initialized\n");
		return -EINVAL;
	}

	out->shared_cb = false;

	switch (in->smmu_client) {
	case IPA_SMMU_WLAN_CLIENT:
		if (ipa3_ctx->ipa_wdi3_over_gsi ||
			ipa3_ctx->ipa_wdi2_over_gsi)
			is_smmu_enable =
				!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] ||
				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
		else
			is_smmu_enable =
				!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] ||
				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
		break;
	case IPA_SMMU_WIGIG_CLIENT:
		is_smmu_enable = !(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] ||
			ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] ||
			ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]);
		if (is_smmu_enable) {
			if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] ||
				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] ||
				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
				IPAERR("11AD SMMU Discrepancy (%d %d %d)\n",
					ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC],
					ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP],
					ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD]);
				WARN_ON(1);
				return -EINVAL;
			}
		} else {
			/*
			 * the UC, 11AD and AP CBs must either all be in
			 * bypass or none of them; a mix is a configuration
			 * discrepancy
			 */
			if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] ||
				!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] ||
				!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
				IPAERR("11AD SMMU Discrepancy (%d %d %d)\n",
					ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC],
					ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP],
					ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD]);
				WARN_ON(1);
				return -EINVAL;
			}
		}
		out->shared_cb = (ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD))->shared;
		break;
	case IPA_SMMU_AP_CLIENT:
		is_smmu_enable =
			!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]);
		break;
	default:
		is_smmu_enable = false;
		IPAERR("trying to get SMMU status for an illegal client\n");
		return -EINVAL;
	}

	out->smmu_enable = is_smmu_enable;

	return 0;
}
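
/*
 * Illustrative only: querying the WLAN client's SMMU status. The field
 * names follow the in/out structs as used above.
 *
 *	struct ipa_smmu_in_params in = {
 *		.smmu_client = IPA_SMMU_WLAN_CLIENT,
 *	};
 *	struct ipa_smmu_out_params out;
 *
 *	if (!ipa3_get_smmu_params(&in, &out) && out.smmu_enable)
 *		// program WLAN DMA with IOVAs rather than physical addrs
 */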
#define MAX_LEN 96

void ipa_pc_qmp_enable(void)
{
	char buf[MAX_LEN] = "{class: bcm, res: ipa_pc, val: 1}";
	struct qmp_pkt pkt;
	int ret = 0;
	struct ipa3_pc_mbox_data *mbox_data = &ipa3_ctx->pc_mbox;

	IPADBG("Enter\n");

	/* prepare the mailbox struct */
	mbox_data->mbox_client.dev = &ipa3_ctx->master_pdev->dev;
	mbox_data->mbox_client.tx_block = true;
	mbox_data->mbox_client.tx_tout = MBOX_TOUT_MS;
	mbox_data->mbox_client.knows_txdone = false;

	mbox_data->mbox = mbox_request_channel(&mbox_data->mbox_client, 0);
	if (IS_ERR(mbox_data->mbox)) {
		ret = PTR_ERR(mbox_data->mbox);
		if (ret != -EPROBE_DEFER)
			IPAERR("mailbox channel request failed, ret=%d\n", ret);
		return;
	}

	/* prepare the QMP packet to send */
	pkt.size = MAX_LEN;
	pkt.data = buf;

	/* send the QMP packet to AOP */
	ret = mbox_send_message(mbox_data->mbox, &pkt);
	if (ret < 0)
		IPAERR("qmp message send failed, ret=%d\n", ret);

	if (mbox_data->mbox) {
		mbox_free_channel(mbox_data->mbox);
		mbox_data->mbox = NULL;
	}
}
/**************************************************************
 * PCIe Version
 *************************************************************/

int ipa3_pci_drv_probe(
	struct pci_dev *pci_dev,
	struct ipa_api_controller *api_ctrl,
	const struct of_device_id *pdrv_match)
{
	int result;
	struct ipa3_plat_drv_res *ipa_drv_res;
	u32 bar0_offset;
	u32 mem_start;
	u32 mem_end;
	uint32_t bits;
	uint32_t ipa_start, gsi_start, intctrl_start;
	struct device *dev;
	static struct platform_device platform_dev;

	if (!pci_dev || !api_ctrl || !pdrv_match) {
		IPAERR(
			"Bad arg: pci_dev (%pK) and/or api_ctrl (%pK) and/or pdrv_match (%pK)\n",
			pci_dev, api_ctrl, pdrv_match);
		return -EOPNOTSUPP;
	}

	dev = &(pci_dev->dev);

	IPADBG("IPA PCI driver probing started\n");

	/*
	 * Follow the standard PCI driver flow here:
	 * pci_enable_device: enables the device and assigns resources
	 * pci_request_region: makes the BAR0 address region usable
	 */
	result = pci_enable_device(pci_dev);
	if (result < 0) {
		IPAERR("pci_enable_device() failed\n");
		return -EOPNOTSUPP;
	}

	result = pci_request_region(pci_dev, 0, "IPA Memory");
	if (result < 0) {
		IPAERR("pci_request_region() failed\n");
		pci_disable_device(pci_dev);
		return -EOPNOTSUPP;
	}

	/*
	 * When in the PCI/emulation environment, &platform_dev is
	 * passed to get_ipa_dts_configuration(), but is unused, since
	 * all usages of it in the function are replaced by CPP
	 * relative to definitions in ipa_emulation_stubs.h. Passing
	 * &platform_dev makes code validity tools happy.
	 */
	if (get_ipa_dts_configuration(&platform_dev, &ipa3_res) != 0) {
		IPAERR("get_ipa_dts_configuration() failed\n");
		pci_release_region(pci_dev, 0);
		pci_disable_device(pci_dev);
		return -EOPNOTSUPP;
	}

	ipa_drv_res = &ipa3_res;

	result =
		of_property_read_u32(NULL, "emulator-bar0-offset",
			&bar0_offset);
	if (result) {
		IPAERR(":get resource failed for emulator-bar0-offset!\n");
		pci_release_region(pci_dev, 0);
		pci_disable_device(pci_dev);
		return -ENODEV;
	}
	IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset);

	ipa_start = ipa_drv_res->ipa_mem_base;
	gsi_start = ipa_drv_res->transport_mem_base;
	intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base;

	/*
	 * Which IRQ will we be interrupted on?
	 */
	ipa_drv_res->emulator_irq = pci_dev->irq;
	IPADBG(
		"EMULATION PCI_INTERRUPT_PIN(%u)\n",
		ipa_drv_res->emulator_irq);

	/*
	 * Set the ipa_mem_base to the PCI base address of BAR0
	 */
	mem_start = pci_resource_start(pci_dev, 0);
	mem_end = pci_resource_end(pci_dev, 0);

	IPADBG("PCI START = 0x%x\n", mem_start);
	IPADBG("PCI END = 0x%x\n", mem_end);

	ipa_drv_res->ipa_mem_base = mem_start + bar0_offset;

	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;

	ipa_drv_res->transport_mem_base =
		ipa_drv_res->ipa_mem_base + (gsi_start - ipa_start);
	ipa_drv_res->emulator_intcntrlr_mem_base =
		ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start);

	IPADBG("ipa_mem_base = 0x%x\n",
		ipa_drv_res->ipa_mem_base);
	IPADBG("ipa_mem_size = 0x%x\n",
		ipa_drv_res->ipa_mem_size);

	IPADBG("transport_mem_base = 0x%x\n",
		ipa_drv_res->transport_mem_base);
	IPADBG("transport_mem_size = 0x%x\n",
		ipa_drv_res->transport_mem_size);

	IPADBG("emulator_intcntrlr_mem_base = 0x%x\n",
		ipa_drv_res->emulator_intcntrlr_mem_base);
	IPADBG("emulator_intcntrlr_mem_size = 0x%x\n",
		ipa_drv_res->emulator_intcntrlr_mem_size);

	result = ipa3_bind_api_controller(ipa_drv_res->ipa_hw_type, api_ctrl);
	if (result != 0) {
		IPAERR("ipa3_bind_api_controller() failed\n");
		pci_release_region(pci_dev, 0);
		pci_disable_device(pci_dev);
		return result;
	}

	bits = (ipa_drv_res->use_64_bit_dma_mask) ? 64 : 32;

	if (dma_set_mask(dev, DMA_BIT_MASK(bits)) != 0) {
		IPAERR("dma_set_mask(%pK, %u) failed\n", dev, bits);
		pci_release_region(pci_dev, 0);
		pci_disable_device(pci_dev);
		return -EOPNOTSUPP;
	}

	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(bits)) != 0) {
		IPAERR("dma_set_coherent_mask(%pK, %u) failed\n", dev, bits);
		pci_release_region(pci_dev, 0);
		pci_disable_device(pci_dev);
		return -EOPNOTSUPP;
	}

	pci_set_master(pci_dev);

	memset(&platform_dev, 0, sizeof(platform_dev));
	platform_dev.dev = *dev;

	/* Proceed to real initialization */
	result = ipa3_pre_init(&ipa3_res, &platform_dev);
	if (result) {
		IPAERR("ipa3_pre_init failed\n");
		pci_clear_master(pci_dev);
		pci_release_region(pci_dev, 0);
		pci_disable_device(pci_dev);
		return result;
	}

	return result;
}
/*
 * The following returns the transport register memory location and
 * size...
 */
int ipa3_get_transport_info(
	phys_addr_t *phys_addr_ptr,
	unsigned long *size_ptr)
{
	if (!phys_addr_ptr || !size_ptr) {
		IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n",
			phys_addr_ptr, size_ptr);
		return -EINVAL;
	}

	*phys_addr_ptr = ipa3_res.transport_mem_base;
	*size_ptr = ipa3_res.transport_mem_size;

	return 0;
}
EXPORT_SYMBOL(ipa3_get_transport_info);
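
/*
 * Illustrative only: a caller ioremapping the transport (GSI) register
 * window via the exported helper above.
 *
 *	phys_addr_t base;
 *	unsigned long size;
 *	void __iomem *regs;
 *
 *	if (!ipa3_get_transport_info(&base, &size))
 *		regs = ioremap(base, size);
 */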
static uint emulation_type = IPA_HW_v4_0;

/*
 * The following returns the emulation type...
 */
uint ipa3_get_emulation_type(void)
{
	return emulation_type;
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA HW device driver");

/*
 * Module parameter. Invoke as follows:
 *	insmod ipat.ko emulation_type=[13|14|17|...|N]
 * Examples:
 *	insmod ipat.ko emulation_type=13 # for IPA 3.5.1
 *	insmod ipat.ko emulation_type=14 # for IPA 4.0
 *	insmod ipat.ko emulation_type=17 # for IPA 4.5
 *
 * NOTE: The emulation_type values need to come from: enum ipa_hw_type
 */
module_param(emulation_type, uint, 0000);
MODULE_PARM_DESC(
	emulation_type,
	"emulation_type=N N can be 13 for IPA 3.5.1, 14 for IPA 4.0, 17 for IPA 4.5");