ipa_dp.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/msm_gsi.h>
#include <net/sock.h>
#include "ipa_i.h"
#include "ipa_trace.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define IPA_WAN_AGGR_PKT_CNT 5
#define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
#define IPA_WAN_PAGE_ORDER 3
#define IPA_LAN_AGGR_PKT_CNT 5
#define IPA_LAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_LAN_AGGR_PKT_CNT)
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_MIN_SLEEP_RX 1010
#define POLLING_MAX_SLEEP_RX 1050
#define POLLING_INACTIVITY_TX 40
#define POLLING_MIN_SLEEP_TX 400
#define POLLING_MAX_SLEEP_TX 500
#define SUSPEND_MIN_SLEEP_RX 1000
#define SUSPEND_MAX_SLEEP_RX 1005
#define IPA_MTU 1500
/* 8K buffer less 1 nominal MTU (1500 bytes), rounded down to units of KB */
#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
#define IPA_GENERIC_AGGR_TIME_LIMIT 500 /* 0.5msec */
#define IPA_GENERIC_AGGR_PKT_LIMIT 0
#define IPA_GSB_AGGR_BYTE_LIMIT 14
#define IPA_GSB_RX_BUFF_BASE_SZ 16384
#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
		(X) + NET_SKB_PAD) +\
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
		(IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
#define IPA_GENERIC_RX_BUFF_LIMIT (\
		IPA_REAL_GENERIC_RX_BUFF_SZ(\
		IPA_GENERIC_RX_BUFF_BASE_SZ) -\
		IPA_GENERIC_RX_BUFF_BASE_SZ)
/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU) / 1000)
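
/*
 * Worked example for the sizing macros above (illustrative only):
 * IPA_REAL_GENERIC_RX_BUFF_SZ(X) is the true skb allocation size for a
 * payload of X bytes (payload + NET_SKB_PAD, cache-aligned, plus the
 * aligned skb_shared_info), so IPA_GENERIC_RX_BUFF_SZ(X) = X - (real - X)
 * shrinks the requested payload so the resulting allocation stays close
 * to X (e.g. an 8 KB slab). Likewise, with
 * IPA_GENERIC_RX_BUFF_BASE_SZ = 8192 and IPA_MTU = 1500,
 * IPA_ADJUST_AGGR_BYTE_LIMIT(8192) = (8192 - 1500) / 1000 = 6, which is
 * where IPA_GENERIC_AGGR_BYTE_LIMIT (in KB) above comes from: the limit
 * leaves room for one more MTU-sized packet to close aggregation.
 */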
#define IPA_RX_BUFF_CLIENT_HEADROOM 256
#define IPA_WLAN_RX_POOL_SZ 100
#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
#define IPA_WLAN_RX_BUFF_SZ 2048
#define IPA_WLAN_COMM_RX_POOL_LOW 100
#define IPA_WLAN_COMM_RX_POOL_HIGH 900
#define IPA_ODU_RX_BUFF_SZ 2048
#define IPA_ODU_RX_POOL_SZ 64
#define IPA_ODL_RX_BUFF_SZ (16 * 1024)
#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
#define IPA_GSI_EVT_RING_INT_MODT (16) /* 0.5ms under 32KHz clock */
#define IPA_GSI_EVT_RING_INT_MODC (20)
#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
/* The below virtual channel cannot be used by any entity */
#define IPA_GSI_CH_20_WA_VIRT_CHAN 29
#define IPA_DEFAULT_SYS_YELLOW_WM 32
#define IPA_REPL_XFER_THRESH 20
#define IPA_REPL_XFER_MAX 36
#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
#define IPA_APPS_BW_FOR_PM 700
#define IPA_SEND_MAX_DESC (20)
#define IPA_EOT_THRESH 32
#define IPA_QMAP_ID_BYTE 0
static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_work_func(struct work_struct *work);
static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys);
static void ipa3_wq_page_repl(struct work_struct *work);
static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys);
static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(gfp_t flag,
	bool is_tmp_alloc);
static void ipa3_wq_handle_rx(struct work_struct *work);
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify);
static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify, uint32_t num);
static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify);
static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
	struct ipa3_sys_context *sys);
static void ipa3_cleanup_rx(struct ipa3_sys_context *sys);
static void ipa3_wq_rx_avail(struct work_struct *work);
static void ipa3_alloc_wlan_rx_common_cache(u32 size);
static void ipa3_cleanup_wlan_rx_common_cache(void);
static void ipa3_wq_repl_rx(struct work_struct *work);
static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys);
static int ipa_gsi_setup_coal_def_channel(struct ipa_sys_connect_params *in,
	struct ipa3_ep_context *ep, struct ipa3_ep_context *coal_ep);
static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
	struct ipa3_ep_context *ep);
static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
	u32 ring_size, gfp_t mem_flag);
static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
	u32 ring_size, struct ipa3_sys_context *user_data, gfp_t mem_flag);
static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl);
static int ipa_populate_tag_field(struct ipa3_desc *desc,
	struct ipa3_tx_pkt_wrapper *tx_pkt,
	struct ipahal_imm_cmd_pyld **tag_pyld_ret);
static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify);
static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify, int expected_num,
	int *actual_num);
static unsigned long tag_to_pointer_wa(uint64_t tag);
static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);

static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
	struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	struct ipa3_tx_pkt_wrapper *next_pkt;
	int i, cnt;

	if (unlikely(tx_pkt == NULL)) {
		IPAERR("tx_pkt is NULL\n");
		return;
	}

	cnt = tx_pkt->cnt;
	IPADBG_LOW("cnt: %d\n", cnt);
	for (i = 0; i < cnt; i++) {
		spin_lock_bh(&sys->spinlock);
		if (unlikely(list_empty(&sys->head_desc_list))) {
			spin_unlock_bh(&sys->spinlock);
			return;
		}
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		sys->len--;
		spin_unlock_bh(&sys->spinlock);
		if (!tx_pkt->no_unmap_dma) {
			if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
				dma_unmap_single(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			} else {
				dma_unmap_page(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			}
		}
		if (tx_pkt->callback)
			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}
}
static void ipa3_wq_write_done_status(int src_pipe,
	struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	struct ipa3_sys_context *sys;

	WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes);
	if (!ipa3_ctx->ep[src_pipe].status.status_en)
		return;
	sys = ipa3_ctx->ep[src_pipe].sys;
	if (!sys)
		return;
	ipa3_wq_write_done_common(sys, tx_pkt);
}
/**
 * ipa3_tasklet_write_done() - this function will be (eventually) called
 * when a Tx operation is complete
 * @data: user pointer pointing to the ipa3_sys_context
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
 * - iterate over all packets and validate that
 *   the order of sent packets is as expected
 * - delete all the tx packet descriptors from the system
 *   pipe context (not needed anymore)
 */
static void ipa3_tasklet_write_done(unsigned long data)
{
	struct ipa3_sys_context *sys;
	struct ipa3_tx_pkt_wrapper *this_pkt;
	bool xmit_done = false;

	sys = (struct ipa3_sys_context *)data;
	spin_lock_bh(&sys->spinlock);
	while (atomic_add_unless(&sys->xmit_eot_cnt, -1, 0)) {
		while (!list_empty(&sys->head_desc_list)) {
			this_pkt = list_first_entry(&sys->head_desc_list,
				struct ipa3_tx_pkt_wrapper, link);
			xmit_done = this_pkt->xmit_done;
			spin_unlock_bh(&sys->spinlock);
			ipa3_wq_write_done_common(sys, this_pkt);
			spin_lock_bh(&sys->spinlock);
			if (xmit_done)
				break;
		}
	}
	spin_unlock_bh(&sys->spinlock);
}
static void ipa3_send_nop_desc(struct work_struct *work)
{
	struct ipa3_sys_context *sys = container_of(work,
		struct ipa3_sys_context, work);
	struct gsi_xfer_elem nop_xfer;
	struct ipa3_tx_pkt_wrapper *tx_pkt;

	IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);
	if (atomic_read(&sys->workqueue_flushed))
		return;
	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
	if (!tx_pkt) {
		queue_work(sys->wq, &sys->work);
		return;
	}
	INIT_LIST_HEAD(&tx_pkt->link);
	tx_pkt->cnt = 1;
	tx_pkt->no_unmap_dma = true;
	tx_pkt->sys = sys;
	spin_lock_bh(&sys->spinlock);
	if (unlikely(!sys->nop_pending)) {
		spin_unlock_bh(&sys->spinlock);
		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		return;
	}
	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
	sys->nop_pending = false;

	memset(&nop_xfer, 0, sizeof(nop_xfer));
	nop_xfer.type = GSI_XFER_ELEM_NOP;
	nop_xfer.flags = GSI_XFER_FLAG_EOT;
	nop_xfer.xfer_user_data = tx_pkt;
	if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) {
		spin_unlock_bh(&sys->spinlock);
		IPAERR("gsi_queue_xfer for ch:%lu failed\n",
			sys->ep->gsi_chan_hdl);
		queue_work(sys->wq, &sys->work);
		return;
	}
	spin_unlock_bh(&sys->spinlock);

	/* make sure TAG process is sent before clocks are gated */
	ipa3_ctx->tag_process_before_gating = true;
}
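
/*
 * Completion-flushing flow, as wired between ipa3_send_nop_desc() above
 * and ipa3_send()/ipa3_ring_doorbell_timer_fn() below:
 * 1. On a pipe using the common event ring, ipa3_send() queues the last
 *    data descriptor without EOT unless pkt_sent hits an IPA_EOT_THRESH
 *    boundary; it then sets sys->nop_pending and arms sys->db_timer for
 *    IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 ms).
 * 2. The expired timer queues sys->work, which runs ipa3_send_nop_desc().
 * 3. The NOP element carries GSI_XFER_FLAG_EOT, so the hardware produces
 *    one completion that lets the write-done path reap every tx wrapper
 *    queued on head_desc_list since the last event.
 */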
/**
 * ipa3_send() - Send multiple descriptors in one HW transaction
 * @sys: system pipe context
 * @num_desc: number of packets
 * @desc: packets to send (may be immediate command or data)
 * @in_atomic: whether caller is in atomic context
 *
 * This function is used for GPI connection.
 * - ipa3_tx_pkt_wrapper will be used for each ipa
 *   descriptor (allocated from wrappers cache)
 * - The wrapper struct will be configured for each ipa-desc payload and will
 *   contain information which will be later used by the user callbacks
 * - Each packet (command or data) that will be sent will also be saved in
 *   ipa3_sys_context for later check that all data was sent
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa3_send(struct ipa3_sys_context *sys,
	u32 num_desc,
	struct ipa3_desc *desc,
	bool in_atomic)
{
	struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first = NULL;
	struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
	struct ipa3_tx_pkt_wrapper *next_pkt;
	struct gsi_xfer_elem gsi_xfer[IPA_SEND_MAX_DESC];
	int i = 0;
	int j;
	int result;
	u32 mem_flag = GFP_ATOMIC;
	const struct ipa_gsi_ep_config *gsi_ep_cfg;
	bool send_nop = false;
	unsigned int max_desc;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	gsi_ep_cfg = ipa3_get_gsi_ep_info(sys->ep->client);
	if (unlikely(!gsi_ep_cfg)) {
		IPAERR("failed to get gsi EP config for client=%d\n",
			sys->ep->client);
		return -EFAULT;
	}
	if (unlikely(num_desc > IPA_SEND_MAX_DESC)) {
		IPAERR("max descriptors reached need=%d max=%d\n",
			num_desc, IPA_SEND_MAX_DESC);
		WARN_ON(1);
		return -EPERM;
	}

	max_desc = gsi_ep_cfg->ipa_if_tlv;
	if (gsi_ep_cfg->prefetch_mode == GSI_SMART_PRE_FETCH ||
		gsi_ep_cfg->prefetch_mode == GSI_FREE_PRE_FETCH)
		max_desc -= gsi_ep_cfg->prefetch_threshold;
	if (unlikely(num_desc > max_desc)) {
		IPAERR("Too many chained descriptors need=%d max=%d\n",
			num_desc, max_desc);
		WARN_ON(1);
		return -EPERM;
	}

	/* initialize only the xfers we use */
	memset(gsi_xfer, 0, sizeof(gsi_xfer[0]) * num_desc);

	spin_lock_bh(&sys->spinlock);

	for (i = 0; i < num_desc; i++) {
		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
			GFP_ATOMIC);
		if (!tx_pkt) {
			IPAERR("failed to alloc tx wrapper\n");
			result = -ENOMEM;
			goto failure;
		}
		INIT_LIST_HEAD(&tx_pkt->link);

		if (i == 0) {
			tx_pkt_first = tx_pkt;
			tx_pkt->cnt = num_desc;
		}

		/* populate tag field */
		if (desc[i].is_tag_status) {
			if (ipa_populate_tag_field(&desc[i], tx_pkt,
				&tag_pyld_ret)) {
				IPAERR("Failed to populate tag field\n");
				result = -EFAULT;
				goto failure_dma_map;
			}
		}

		tx_pkt->type = desc[i].type;

		if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
			tx_pkt->mem.base = desc[i].pyld;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					dma_map_single(ipa3_ctx->pdev,
						tx_pkt->mem.base,
						tx_pkt->mem.size,
						DMA_TO_DEVICE);
			} else {
				tx_pkt->mem.phys_base =
					desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		} else {
			tx_pkt->mem.base = desc[i].frag;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					skb_frag_dma_map(ipa3_ctx->pdev,
						desc[i].frag,
						0, tx_pkt->mem.size,
						DMA_TO_DEVICE);
			} else {
				tx_pkt->mem.phys_base =
					desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		}
		if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
			IPAERR("failed to do dma map.\n");
			result = -EFAULT;
			goto failure_dma_map;
		}

		tx_pkt->sys = sys;
		tx_pkt->callback = desc[i].callback;
		tx_pkt->user1 = desc[i].user1;
		tx_pkt->user2 = desc[i].user2;
		tx_pkt->xmit_done = false;

		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

		gsi_xfer[i].addr = tx_pkt->mem.phys_base;

		/*
		 * Special treatment for immediate commands, where
		 * the structure of the descriptor is different
		 */
		if (desc[i].type == IPA_IMM_CMD_DESC) {
			gsi_xfer[i].len = desc[i].opcode;
			gsi_xfer[i].type = GSI_XFER_ELEM_IMME_CMD;
		} else {
			gsi_xfer[i].len = desc[i].len;
			gsi_xfer[i].type = GSI_XFER_ELEM_DATA;
		}

		if (i == (num_desc - 1)) {
			if (!sys->use_comm_evt_ring ||
				(sys->pkt_sent % IPA_EOT_THRESH == 0)) {
				gsi_xfer[i].flags |= GSI_XFER_FLAG_EOT;
				gsi_xfer[i].flags |= GSI_XFER_FLAG_BEI;
			} else {
				send_nop = true;
			}
			gsi_xfer[i].xfer_user_data = tx_pkt_first;
		} else {
			gsi_xfer[i].flags |= GSI_XFER_FLAG_CHAIN;
		}
	}

	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
		gsi_xfer, true);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR_RL("GSI xfer failed.\n");
		result = -EFAULT;
		goto failure;
	}

	if (send_nop && !sys->nop_pending)
		sys->nop_pending = true;
	else
		send_nop = false;

	sys->pkt_sent++;
	spin_unlock_bh(&sys->spinlock);

	/* set the timer for sending the NOP descriptor */
	if (send_nop) {
		ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);

		IPADBG_LOW("scheduling timer for ch %lu\n",
			sys->ep->gsi_chan_hdl);
		hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
	}

	/* make sure TAG process is sent before clocks are gated */
	ipa3_ctx->tag_process_before_gating = true;

	return 0;

failure_dma_map:
	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);

failure:
	ipahal_destroy_imm_cmd(tag_pyld_ret);
	tx_pkt = tx_pkt_first;
	for (j = 0; j < i; j++) {
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);

		if (!tx_pkt->no_unmap_dma) {
			if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
				dma_unmap_single(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size, DMA_TO_DEVICE);
			} else {
				dma_unmap_page(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			}
		}
		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}

	spin_unlock_bh(&sys->spinlock);
	return result;
}
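
/*
 * Minimal usage sketch for ipa3_send() from a hypothetical caller. The
 * field names match what the loop above consumes; IPA_DATA_DESC_SKB and
 * the cmd_pyld fields come from ipa_i.h/ipahal and are assumptions here,
 * not code from this file:
 *
 *	struct ipa3_desc desc[2];
 *
 *	memset(desc, 0, sizeof(desc));
 *	desc[0].type = IPA_IMM_CMD_DESC;
 *	desc[0].opcode = cmd_pyld->opcode;
 *	desc[0].pyld = cmd_pyld->data;
 *	desc[0].len = cmd_pyld->len;
 *	desc[1].type = IPA_DATA_DESC_SKB;
 *	desc[1].pyld = skb->data;
 *	desc[1].len = skb->len;
 *	desc[1].callback = my_tx_complete;  // void fn(void *user1, int user2)
 *	desc[1].user1 = skb;
 *	if (ipa3_send(sys, 2, desc, true))  // true: caller in atomic context
 *		handle_error();
 *
 * Both descriptors land in one GSI transaction: desc[0] is chained with
 * GSI_XFER_FLAG_CHAIN and desc[1] terminates the chain.
 */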
/**
 * ipa3_send_one() - Send a single descriptor
 * @sys: system pipe context
 * @desc: descriptor to send
 * @in_atomic: whether caller is in atomic context
 *
 * - Allocate tx_packet wrapper
 * - transfer data to the IPA
 * - after the transfer is done, the transport driver will
 *   notify the sending client via the registered callback
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
	bool in_atomic)
{
	return ipa3_send(sys, 1, desc, in_atomic);
}
/**
 * ipa3_transport_irq_cmd_ack - callback function which will be called by
 * the transport driver after an immediate command is complete.
 * @user1: pointer to the descriptor of the transfer
 * @user2: unused
 *
 * Complete the immediate command's completion object; this will release the
 * thread which waits on this completion object (ipa3_send_cmd())
 */
static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
{
	struct ipa3_desc *desc = (struct ipa3_desc *)user1;

	if (WARN(!desc, "desc is NULL"))
		return;
	IPADBG_LOW("got ack for cmd=%d\n", desc->opcode);
	complete(&desc->xfer_done);
}
/**
 * ipa3_transport_irq_cmd_ack_free - callback function which will be
 * called by the transport driver after an immediate command is complete.
 * This function will also free the completion object once it is done.
 * @tag_comp: pointer to the completion object
 * @ignored: parameter not used
 *
 * Complete the immediate command's completion object; this will release the
 * thread which waits on this completion object (ipa3_send_cmd())
 */
static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored)
{
	struct ipa3_tag_completion *comp = tag_comp;

	if (!comp) {
		IPAERR("comp is NULL\n");
		return;
	}

	complete(&comp->comp);
	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);
}
/**
 * ipa3_send_cmd - send immediate commands
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 *
 * Function will block until the command gets an ACK from the IPA HW; the
 * caller needs to free any resources it allocated after the function
 * returns. The callback in ipa3_desc should not be set by the caller
 * for this function.
 */
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
{
	struct ipa3_desc *desc;
	int i, result = 0;
	struct ipa3_sys_context *sys;
	int ep_idx;

	for (i = 0; i < num_desc; i++)
		IPADBG("sending imm cmd %d\n", descr[i].opcode);

	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}

	sys = ipa3_ctx->ep[ep_idx].sys;
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (num_desc == 1) {
		init_completion(&descr->xfer_done);

		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa3_transport_irq_cmd_ack;
		descr->user1 = descr;
		if (ipa3_send_one(sys, descr, true)) {
			IPAERR("fail to send immediate command\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&descr->xfer_done);
	} else {
		desc = &descr[num_desc - 1];
		init_completion(&desc->xfer_done);

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa3_transport_irq_cmd_ack;
		desc->user1 = desc;
		if (ipa3_send(sys, num_desc, descr, true)) {
			IPAERR("fail to send multiple immediate command set\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&desc->xfer_done);
	}

bail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
/**
 * ipa3_send_cmd_timeout - send immediate commands with limited time
 * waiting for ACK from IPA HW
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 * @timeout: milliseconds to wait for an ACK from the IPA HW
 *
 * Function will block until the command gets an ACK from the IPA HW or the
 * timeout expires. The caller needs to free any resources it allocated
 * after the function returns. The callback in ipa3_desc should not be set
 * by the caller for this function.
 */
int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
{
	struct ipa3_desc *desc;
	int i, result = 0;
	struct ipa3_sys_context *sys;
	int ep_idx;
	int completed;
	struct ipa3_tag_completion *comp;

	for (i = 0; i < num_desc; i++)
		IPADBG("sending imm cmd %d\n", descr[i].opcode);

	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}

	comp = kzalloc(sizeof(*comp), GFP_ATOMIC);
	if (!comp)
		return -ENOMEM;

	init_completion(&comp->comp);

	/* completion needs to be released from both here and in ack callback */
	atomic_set(&comp->cnt, 2);

	sys = ipa3_ctx->ep[ep_idx].sys;

	if (num_desc == 1) {
		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa3_transport_irq_cmd_ack_free;
		descr->user1 = comp;
		if (ipa3_send_one(sys, descr, true)) {
			IPAERR("fail to send immediate command\n");
			kfree(comp);
			result = -EFAULT;
			goto bail;
		}
	} else {
		desc = &descr[num_desc - 1];

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa3_transport_irq_cmd_ack_free;
		desc->user1 = comp;
		if (ipa3_send(sys, num_desc, descr, true)) {
			IPAERR("fail to send multiple immediate command set\n");
			kfree(comp);
			result = -EFAULT;
			goto bail;
		}
	}

	completed = wait_for_completion_timeout(
		&comp->comp, msecs_to_jiffies(timeout));
	if (!completed) {
		IPADBG("timeout waiting for imm-cmd ACK\n");
		result = -EBUSY;
	}

	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);

bail:
	return result;
}
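
/*
 * Note on the lifetime of 'comp' above: comp->cnt starts at 2, one
 * reference for this waiter and one for ipa3_transport_irq_cmd_ack_free().
 * Whichever of wait_for_completion_timeout() or the ACK callback drops the
 * last reference frees the object, so an ACK that arrives after the
 * timeout completes a still-valid completion rather than freed memory.
 */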
/**
 * ipa3_handle_rx_core() - The core functionality of packet reception. This
 * function is called from multiple code paths.
 *
 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
 * endpoint. The function runs as long as there are packets in the pipe.
 * For each packet:
 * - Disconnect the packet from the system pipe linked list
 * - Unmap the packet's skb, making it non-DMAable
 * - Free the packet from the cache
 * - Prepare a proper skb
 * - Call the endpoint's notify function, passing the skb in the parameters
 * - Replenish the rx cache
 */
static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
	bool in_poll_state)
{
	int ret;
	int cnt = 0;
	struct gsi_chan_xfer_notify notify = { 0 };

	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
		!atomic_read(&sys->curr_polling_state))) {
		if (cnt && !process_all)
			break;

		ret = ipa_poll_gsi_pkt(sys, &notify);
		if (ret)
			break;

		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
			ipa3_dma_memcpy_notify(sys);
		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
			ipa3_wlan_wq_rx_common(sys, &notify);
		else
			ipa3_wq_rx_common(sys, &notify);

		++cnt;
	}
	return cnt;
}
/**
 * __ipa3_update_curr_poll_state() - update the current polling state for
 * both the default WAN pipe and the coalescing pipe.
 *
 * When RSC/RSB is enabled the two pipes share a common event ring, so
 * their polling states have to be kept in sync.
 */
void __ipa3_update_curr_poll_state(enum ipa_client_type client, int state)
{
	int ep_idx = IPA_EP_NOT_ALLOCATED;

	if (client == IPA_CLIENT_APPS_WAN_COAL_CONS)
		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
	if (client == IPA_CLIENT_APPS_WAN_CONS)
		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);

	if (ep_idx != IPA_EP_NOT_ALLOCATED && ipa3_ctx->ep[ep_idx].sys)
		atomic_set(&ipa3_ctx->ep[ep_idx].sys->curr_polling_state,
			state);
}
/**
 * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
 */
static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
{
	int ret;

	atomic_set(&sys->curr_polling_state, 0);
	__ipa3_update_curr_poll_state(sys->ep->client, 0);
	ipa3_dec_release_wakelock();
	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
		GSI_CHAN_MODE_CALLBACK);
	if ((ret != GSI_STATUS_SUCCESS) &&
		!atomic_read(&sys->curr_polling_state)) {
		if (ret == -GSI_STATUS_PENDING_IRQ) {
			ipa3_inc_acquire_wakelock();
			atomic_set(&sys->curr_polling_state, 1);
			__ipa3_update_curr_poll_state(sys->ep->client, 1);
		} else {
			IPAERR("Failed to switch to intr mode %d ch_id %lu\n",
				atomic_read(&sys->curr_polling_state),
				sys->ep->gsi_chan_hdl);
		}
	}

	return ret;
}
/**
 * ipa3_handle_rx() - handle packet reception. This function is executed in
 * the context of a work queue.
 * @work: work struct needed by the work queue
 *
 * ipa3_handle_rx_core() is run in polling mode. After all packets have been
 * received, the driver switches back to interrupt mode.
 */
static void ipa3_handle_rx(struct ipa3_sys_context *sys)
{
	int inactive_cycles;
	int cnt;
	int ret;

	ipa_pm_activate_sync(sys->pm_hdl);
start_poll:
	inactive_cycles = 0;
	do {
		cnt = ipa3_handle_rx_core(sys, true, true);
		if (cnt == 0)
			inactive_cycles++;
		else
			inactive_cycles = 0;

		trace_idle_sleep_enter3(sys->ep->client);
		usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
		trace_idle_sleep_exit3(sys->ep->client);

		/*
		 * if the pipe is out of buffers there is no point polling for
		 * completed descs; release the worker so delayed work can
		 * run in a timely manner
		 */
		if (sys->len == 0)
			break;

	} while (inactive_cycles <= POLLING_INACTIVITY_RX);

	trace_poll_to_intr3(sys->ep->client);
	ret = ipa3_rx_switch_to_intr_mode(sys);
	if (ret == -GSI_STATUS_PENDING_IRQ)
		goto start_poll;

	ipa_pm_deferred_deactivate(sys->pm_hdl);
}
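
/*
 * Rx mode transitions around ipa3_handle_rx() above:
 * - interrupt -> polling happens in the GSI notify callback (elsewhere in
 *   this file), which sets curr_polling_state and schedules this worker;
 * - the polling loop calls ipa3_handle_rx_core() and exits after
 *   POLLING_INACTIVITY_RX empty iterations (sleeping
 *   POLLING_MIN_SLEEP_RX..POLLING_MAX_SLEEP_RX us between passes) or when
 *   the pipe runs out of buffers;
 * - polling -> interrupt goes through ipa3_rx_switch_to_intr_mode();
 *   -GSI_STATUS_PENDING_IRQ from that switch restarts the polling loop.
 */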
static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa3_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);

	if (sys->napi_obj) {
		/* interrupt mode is done in ipa3_rx_poll context */
		ipa_assert();
	} else
		ipa3_handle_rx(sys);
}

enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param)
{
	struct ipa3_sys_context *sys = container_of(param,
		struct ipa3_sys_context, db_timer);

	queue_work(sys->wq, &sys->work);
	return HRTIMER_NORESTART;
}
static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
{
	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)p;

	switch (event) {
	case IPA_PM_CLIENT_ACTIVATED:
		/*
		 * this event is ignored as the sync version of activation
		 * will be used.
		 */
		break;
	case IPA_PM_REQUEST_WAKEUP:
		/*
		 * pipe will be unsuspended as part of
		 * enabling IPA clocks
		 */
		IPADBG("calling wakeup for client %d\n", sys->ep->client);
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_WAN");
			usleep_range(SUSPEND_MIN_SLEEP_RX,
				SUSPEND_MAX_SLEEP_RX);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_WAN");
		} else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LAN");
			usleep_range(SUSPEND_MIN_SLEEP_RX,
				SUSPEND_MAX_SLEEP_RX);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LAN");
		} else if (sys->ep->client == IPA_CLIENT_ODL_DPL_CONS) {
			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_ODL");
			usleep_range(SUSPEND_MIN_SLEEP_RX,
				SUSPEND_MAX_SLEEP_RX);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_ODL");
		} else if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_COAL");
			usleep_range(SUSPEND_MIN_SLEEP_RX,
				SUSPEND_MAX_SLEEP_RX);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_COAL");
		} else
			IPAERR("Unexpected event %d for client %d\n",
				event, sys->ep->client);
		break;
	default:
		IPAERR("Unexpected event %d for client %d\n",
			event, sys->ep->client);
		WARN_ON(1);
		return;
	}
}
/**
 * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
 * IPA EP configuration
 * @sys_in: [in] input needed to setup the pipe and configure EP
 * @clnt_hdl: [out] client handle
 *
 * - configure the end-point registers with the supplied
 *   parameters from the user.
 * - Creates a GPI connection with IPA.
 * - allocate descriptor FIFO
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int i, ipa_ep_idx, wan_handle, coal_ep_id;
	int result = -EINVAL;
	struct ipahal_reg_coal_qmap_cfg qmap_cfg;
	struct ipahal_reg_coal_evict_lru evict_lru;
	char buff[IPA_RESOURCE_NAME_MAX];
	struct ipa_ep_cfg ep_cfg_copy;

	if (sys_in == NULL || clnt_hdl == NULL) {
		IPAERR("NULL args\n");
		goto fail_gen;
	}

	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
		IPAERR("bad parm client:%d fifo_sz:%d\n",
			sys_in->client, sys_in->desc_fifo_sz);
		goto fail_gen;
	}

	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
		IPAERR("Invalid client.\n");
		goto fail_gen;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid == 1) {
		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
		goto fail_gen;
	}

	coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);

	/* save the input config parameters */
	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
		ep_cfg_copy = sys_in->ipa_ep_cfg;

	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));

	if (!ep->sys) {
		struct ipa_pm_register_params pm_reg;

		memset(&pm_reg, 0, sizeof(pm_reg));
		ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
		if (!ep->sys) {
			IPAERR("failed to alloc sys ctx for client %d\n",
				sys_in->client);
			result = -ENOMEM;
			goto fail_and_disable_clocks;
		}

		ep->sys->ep = ep;
		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
			sys_in->client);
		ep->sys->wq = alloc_workqueue(buff,
			WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
		if (!ep->sys->wq) {
			IPAERR("failed to create wq for client %d\n",
				sys_in->client);
			result = -EFAULT;
			goto fail_wq;
		}

		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
			sys_in->client);
		ep->sys->repl_wq = alloc_workqueue(buff,
			WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
		if (!ep->sys->repl_wq) {
			IPAERR("failed to create rep wq for client %d\n",
				sys_in->client);
			result = -EFAULT;
			goto fail_wq2;
		}

		INIT_LIST_HEAD(&ep->sys->head_desc_list);
		INIT_LIST_HEAD(&ep->sys->rcycl_list);
		spin_lock_init(&ep->sys->spinlock);
		hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;

		/* create IPA PM resources for handling polling mode */
		if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS &&
			coal_ep_id != IPA_EP_NOT_ALLOCATED &&
			ipa3_ctx->ep[coal_ep_id].valid == 1) {
			/* Use coalescing pipe PM handle for default pipe too */
			ep->sys->pm_hdl = ipa3_ctx->ep[coal_ep_id].sys->pm_hdl;
		} else if (IPA_CLIENT_IS_CONS(sys_in->client)) {
			pm_reg.name = ipa_clients_strings[sys_in->client];
			pm_reg.callback = ipa_pm_sys_pipe_cb;
			pm_reg.user_data = ep->sys;
			pm_reg.group = IPA_PM_GROUP_APPS;
			result = ipa_pm_register(&pm_reg, &ep->sys->pm_hdl);
			if (result) {
				IPAERR("failed to create IPA PM client %d\n",
					result);
				goto fail_pm;
			}

			if (IPA_CLIENT_IS_APPS_CONS(sys_in->client)) {
				result = ipa_pm_associate_ipa_cons_to_client(
					ep->sys->pm_hdl, sys_in->client);
				if (result) {
					IPAERR("failed to associate\n");
					goto fail_gen2;
				}
			}

			result = ipa_pm_set_throughput(ep->sys->pm_hdl,
				IPA_APPS_BW_FOR_PM);
			if (result) {
				IPAERR("failed to set profile IPA PM client\n");
				goto fail_gen2;
			}
		}
	} else {
		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
	}

	atomic_set(&ep->sys->xmit_eot_cnt, 0);
	tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
		(unsigned long) ep->sys);

	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
	if (ipa3_assign_policy(sys_in, ep->sys)) {
		IPAERR("failed to assign policy for client %d\n",
			sys_in->client);
		result = -ENOMEM;
		goto fail_gen2;
	}
	ep->valid = 1;
	ep->client = sys_in->client;
	ep->client_notify = sys_in->notify;
	ep->sys->napi_obj = sys_in->napi_obj;
	ep->priv = sys_in->priv;
	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
	atomic_set(&ep->avail_fifo_desc,
		((sys_in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE) - 1));

	if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
	    ep->sys->status_stat == NULL) {
		ep->sys->status_stat =
			kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL);
		if (!ep->sys->status_stat)
			goto fail_gen2;
	}

	if (!ep->skip_ep_cfg) {
		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_gen2;
		}
		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_gen2;
		}
		IPADBG("ep %d configuration successful\n", ipa_ep_idx);
	} else {
		IPADBG("skipping ep %d configuration\n", ipa_ep_idx);
	}

	result = ipa_gsi_setup_channel(sys_in, ep);
	if (result) {
		IPAERR("Failed to setup GSI channel\n");
		goto fail_gen2;
	}

	*clnt_hdl = ipa_ep_idx;

	if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
		ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
		if (!ep->sys->repl) {
			IPAERR("failed to alloc repl for client %d\n",
				sys_in->client);
			result = -ENOMEM;
			goto fail_gen2;
		}
		atomic_set(&ep->sys->repl->pending, 0);
		ep->sys->repl->capacity = ep->sys->rx_pool_sz + 1;

		ep->sys->repl->cache = kcalloc(ep->sys->repl->capacity,
			sizeof(void *), GFP_KERNEL);
		if (!ep->sys->repl->cache) {
			IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
			ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
			ep->sys->repl->capacity = 0;
		} else {
			atomic_set(&ep->sys->repl->head_idx, 0);
			atomic_set(&ep->sys->repl->tail_idx, 0);
			ipa3_wq_repl_rx(&ep->sys->repl_work);
		}
	}

	if (ep->sys->repl_hdlr == ipa3_replenish_rx_page_recycle) {
		ep->sys->page_recycle_repl = kzalloc(
			sizeof(*ep->sys->page_recycle_repl), GFP_KERNEL);
		if (!ep->sys->page_recycle_repl) {
			IPAERR("failed to alloc repl for client %d\n",
				sys_in->client);
			result = -ENOMEM;
			goto fail_gen2;
		}
		atomic_set(&ep->sys->page_recycle_repl->pending, 0);
		ep->sys->page_recycle_repl->capacity =
			(ep->sys->rx_pool_sz + 1) * 2;
		ep->sys->page_recycle_repl->cache =
			kcalloc(ep->sys->page_recycle_repl->capacity,
			sizeof(void *), GFP_KERNEL);
		atomic_set(&ep->sys->page_recycle_repl->head_idx, 0);
		atomic_set(&ep->sys->page_recycle_repl->tail_idx, 0);
		ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
		if (!ep->sys->repl) {
			IPAERR("failed to alloc repl for client %d\n",
				sys_in->client);
			result = -ENOMEM;
			goto fail_page_recycle_repl;
		}
		ep->sys->repl->capacity = (ep->sys->rx_pool_sz + 1);
		atomic_set(&ep->sys->repl->pending, 0);
		ep->sys->repl->cache = kcalloc(ep->sys->repl->capacity,
			sizeof(void *), GFP_KERNEL);
		atomic_set(&ep->sys->repl->head_idx, 0);
		atomic_set(&ep->sys->repl->tail_idx, 0);

		ipa3_replenish_rx_page_cache(ep->sys);
		ipa3_wq_page_repl(&ep->sys->repl_work);
	}

	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
		if (IPA_CLIENT_IS_WAN_CONS(sys_in->client) &&
			ipa3_ctx->ipa_wan_skb_page) {
			ipa3_replenish_rx_page_recycle(ep->sys);
		} else
			ipa3_replenish_rx_cache(ep->sys);
		for (i = 0; i < GSI_VEID_MAX; i++)
			INIT_LIST_HEAD(&ep->sys->pending_pkts[i]);
	}

	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
		ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
		atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
	}

	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
			sys_in->client == IPA_CLIENT_APPS_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa3_install_dflt_flt_rules(ipa_ep_idx);
	}

	result = ipa3_enable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("enable data path failed res=%d ep=%d.\n", result,
			ipa_ep_idx);
		goto fail_repl;
	}

	result = gsi_start_channel(ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("gsi_start_channel failed res=%d ep=%d.\n", result,
			ipa_ep_idx);
		goto fail_gen3;
	}

	IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client,
		ipa_ep_idx, ep->sys);

	/* configure the registers and setup the default pipe */
	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
		evict_lru.coal_vp_lru_thrshld = 0;
		evict_lru.coal_eviction_en = true;
		ipahal_write_reg_fields(IPA_COAL_EVICT_LRU, &evict_lru);

		qmap_cfg.mux_id_byte_sel = IPA_QMAP_ID_BYTE;
		ipahal_write_reg_fields(IPA_COAL_QMAP_CFG, &qmap_cfg);

		sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
		sys_in->ipa_ep_cfg = ep_cfg_copy;
  1073. result = ipa3_setup_sys_pipe(sys_in, &wan_handle);
  1074. if (result) {
  1075. IPAERR("failed to setup default coalescing pipe\n");
  1076. goto fail_repl;
  1077. }
  1078. }
  1079. if (!ep->keep_ipa_awake)
  1080. IPA_ACTIVE_CLIENTS_DEC_EP(ep->client);
  1081. return 0;
  1082. fail_gen3:
  1083. ipa3_disable_data_path(ipa_ep_idx);
  1084. fail_repl:
  1085. ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
  1086. ep->sys->repl->capacity = 0;
  1087. kfree(ep->sys->repl);
  1088. fail_page_recycle_repl:
  1089. if (ep->sys->page_recycle_repl) {
  1090. ep->sys->page_recycle_repl->capacity = 0;
  1091. kfree(ep->sys->page_recycle_repl);
  1092. }
  1093. fail_gen2:
  1094. ipa_pm_deregister(ep->sys->pm_hdl);
  1095. fail_pm:
  1096. destroy_workqueue(ep->sys->repl_wq);
  1097. fail_wq2:
  1098. destroy_workqueue(ep->sys->wq);
  1099. fail_wq:
  1100. kfree(ep->sys);
  1101. memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
  1102. fail_and_disable_clocks:
  1103. IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
  1104. fail_gen:
  1105. return result;
  1106. }
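
/*
 * Illustrative sketch only (not part of the driver): a minimal caller of
 * ipa3_setup_sys_pipe() for an APPS LAN consumer pipe. The names
 * my_setup_lan_cons/my_lan_rx_notify and the 0x800 FIFO size are
 * hypothetical; real clients fill many more ipa_sys_connect_params fields.
 * Kept under #if 0 so it never builds into the driver.
 */
#if 0
static void my_lan_rx_notify(void *priv, enum ipa_dp_evt_type evt,
	unsigned long data)
{
	/* the driver hands ownership of the skb to this callback */
	if (evt == IPA_RECEIVE)
		dev_kfree_skb_any((struct sk_buff *)data);
}

static int my_setup_lan_cons(u32 *hdl)
{
	struct ipa_sys_connect_params sys_in;

	memset(&sys_in, 0, sizeof(sys_in));
	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
	sys_in.desc_fifo_sz = 0x800;	/* hypothetical size */
	sys_in.notify = my_lan_rx_notify;
	sys_in.priv = NULL;
	return ipa3_setup_sys_pipe(&sys_in, hdl);
}
#endif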
/**
 * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP
 * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
 *
 * Returns:	0 on success, negative on failure
 */
int ipa3_teardown_sys_pipe(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int empty;
	int result;
	int i;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	ipa3_disable_data_path(clnt_hdl);

	if (IPA_CLIENT_IS_PROD(ep->client)) {
		do {
			spin_lock_bh(&ep->sys->spinlock);
			empty = list_empty(&ep->sys->head_desc_list);
			spin_unlock_bh(&ep->sys->spinlock);
			if (!empty)
				usleep_range(95, 105);
			else
				break;
		} while (1);
	}

	/* channel stop might fail on timeout if IPA is busy */
	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
		result = ipa3_stop_gsi_channel(clnt_hdl);
		if (result == GSI_STATUS_SUCCESS)
			break;

		if (result != -GSI_STATUS_AGAIN &&
		    result != -GSI_STATUS_TIMED_OUT)
			break;
	}

	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("GSI stop chan err: %d.\n", result);
		ipa_assert();
		return result;
	}

	if (ep->sys->napi_obj) {
		do {
			usleep_range(95, 105);
		} while (atomic_read(&ep->sys->curr_polling_state));
	}

	if (IPA_CLIENT_IS_CONS(ep->client))
		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
	flush_workqueue(ep->sys->wq);
	if (IPA_CLIENT_IS_PROD(ep->client))
		atomic_set(&ep->sys->workqueue_flushed, 1);

	/* tear down the default pipe before we reset the channel */
	if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
		if (i == IPA_EP_NOT_ALLOCATED) {
			IPAERR("failed to get idx\n");
			return i;
		}

		result = ipa3_teardown_coal_def_pipe(i);
		if (result) {
			IPAERR("failed to teardown default coal pipe\n");
			return result;
		}
	}

	result = ipa3_reset_gsi_channel(clnt_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to reset chan: %d.\n", result);
		ipa_assert();
		return result;
	}
	dma_free_coherent(ipa3_ctx->pdev,
		ep->gsi_mem_info.chan_ring_len,
		ep->gsi_mem_info.chan_ring_base_vaddr,
		ep->gsi_mem_info.chan_ring_base_addr);
	result = gsi_dealloc_channel(ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to dealloc chan: %d.\n", result);
		ipa_assert();
		return result;
	}

	/* free event ring only when it is present */
	if (ep->sys->use_comm_evt_ring) {
		ipa3_ctx->gsi_evt_comm_ring_rem +=
			ep->gsi_mem_info.chan_ring_len;
	} else if (ep->gsi_evt_ring_hdl != ~0) {
		result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
		if (WARN(result != GSI_STATUS_SUCCESS, "reset evt %d", result))
			return result;

		dma_free_coherent(ipa3_ctx->pdev,
			ep->gsi_mem_info.evt_ring_len,
			ep->gsi_mem_info.evt_ring_base_vaddr,
			ep->gsi_mem_info.evt_ring_base_addr);
		result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
		if (WARN(result != GSI_STATUS_SUCCESS, "dealloc evt %d",
		    result))
			return result;
	}
	if (ep->sys->repl_wq)
		flush_workqueue(ep->sys->repl_wq);
	if (IPA_CLIENT_IS_CONS(ep->client))
		ipa3_cleanup_rx(ep->sys);

	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
			ep->client == IPA_CLIENT_APPS_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa3_delete_dflt_flt_rules(clnt_hdl);
	}

	if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
		atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt);

	memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats));

	if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt))
		ipa3_cleanup_wlan_rx_common_cache();

	ep->valid = 0;

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}
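
/*
 * Illustrative sketch only (not part of the driver): the teardown side of
 * the hypothetical my_setup_lan_cons() example above, showing that the
 * handle returned by ipa3_setup_sys_pipe() is what identifies the pipe.
 */
#if 0
static void my_teardown_lan_cons(u32 hdl)
{
	if (ipa3_teardown_sys_pipe(hdl))
		pr_err("LAN cons teardown failed\n");
}
#endif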
/**
 * ipa3_teardown_coal_def_pipe() - Teardown the APPS_WAN_COAL_CONS
 * default GPI pipe and cleanup the IPA EP;
 * called after the coalesced pipe is destroyed.
 * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
 *
 * Returns:	0 on success, negative on failure
 */
static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int result;
	int i;

	ep = &ipa3_ctx->ep[clnt_hdl];

	ipa3_disable_data_path(clnt_hdl);

	/* channel stop might fail on timeout if IPA is busy */
	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
		result = ipa3_stop_gsi_channel(clnt_hdl);
		if (result == GSI_STATUS_SUCCESS)
			break;

		if (result != -GSI_STATUS_AGAIN &&
		    result != -GSI_STATUS_TIMED_OUT)
			break;
	}

	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("GSI stop chan err: %d.\n", result);
		ipa_assert();
		return result;
	}

	result = ipa3_reset_gsi_channel(clnt_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to reset chan: %d.\n", result);
		ipa_assert();
		return result;
	}

	dma_free_coherent(ipa3_ctx->pdev,
		ep->gsi_mem_info.chan_ring_len,
		ep->gsi_mem_info.chan_ring_base_vaddr,
		ep->gsi_mem_info.chan_ring_base_addr);

	result = gsi_dealloc_channel(ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to dealloc chan: %d.\n", result);
		ipa_assert();
		return result;
	}

	if (IPA_CLIENT_IS_CONS(ep->client))
		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);

	flush_workqueue(ep->sys->wq);

	if (ep->sys->repl_wq)
		flush_workqueue(ep->sys->repl_wq);
	if (IPA_CLIENT_IS_CONS(ep->client))
		ipa3_cleanup_rx(ep->sys);

	ep->valid = 0;

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}
/**
 * ipa3_tx_comp_usr_notify_release() - Callback function which will call the
 * user supplied callback function to release the skb, or release it on
 * its own if no callback function was supplied.
 * @user1: opaque pointer to the skb that was transmitted
 * @user2: endpoint index of the destination client
 *
 * This notified callback is for the destination client.
 */
static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
{
	struct sk_buff *skb = (struct sk_buff *)user1;
	int ep_idx = user2;

	IPADBG_LOW("skb=%pK ep=%d\n", skb, ep_idx);

	IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl);

	if (ipa3_ctx->ep[ep_idx].client_notify)
		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
				IPA_WRITE_DONE, (unsigned long)skb);
	else
		dev_kfree_skb_any(skb);
}
void ipa3_tx_cmd_comp(void *user1, int user2)
{
	ipahal_destroy_imm_cmd(user1);
}
/**
 * ipa3_tx_dp() - Data-path tx handler
 * @dst:	[in] which IPA destination to route tx packets to
 * @skb:	[in] the packet to send
 * @meta:	[in] TX packet meta-data
 *
 * Data-path tx handler, this is used for both the SW data-path which
 * by-passes most IPA HW blocks AND the regular HW data-path for WLAN AMPDU
 * traffic only. If dst is a "valid" CONS type, then the SW data-path is used.
 * If dst is the WLAN_AMPDU PROD type, then the HW data-path for WLAN AMPDU is
 * used. Anything else is an error. For errors, the client needs to free the
 * skb as needed. For success, the IPA driver will later invoke the client
 * callback if one was supplied; that callback should free the skb. If no
 * callback was supplied, the IPA driver will free the skb internally.
 *
 * The function will use two descriptors for this send command
 * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent);
 * the first descriptor will be used to inform the IPA hardware that
 * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
 * Once this send is done from the transport point-of-view the IPA driver
 * will get notified by the supplied callback.
 *
 * Returns:	0 on success, negative on failure
 */
int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
		struct ipa_tx_meta *meta)
{
	struct ipa3_desc *desc;
	struct ipa3_desc _desc[3];
	int dst_ep_idx;
	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
	struct ipa3_sys_context *sys;
	int src_ep_idx;
	int num_frags, f;
	const struct ipa_gsi_ep_config *gsi_ep;
	int data_idx;
	unsigned int max_desc;

	if (unlikely(!ipa3_ctx)) {
		IPAERR("IPA3 driver was not initialized\n");
		return -EINVAL;
	}

	if (skb->len == 0) {
		IPAERR("packet size is 0\n");
		return -EINVAL;
	}

	/*
	 * USB_CONS: PKT_INIT ep_idx = dst pipe
	 * Q6_CONS: PKT_INIT ep_idx = sender pipe
	 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
	 *
	 * LAN TX: all PKT_INIT
	 * WAN TX: PKT_INIT (cmd) + HW (data)
	 */
	if (IPA_CLIENT_IS_CONS(dst)) {
		src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
		if (-1 == src_ep_idx) {
			IPAERR("Client %u is not mapped\n",
				IPA_CLIENT_APPS_LAN_PROD);
			goto fail_gen;
		}
		dst_ep_idx = ipa3_get_ep_mapping(dst);
	} else {
		src_ep_idx = ipa3_get_ep_mapping(dst);
		if (-1 == src_ep_idx) {
			IPAERR("Client %u is not mapped\n", dst);
			goto fail_gen;
		}
		if (meta && meta->pkt_init_dst_ep_valid)
			dst_ep_idx = meta->pkt_init_dst_ep;
		else
			dst_ep_idx = -1;
	}

	sys = ipa3_ctx->ep[src_ep_idx].sys;

	if (!sys || !sys->ep->valid) {
		IPAERR_RL("pipe %d not valid\n", src_ep_idx);
		goto fail_pipe_not_valid;
	}

	num_frags = skb_shinfo(skb)->nr_frags;
	/*
	 * make sure TLV FIFO supports the needed frags.
	 * 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS.
	 * 1 descriptor needed for the linear portion of skb.
	 */
	gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client);
	if (unlikely(gsi_ep == NULL)) {
		IPAERR("failed to get EP %d GSI info\n", src_ep_idx);
		goto fail_gen;
	}
	max_desc = gsi_ep->ipa_if_tlv;
	if (gsi_ep->prefetch_mode == GSI_SMART_PRE_FETCH ||
	    gsi_ep->prefetch_mode == GSI_FREE_PRE_FETCH)
		max_desc -= gsi_ep->prefetch_threshold;
	if (num_frags + 3 > max_desc) {
		if (skb_linearize(skb)) {
			IPAERR("Failed to linearize skb with %d frags\n",
				num_frags);
			goto fail_gen;
		}
		num_frags = 0;
	}
	if (num_frags) {
		/* 1 desc for tag to resolve status out-of-order issue;
		 * 1 desc is needed for the linear portion of skb;
		 * 1 desc may be needed for the PACKET_INIT;
		 * 1 desc for each frag
		 */
		desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
		if (!desc) {
			IPAERR("failed to alloc desc array\n");
			goto fail_gen;
		}
	} else {
		memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
		desc = &_desc[0];
	}

	if (dst_ep_idx != -1) {
		int skb_idx;

		/* SW data path */
		data_idx = 0;
		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
			/*
			 * For a non-interrupt mode channel (where there is no
			 * event ring) TAG STATUS is used for completion
			 * notification: IPA will generate a status packet
			 * with tag info as a result of the TAG STATUS
			 * command.
			 */
			desc[data_idx].is_tag_status = true;
			data_idx++;
		}
		desc[data_idx].opcode = ipa3_ctx->pkt_init_imm_opcode;
		desc[data_idx].dma_address_valid = true;
		desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
		desc[data_idx].type = IPA_IMM_CMD_DESC;
		desc[data_idx].callback = NULL;
		data_idx++;
		desc[data_idx].pyld = skb->data;
		desc[data_idx].len = skb_headlen(skb);
		desc[data_idx].type = IPA_DATA_DESC_SKB;
		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
		desc[data_idx].user1 = skb;
		desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid &&
				meta->pkt_init_dst_ep_remote) ?
				src_ep_idx :
				dst_ep_idx;
		if (meta && meta->dma_address_valid) {
			desc[data_idx].dma_address_valid = true;
			desc[data_idx].dma_address = meta->dma_address;
		}
		skb_idx = data_idx;
		data_idx++;

		for (f = 0; f < num_frags; f++) {
			desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f];
			desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED;
			desc[data_idx + f].len =
				skb_frag_size(desc[data_idx + f].frag);
		}
		/* don't free skb till frag mappings are released */
		if (num_frags) {
			desc[data_idx + f - 1].callback =
				desc[skb_idx].callback;
			desc[data_idx + f - 1].user1 = desc[skb_idx].user1;
			desc[data_idx + f - 1].user2 = desc[skb_idx].user2;
			desc[skb_idx].callback = NULL;
		}

		if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
			IPAERR_RL("fail to send skb %pK num_frags %u SWP\n",
				skb, num_frags);
			goto fail_send;
		}
		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
	} else {
		/* HW data path */
		data_idx = 0;
		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
			/*
			 * For a non-interrupt mode channel (where there is no
			 * event ring) TAG STATUS is used for completion
			 * notification: IPA will generate a status packet
			 * with tag info as a result of the TAG STATUS
			 * command.
			 */
			desc[data_idx].is_tag_status = true;
			data_idx++;
		}
		desc[data_idx].pyld = skb->data;
		desc[data_idx].len = skb_headlen(skb);
		desc[data_idx].type = IPA_DATA_DESC_SKB;
		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
		desc[data_idx].user1 = skb;
		desc[data_idx].user2 = src_ep_idx;
		if (meta && meta->dma_address_valid) {
			desc[data_idx].dma_address_valid = true;
			desc[data_idx].dma_address = meta->dma_address;
		}
		if (num_frags == 0) {
			if (ipa3_send(sys, data_idx + 1, desc, true)) {
				IPAERR("fail to send skb %pK HWP\n", skb);
				goto fail_mem;
			}
		} else {
			for (f = 0; f < num_frags; f++) {
				desc[data_idx + f + 1].frag =
					&skb_shinfo(skb)->frags[f];
				desc[data_idx + f + 1].type =
					IPA_DATA_DESC_SKB_PAGED;
				desc[data_idx + f + 1].len =
					skb_frag_size(
						desc[data_idx + f + 1].frag);
			}
			/* don't free skb till frag mappings are released */
			desc[data_idx + f].callback = desc[data_idx].callback;
			desc[data_idx + f].user1 = desc[data_idx].user1;
			desc[data_idx + f].user2 = desc[data_idx].user2;
			desc[data_idx].callback = NULL;

			if (ipa3_send(sys, num_frags + data_idx + 1,
			    desc, true)) {
				IPAERR("fail to send skb %pK num_frags %u\n",
					skb, num_frags);
				goto fail_mem;
			}
		}
		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
	}

	if (num_frags) {
		kfree(desc);
		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
	}
	return 0;

fail_send:
	ipahal_destroy_imm_cmd(cmd_pyld);
fail_mem:
	if (num_frags)
		kfree(desc);
fail_gen:
	return -EFAULT;
fail_pipe_not_valid:
	return -EPIPE;
}
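
/*
 * Illustrative sketch only (not part of the driver): the skb ownership
 * rules of ipa3_tx_dp() as documented above. On success the skb is released
 * later (by the client_notify callback, or by the driver when none was
 * given); on failure it stays with the caller. IPA_CLIENT_USB_CONS is just
 * an example destination.
 */
#if 0
static int my_send(struct sk_buff *skb)
{
	int ret = ipa3_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL);

	if (ret)
		dev_kfree_skb_any(skb);	/* driver does not own it on error */
	return ret;
}
#endif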
static void ipa3_wq_handle_rx(struct work_struct *work)
{
	struct ipa3_sys_context *sys;

	sys = container_of(work, struct ipa3_sys_context, work);

	if (sys->napi_obj) {
		ipa_pm_activate_sync(sys->pm_hdl);
		napi_schedule(sys->napi_obj);
	} else
		ipa3_handle_rx(sys);
}
static void ipa3_wq_repl_rx(struct work_struct *work)
{
	struct ipa3_sys_context *sys;
	void *ptr;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	gfp_t flag = GFP_KERNEL;
	u32 next;
	u32 curr;

	sys = container_of(work, struct ipa3_sys_context, repl_work);
	atomic_set(&sys->repl->pending, 0);
	curr = atomic_read(&sys->repl->tail_idx);

begin:
	while (1) {
		next = (curr + 1) % sys->repl->capacity;
		if (next == atomic_read(&sys->repl->head_idx))
			goto fail_kmem_cache_alloc;

		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
			flag);
		if (!rx_pkt)
			goto fail_kmem_cache_alloc;

		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
		rx_pkt->sys = sys;

		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
		if (rx_pkt->data.skb == NULL)
			goto fail_skb_alloc;

		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
			sys->rx_buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
			pr_err_ratelimited("%s dma map fail %pK for %pK sys=%pK\n",
				__func__, (void *)rx_pkt->data.dma_addr,
				ptr, sys);
			goto fail_dma_mapping;
		}

		sys->repl->cache[curr] = rx_pkt;
		curr = next;
		/* ensure write is done before setting tail index */
		mb();
		atomic_set(&sys->repl->tail_idx, next);
	}

	return;

fail_dma_mapping:
	sys->free_skb(rx_pkt->data.skb);
fail_skb_alloc:
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	if (atomic_read(&sys->repl->tail_idx) ==
	    atomic_read(&sys->repl->head_idx)) {
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
		    sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
		pr_err_ratelimited("%s sys=%pK repl ring empty\n",
			__func__, sys);
		goto begin;
	}
}
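
/*
 * Illustrative sketch only (not part of the driver): the repl cache above
 * is a single-producer/single-consumer ring. The worker is the producer
 * (advances tail_idx), the replenish path is the consumer (advances
 * head_idx), and one slot is always left unused so that head == tail means
 * "empty" while (tail + 1) % capacity == head means "full".
 */
#if 0
static bool ring_empty(u32 head, u32 tail)
{
	return head == tail;
}

static bool ring_full(u32 head, u32 tail, u32 capacity)
{
	return ((tail + 1) % capacity) == head;
}
#endif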
static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
	gfp_t flag, bool is_tmp_alloc)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;

	flag |= __GFP_NOMEMALLOC;
	rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
		flag);
	if (unlikely(!rx_pkt))
		return NULL;

	rx_pkt->len = PAGE_SIZE << IPA_WAN_PAGE_ORDER;
	rx_pkt->page_data.page = __dev_alloc_pages(flag,
		IPA_WAN_PAGE_ORDER);
	if (unlikely(!rx_pkt->page_data.page))
		goto fail_page_alloc;

	rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
		rx_pkt->page_data.page, 0,
		rx_pkt->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ipa3_ctx->pdev,
	    rx_pkt->page_data.dma_addr)) {
		pr_err_ratelimited("%s dma map fail %pK for %pK\n",
			__func__, (void *)rx_pkt->page_data.dma_addr,
			rx_pkt->page_data.page);
		goto fail_dma_mapping;
	}
	rx_pkt->page_data.is_tmp_alloc = is_tmp_alloc;

	return rx_pkt;

fail_dma_mapping:
	__free_pages(rx_pkt->page_data.page, IPA_WAN_PAGE_ORDER);
fail_page_alloc:
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
	return NULL;
}
static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	u32 curr;

	for (curr = 0; curr < sys->page_recycle_repl->capacity; curr++) {
		rx_pkt = ipa3_alloc_rx_pkt_page(GFP_KERNEL, false);
		if (!rx_pkt) {
			IPAERR("ipa3_alloc_rx_pkt_page fails\n");
			ipa_assert();
			break;
		}
		rx_pkt->sys = sys;
		sys->page_recycle_repl->cache[curr] = rx_pkt;
	}
}
static void ipa3_wq_page_repl(struct work_struct *work)
{
	struct ipa3_sys_context *sys;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	u32 next;
	u32 curr;

	sys = container_of(work, struct ipa3_sys_context, repl_work);
	atomic_set(&sys->repl->pending, 0);
	curr = atomic_read(&sys->repl->tail_idx);

begin:
	while (1) {
		next = (curr + 1) % sys->repl->capacity;
		if (unlikely(next == atomic_read(&sys->repl->head_idx)))
			goto fail_kmem_cache_alloc;

		rx_pkt = ipa3_alloc_rx_pkt_page(GFP_KERNEL, true);
		if (unlikely(!rx_pkt)) {
			IPAERR("ipa3_alloc_rx_pkt_page fails\n");
			break;
		}
		rx_pkt->sys = sys;
		sys->repl->cache[curr] = rx_pkt;
		curr = next;
		/* ensure write is done before setting tail index */
		mb();
		atomic_set(&sys->repl->tail_idx, next);
	}

	return;

fail_kmem_cache_alloc:
	if (atomic_read(&sys->repl->tail_idx) ==
	    atomic_read(&sys->repl->head_idx)) {
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
		    sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
		pr_err_ratelimited("%s sys=%pK wq_repl ring empty\n",
			__func__, sys);
		goto begin;
	}
}
static inline void __trigger_repl_work(struct ipa3_sys_context *sys)
{
	int tail, head, avail;

	if (atomic_read(&sys->repl->pending))
		return;

	tail = atomic_read(&sys->repl->tail_idx);
	head = atomic_read(&sys->repl->head_idx);
	avail = (tail - head) % sys->repl->capacity;

	if (avail < sys->repl->capacity / 4) {
		atomic_set(&sys->repl->pending, 1);
		queue_work(sys->repl_wq, &sys->repl_work);
	}
}
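
/*
 * Illustrative sketch only (not part of the driver): the occupancy math
 * used by __trigger_repl_work(). With capacity 64, tail 10 and head 60
 * the ring holds (10 - 60 + 64) % 64 = 14 entries. Note the driver
 * computes (tail - head) % capacity on plain ints, which goes negative
 * when tail < head; a negative value is below any threshold, so the
 * refill work is still scheduled in the wraparound case.
 */
#if 0
static int ring_occupancy(int head, int tail, int capacity)
{
	return (tail - head + capacity) % capacity;
}
#endif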
static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ret;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
	u32 curr;
	u32 curr_wq;
	int idx = 0;
	struct page *cur_page;
	u32 stats_i = 0;

	/* start replenish only when buffers go lower than the threshold */
	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
		return;

	stats_i = (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) ? 0 : 1;

	spin_lock_bh(&sys->spinlock);
	rx_len_cached = sys->len;
	curr = atomic_read(&sys->page_recycle_repl->head_idx);
	curr_wq = atomic_read(&sys->repl->head_idx);

	while (rx_len_cached < sys->rx_pool_sz) {
		cur_page = sys->page_recycle_repl->cache[curr]->page_data.page;
		/* found an idle page that can be reused */
		if (page_ref_count(cur_page) == 1) {
			page_ref_inc(cur_page);
			rx_pkt = sys->page_recycle_repl->cache[curr];
			curr = (++curr == sys->page_recycle_repl->capacity) ?
				0 : curr;
		} else {
			/*
			 * Could not find an idle page at curr index;
			 * take a temporary buffer prepared by the repl
			 * worker instead.
			 */
			if (curr_wq == atomic_read(&sys->repl->tail_idx))
				break;

			ipa3_ctx->stats.page_recycle_stats[stats_i].tmp_alloc++;
			rx_pkt = sys->repl->cache[curr_wq];
			curr_wq = (++curr_wq == sys->repl->capacity) ?
				0 : curr_wq;
		}

		dma_sync_single_for_device(ipa3_ctx->pdev,
			rx_pkt->page_data.dma_addr,
			rx_pkt->len, DMA_FROM_DEVICE);
		gsi_xfer_elem_array[idx].addr = rx_pkt->page_data.dma_addr;
		gsi_xfer_elem_array[idx].len = rx_pkt->len;
		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
		rx_len_cached++;
		idx++;
		ipa3_ctx->stats.page_recycle_stats[stats_i].total_replenished++;
		/*
		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
		 * If this size is reached we need to queue the xfers.
		 */
		if (idx == IPA_REPL_XFER_MAX) {
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				gsi_xfer_elem_array, false);
			if (ret != GSI_STATUS_SUCCESS) {
				/* we don't expect this to happen */
				IPAERR("failed to provide buffer: %d\n", ret);
				ipa_assert();
				break;
			}
			idx = 0;
		}
	}
	/* only ring doorbell once here */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
		gsi_xfer_elem_array, true);
	if (ret == GSI_STATUS_SUCCESS) {
		/* ensure write is done before setting head index */
		mb();
		atomic_set(&sys->repl->head_idx, curr_wq);
		atomic_set(&sys->page_recycle_repl->head_idx, curr);
		sys->len = rx_len_cached;
	} else {
		/* we don't expect this to happen */
		IPAERR("failed to provide buffer: %d\n", ret);
		ipa_assert();
	}
	spin_unlock_bh(&sys->spinlock);

	__trigger_repl_work(sys);

	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
		    sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
		else
			WARN_ON(1);
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
			msecs_to_jiffies(1));
	}
}
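
/*
 * Illustrative sketch only (not part of the driver): the idle-page test
 * used by ipa3_replenish_rx_page_recycle(). A pool page is recyclable when
 * the pool holds the only reference; posting it to HW takes a second
 * reference, and the page becomes idle again once the network stack drops
 * its reference.
 */
#if 0
static bool page_is_idle(struct page *page)
{
	return page_ref_count(page) == 1;
}
#endif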
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
	struct ipa3_rx_pkt_wrapper *tmp;
	int ret;
	struct gsi_xfer_elem gsi_xfer_elem_one;
	u32 rx_len_cached = 0;

	IPADBG_LOW("\n");

	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
	rx_len_cached = sys->len;

	if (rx_len_cached < sys->rx_pool_sz) {
		list_for_each_entry_safe(rx_pkt, tmp,
			&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
			list_del(&rx_pkt->link);

			if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0)
				ipa3_ctx->wc_memb.wlan_comm_free_cnt--;

			rx_pkt->len = 0;
			rx_pkt->sys = sys;

			memset(&gsi_xfer_elem_one, 0,
				sizeof(gsi_xfer_elem_one));
			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
			gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
			gsi_xfer_elem_one.xfer_user_data = rx_pkt;

			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
				&gsi_xfer_elem_one, true);
			if (ret) {
				IPAERR("failed to provide buffer: %d\n", ret);
				goto fail_provide_rx_buffer;
			}

			rx_len_cached = ++sys->len;

			if (rx_len_cached >= sys->rx_pool_sz) {
				spin_unlock_bh(
					&ipa3_ctx->wc_memb.wlan_spinlock);
				return;
			}
		}
	}
	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);

	if (rx_len_cached < sys->rx_pool_sz &&
	    ipa3_ctx->wc_memb.wlan_comm_total_cnt <
	    IPA_WLAN_COMM_RX_POOL_HIGH) {
		ipa3_replenish_rx_cache(sys);
		ipa3_ctx->wc_memb.wlan_comm_total_cnt +=
			(sys->rx_pool_sz - rx_len_cached);
	}

	return;

fail_provide_rx_buffer:
	/* rx_pkt was already unlinked above; don't list_del() it twice */
	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
}
static void ipa3_cleanup_wlan_rx_common_cache(void)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	struct ipa3_rx_pkt_wrapper *tmp;

	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);

	list_for_each_entry_safe(rx_pkt, tmp,
		&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
		list_del(&rx_pkt->link);
		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
			IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_pkt->data.skb);
		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
		ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
		ipa3_ctx->wc_memb.wlan_comm_total_cnt--;
	}
	ipa3_ctx->wc_memb.total_tx_pkts_freed = 0;

	if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0)
		IPAERR("wlan comm buff free cnt: %d\n",
			ipa3_ctx->wc_memb.wlan_comm_free_cnt);

	if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0)
		IPAERR("wlan comm buff total cnt: %d\n",
			ipa3_ctx->wc_memb.wlan_comm_total_cnt);

	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
}
static void ipa3_alloc_wlan_rx_common_cache(u32 size)
{
	void *ptr;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int rx_len_cached = 0;
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt;
	while (rx_len_cached < size) {
		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
			flag);
		if (!rx_pkt)
			goto fail_kmem_cache_alloc;

		INIT_LIST_HEAD(&rx_pkt->link);
		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);

		rx_pkt->data.skb =
			ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
				flag);
		if (rx_pkt->data.skb == NULL) {
			IPAERR("failed to alloc skb\n");
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
			IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
			IPAERR("dma_map_single failure %pK for %pK\n",
				(void *)rx_pkt->data.dma_addr, ptr);
			goto fail_dma_mapping;
		}

		spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
		list_add_tail(&rx_pkt->link,
			&ipa3_ctx->wc_memb.wlan_comm_desc_list);
		rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
		ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
		spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
	}

	return;

fail_dma_mapping:
	dev_kfree_skb_any(rx_pkt->data.skb);
fail_skb_alloc:
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	return;
}
/**
 * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.
 *
 * The function allocates buffers from the rx_pkt_wrapper_cache until the
 * system pipe's Rx pool (sys->rx_pool_sz) is full. For each buffer it will:
 * - Allocate a packet wrapper from the cache
 * - Initialize the packet's work struct
 * - Allocate the packet's socket buffer (skb)
 * - Fill the packet's skb with data
 * - Make the packet DMAable
 * - Queue the packet to the GSI channel
 */
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
{
	void *ptr;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ret;
	int idx = 0;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	rx_len_cached = sys->len;

	/* start replenish only when buffers go lower than the threshold */
	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
		return;

	while (rx_len_cached < sys->rx_pool_sz) {
		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
			flag);
		if (!rx_pkt)
			goto fail_kmem_cache_alloc;

		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
		rx_pkt->sys = sys;

		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
		if (rx_pkt->data.skb == NULL) {
			IPAERR("failed to alloc skb\n");
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
			sys->rx_buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
			IPAERR("dma_map_single failure %pK for %pK\n",
				(void *)rx_pkt->data.dma_addr, ptr);
			goto fail_dma_mapping;
		}

		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
		idx++;
		rx_len_cached++;
		/*
		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
		 * If this size is reached we need to queue the xfers.
		 */
		if (idx == IPA_REPL_XFER_MAX) {
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				gsi_xfer_elem_array, false);
			if (ret != GSI_STATUS_SUCCESS) {
				/* we don't expect this to happen */
				IPAERR("failed to provide buffer: %d\n", ret);
				WARN_ON(1);
				break;
			}
			idx = 0;
		}
	}
	goto done;

fail_dma_mapping:
	sys->free_skb(rx_pkt->data.skb);
fail_skb_alloc:
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	if (rx_len_cached == 0)
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
			msecs_to_jiffies(1));
done:
	/* only ring doorbell once here */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
		gsi_xfer_elem_array, true);
	if (ret == GSI_STATUS_SUCCESS) {
		sys->len = rx_len_cached;
	} else {
		/* we don't expect this to happen */
		IPAERR("failed to provide buffer: %d\n", ret);
		WARN_ON(1);
	}
}
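
/*
 * Illustrative sketch only (not part of the driver): the doorbell batching
 * pattern shared by the replenish functions. Elements are queued with the
 * doorbell suppressed (last argument false) until the scratch array fills;
 * the doorbell is rung exactly once at the end so HW sees a single wake-up
 * for the whole batch. queue_batch() is a hypothetical helper.
 */
#if 0
static int queue_batch(unsigned long chan_hdl,
	struct gsi_xfer_elem *xfers, int n)
{
	int i = 0;
	int ret;

	while (n - i > IPA_REPL_XFER_MAX) {
		ret = gsi_queue_xfer(chan_hdl, IPA_REPL_XFER_MAX,
			&xfers[i], false);
		if (ret != GSI_STATUS_SUCCESS)
			return ret;
		i += IPA_REPL_XFER_MAX;
	}
	return gsi_queue_xfer(chan_hdl, n - i, &xfers[i], true);
}
#endif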
static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
{
	void *ptr;
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ret;
	int idx = 0;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	/* start replenish only when buffers go lower than the threshold */
	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
		return;

	rx_len_cached = sys->len;
	while (rx_len_cached < sys->rx_pool_sz) {
		if (list_empty(&sys->rcycl_list)) {
			rx_pkt = kmem_cache_zalloc(
				ipa3_ctx->rx_pkt_wrapper_cache, flag);
			if (!rx_pkt)
				goto fail_kmem_cache_alloc;

			INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
			rx_pkt->sys = sys;

			rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
			if (rx_pkt->data.skb == NULL) {
				IPAERR("failed to alloc skb\n");
				kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
					rx_pkt);
				goto fail_kmem_cache_alloc;
			}
			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
			if (dma_mapping_error(ipa3_ctx->pdev,
			    rx_pkt->data.dma_addr)) {
				IPAERR("dma_map_single failure %pK for %pK\n",
					(void *)rx_pkt->data.dma_addr, ptr);
				goto fail_dma_mapping;
			}
		} else {
			spin_lock_bh(&sys->spinlock);
			rx_pkt = list_first_entry(&sys->rcycl_list,
				struct ipa3_rx_pkt_wrapper, link);
			list_del(&rx_pkt->link);
			spin_unlock_bh(&sys->spinlock);
			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
			if (dma_mapping_error(ipa3_ctx->pdev,
			    rx_pkt->data.dma_addr)) {
				IPAERR("dma_map_single failure %pK for %pK\n",
					(void *)rx_pkt->data.dma_addr, ptr);
				goto fail_dma_mapping;
			}
		}

		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
		idx++;
		rx_len_cached++;
		/*
		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
		 * If this size is reached we need to queue the xfers.
		 */
		if (idx == IPA_REPL_XFER_MAX) {
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				gsi_xfer_elem_array, false);
			if (ret != GSI_STATUS_SUCCESS) {
				/* we don't expect this to happen */
				IPAERR("failed to provide buffer: %d\n", ret);
				WARN_ON(1);
				break;
			}
			idx = 0;
		}
	}
	goto done;

fail_dma_mapping:
	spin_lock_bh(&sys->spinlock);
	/* re-initialize the link before putting the node back on the list */
	INIT_LIST_HEAD(&rx_pkt->link);
	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
	spin_unlock_bh(&sys->spinlock);
fail_kmem_cache_alloc:
	if (rx_len_cached == 0)
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
			msecs_to_jiffies(1));
done:
	/* only ring doorbell once here */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
		gsi_xfer_elem_array, true);
	if (ret == GSI_STATUS_SUCCESS) {
		sys->len = rx_len_cached;
	} else {
		/* we don't expect this to happen */
		IPAERR("failed to provide buffer: %d\n", ret);
		WARN_ON(1);
	}
}
static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ret;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
	u32 curr;
	int idx = 0;

	/* start replenish only when buffers go lower than the threshold */
	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
		return;

	spin_lock_bh(&sys->spinlock);
	rx_len_cached = sys->len;
	curr = atomic_read(&sys->repl->head_idx);

	while (rx_len_cached < sys->rx_pool_sz) {
		if (curr == atomic_read(&sys->repl->tail_idx))
			break;
		rx_pkt = sys->repl->cache[curr];
		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
		rx_len_cached++;
		curr = (++curr == sys->repl->capacity) ? 0 : curr;
		idx++;
		/*
		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
		 * If this size is reached we need to queue the xfers.
		 */
		if (idx == IPA_REPL_XFER_MAX) {
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				gsi_xfer_elem_array, false);
			if (ret != GSI_STATUS_SUCCESS) {
				/* we don't expect this to happen */
				IPAERR("failed to provide buffer: %d\n", ret);
				WARN_ON(1);
				break;
			}
			idx = 0;
		}
	}
	/* only ring doorbell once here */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
		gsi_xfer_elem_array, true);
	if (ret == GSI_STATUS_SUCCESS) {
		/* ensure write is done before setting head index */
		mb();
		atomic_set(&sys->repl->head_idx, curr);
		sys->len = rx_len_cached;
	} else {
		/* we don't expect this to happen */
		IPAERR("failed to provide buffer: %d\n", ret);
		WARN_ON(1);
	}
	spin_unlock_bh(&sys->spinlock);

	__trigger_repl_work(sys);

	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
		    sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
		else
			WARN_ON(1);
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
			msecs_to_jiffies(1));
	}
}
static void ipa3_replenish_rx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa3_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work);
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	sys->repl_hdlr(sys);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
/**
 * free_rx_pkt() - function to free the skb and rx_pkt_wrapper
 *
 * @chan_user_data: ipa_sys_context used for skb size and skb_free func
 * @xfer_user_data: rx_pkt wrapper to be freed
 *
 */
static void free_rx_pkt(void *chan_user_data, void *xfer_user_data)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt = (struct ipa3_rx_pkt_wrapper *)
		xfer_user_data;
	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)
		chan_user_data;

	dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
		sys->rx_buff_sz, DMA_FROM_DEVICE);
	sys->free_skb(rx_pkt->data.skb);
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
/**
 * free_rx_page() - function to free the page and rx_pkt_wrapper
 *
 * @chan_user_data: ipa_sys_context used for skb size and skb_free func
 * @xfer_user_data: rx_pkt wrapper to be freed
 *
 */
static void free_rx_page(void *chan_user_data, void *xfer_user_data)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt = (struct ipa3_rx_pkt_wrapper *)
		xfer_user_data;
	struct ipa3_sys_context *sys = rx_pkt->sys;
	int i;

	for (i = 0; i < sys->page_recycle_repl->capacity; i++)
		if (sys->page_recycle_repl->cache[i] == rx_pkt)
			break;
	if (i < sys->page_recycle_repl->capacity) {
		page_ref_dec(rx_pkt->page_data.page);
		sys->page_recycle_repl->cache[i] = NULL;
	}
	dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
		rx_pkt->len, DMA_FROM_DEVICE);
	__free_pages(rx_pkt->page_data.page,
		IPA_WAN_PAGE_ORDER);
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
/**
 * ipa3_cleanup_rx() - release RX queue resources
 *
 */
static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	struct ipa3_rx_pkt_wrapper *r;
	u32 head;
	u32 tail;
	int i;

	/*
	 * buffers not consumed by gsi are cleaned up using cleanup callback
	 * provided to gsi
	 */

	spin_lock_bh(&sys->spinlock);
	list_for_each_entry_safe(rx_pkt, r,
		&sys->rcycl_list, link) {
		list_del(&rx_pkt->link);
		if (rx_pkt->data.dma_addr)
			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
				sys->rx_buff_sz, DMA_FROM_DEVICE);
		else
			IPADBG("DMA address already freed\n");
		sys->free_skb(rx_pkt->data.skb);
		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
	}
	spin_unlock_bh(&sys->spinlock);

	if (sys->repl) {
		head = atomic_read(&sys->repl->head_idx);
		tail = atomic_read(&sys->repl->tail_idx);
		while (head != tail) {
			rx_pkt = sys->repl->cache[head];
			if (!ipa3_ctx->ipa_wan_skb_page) {
				dma_unmap_single(ipa3_ctx->pdev,
					rx_pkt->data.dma_addr,
					sys->rx_buff_sz,
					DMA_FROM_DEVICE);
				sys->free_skb(rx_pkt->data.skb);
			} else {
				dma_unmap_page(ipa3_ctx->pdev,
					rx_pkt->page_data.dma_addr,
					rx_pkt->len,
					DMA_FROM_DEVICE);
				__free_pages(rx_pkt->page_data.page,
					IPA_WAN_PAGE_ORDER);
			}
			kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
				rx_pkt);
			head = (head + 1) % sys->repl->capacity;
		}
		kfree(sys->repl->cache);
		kfree(sys->repl);
	}

	if (sys->page_recycle_repl) {
		for (i = 0; i < sys->page_recycle_repl->capacity; i++) {
			rx_pkt = sys->page_recycle_repl->cache[i];
			if (rx_pkt) {
				dma_unmap_page(ipa3_ctx->pdev,
					rx_pkt->page_data.dma_addr,
					rx_pkt->len,
					DMA_FROM_DEVICE);
				__free_pages(rx_pkt->page_data.page,
					IPA_WAN_PAGE_ORDER);
				kmem_cache_free(
					ipa3_ctx->rx_pkt_wrapper_cache,
					rx_pkt);
			}
		}
		kfree(sys->page_recycle_repl->cache);
		kfree(sys->page_recycle_repl);
	}
}
static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
{
	struct sk_buff *skb2 = NULL;

	if (!ipa3_ctx->lan_rx_napi_enable)
		skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM,
			GFP_KERNEL);
	else
		skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM,
			GFP_ATOMIC);
	if (likely(skb2)) {
		/* Set the data pointer */
		skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
		memcpy(skb2->data, skb->data, len);
		skb2->len = len;
		skb_set_tail_pointer(skb2, len);
	}

	return skb2;
}
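
/*
 * Illustrative sketch only (not part of the driver): why the copy above
 * reserves IPA_RX_BUFF_CLIENT_HEADROOM before the data. A client that must
 * prepend its own header can then skb_push() without reallocating; the
 * helper below is hypothetical.
 */
#if 0
static void my_prepend_hdr(struct sk_buff *skb2, const void *hdr, int hdr_len)
{
	/* assumes hdr_len <= IPA_RX_BUFF_CLIENT_HEADROOM */
	memcpy(skb_push(skb2, hdr_len), hdr, hdr_len);
}
#endif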
static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
		struct ipa3_sys_context *sys)
{
	struct ipahal_pkt_status status;
	u32 pkt_status_sz;
	struct sk_buff *skb2;
	int pad_len_byte;
	int len;
	unsigned char *buf;
	int src_pipe;
	unsigned int used = *(unsigned int *)skb->cb;
	unsigned int used_align = ALIGN(used, 32);
	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
	struct ipa3_tx_pkt_wrapper *tx_pkt = NULL;
	unsigned long ptr;

	IPA_DUMP_BUFF(skb->data, 0, skb->len);

	if (skb->len == 0) {
		IPAERR("ZLT packet arrived to AP\n");
		goto out;
	}

	if (sys->len_partial) {
		IPADBG_LOW("len_partial %d\n", sys->len_partial);
		buf = skb_push(skb, sys->len_partial);
		memcpy(buf, sys->prev_skb->data, sys->len_partial);
		sys->len_partial = 0;
		sys->free_skb(sys->prev_skb);
		sys->prev_skb = NULL;
		goto begin;
	}

	/* this pipe has TX comp (status only) + mux-ed LAN RX data
	 * (status+data)
	 */
	if (sys->len_rem) {
		IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
			sys->len_pad);
		if (sys->len_rem <= skb->len) {
			if (sys->prev_skb) {
				if (!ipa3_ctx->lan_rx_napi_enable)
					skb2 = skb_copy_expand(sys->prev_skb,
						0, sys->len_rem, GFP_KERNEL);
				else
					skb2 = skb_copy_expand(sys->prev_skb,
						0, sys->len_rem, GFP_ATOMIC);
				if (likely(skb2)) {
					memcpy(skb_put(skb2, sys->len_rem),
						skb->data, sys->len_rem);
					skb_trim(skb2,
						skb2->len - sys->len_pad);
					skb2->truesize = skb2->len +
						sizeof(struct sk_buff);
					if (sys->drop_packet)
						dev_kfree_skb_any(skb2);
					else
						sys->ep->client_notify(
							sys->ep->priv,
							IPA_RECEIVE,
							(unsigned long)(skb2));
				} else {
					IPAERR("copy expand failed\n");
				}
				dev_kfree_skb_any(sys->prev_skb);
			}
			skb_pull(skb, sys->len_rem);
			sys->prev_skb = NULL;
			sys->len_rem = 0;
			sys->len_pad = 0;
		} else {
			if (sys->prev_skb) {
				if (!ipa3_ctx->lan_rx_napi_enable)
					skb2 = skb_copy_expand(sys->prev_skb, 0,
						skb->len, GFP_KERNEL);
				else
					skb2 = skb_copy_expand(sys->prev_skb, 0,
						skb->len, GFP_ATOMIC);
				if (likely(skb2)) {
					memcpy(skb_put(skb2, skb->len),
						skb->data, skb->len);
				} else {
					IPAERR("copy expand failed\n");
				}
				dev_kfree_skb_any(sys->prev_skb);
				sys->prev_skb = skb2;
			}
			sys->len_rem -= skb->len;
			goto out;
		}
	}

begin:
	pkt_status_sz = ipahal_pkt_status_get_size();
	while (skb->len) {
		sys->drop_packet = false;
		IPADBG_LOW("LEN_REM %d\n", skb->len);

		if (skb->len < pkt_status_sz) {
			WARN_ON(sys->prev_skb != NULL);
			IPADBG_LOW("status straddles buffer\n");
			if (!ipa3_ctx->lan_rx_napi_enable)
				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
			else
				sys->prev_skb = skb_copy(skb, GFP_ATOMIC);
			sys->len_partial = skb->len;
			goto out;
		}

		ipahal_pkt_status_parse(skb->data, &status);
		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
			status.status_opcode, status.endp_src_idx,
			status.endp_dest_idx, status.pkt_len);

		if (sys->status_stat) {
			sys->status_stat->status[sys->status_stat->curr] =
				status;
			sys->status_stat->curr++;
			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
				sys->status_stat->curr = 0;
		}

		switch (status.status_opcode) {
		case IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET:
		case IPAHAL_PKT_STATUS_OPCODE_PACKET:
		case IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET:
		case IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS:
			break;
		case IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE:
			IPAERR_RL("Frag packets received on lan consumer\n");
			IPAERR_RL("STATUS opcode=%d src=%d dst=%d src ip=%x\n",
				status.status_opcode, status.endp_src_idx,
				status.endp_dest_idx, status.src_ip_addr);
			skb_pull(skb, pkt_status_sz);
			continue;
		default:
			IPAERR_RL("unsupported opcode(%d)\n",
				status.status_opcode);
			skb_pull(skb, pkt_status_sz);
			continue;
		}

		IPA_STATS_EXCP_CNT(status.exception,
			ipa3_ctx->stats.rx_excp_pkts);
		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
		    status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
			IPAERR_RL("status fields invalid\n");
			IPAERR_RL("STATUS opcode=%d src=%d dst=%d len=%d\n",
				status.status_opcode, status.endp_src_idx,
				status.endp_dest_idx, status.pkt_len);
			WARN_ON(1);
			/* HW gave an unexpected status */
			ipa_assert();
		}
		if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
		    IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) {
			struct ipa3_tag_completion *comp;

			IPADBG_LOW("TAG packet arrived\n");
			if (status.tag_info == IPA_COOKIE) {
				skb_pull(skb, pkt_status_sz);
				if (skb->len < sizeof(comp)) {
					IPAERR("TAG arrived without packet\n");
					goto out;
				}
				memcpy(&comp, skb->data, sizeof(comp));
				skb_pull(skb, sizeof(comp));
				complete(&comp->comp);
				if (atomic_dec_return(&comp->cnt) == 0)
					kfree(comp);
				continue;
			} else {
				ptr = tag_to_pointer_wa(status.tag_info);
				tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr;
				IPADBG_LOW("tx_pkt recv = %pK\n", tx_pkt);
			}
		}
		if (status.pkt_len == 0) {
			IPADBG_LOW("Skip aggr close status\n");
			skb_pull(skb, pkt_status_sz);
			IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close);
			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
			continue;
		}

		if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) {
			/* RX data */
			src_pipe = status.endp_src_idx;

			/*
			 * A packet which is received back to the AP after
			 * there was no route match.
			 */
			if (status.exception ==
			    IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
			    ipahal_is_rule_miss_id(status.rt_rule_id))
				sys->drop_packet = true;

			if (skb->len == pkt_status_sz &&
			    status.exception ==
			    IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
				WARN_ON(sys->prev_skb != NULL);
				IPADBG_LOW("Ins header in next buffer\n");
				if (!ipa3_ctx->lan_rx_napi_enable)
					sys->prev_skb = skb_copy(skb,
						GFP_KERNEL);
				else
					sys->prev_skb = skb_copy(skb,
						GFP_ATOMIC);
				sys->len_partial = skb->len;
				goto out;
			}

			pad_len_byte = ((status.pkt_len + 3) & ~3) -
				status.pkt_len;
			len = status.pkt_len + pad_len_byte;
			IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
				status.pkt_len, len);

			if (status.exception ==
			    IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) {
				IPADBG_LOW(
					"Dropping packet on DeAggr Exception\n");
				sys->drop_packet = true;
			}

			skb2 = ipa3_skb_copy_for_client(skb,
				min(status.pkt_len + pkt_status_sz, skb->len));
			if (likely(skb2)) {
				if (skb->len < len + pkt_status_sz) {
					IPADBG_LOW("SPL skb len %d len %d\n",
						skb->len, len);
					sys->prev_skb = skb2;
					sys->len_rem = len - skb->len +
						pkt_status_sz;
					sys->len_pad = pad_len_byte;
					skb_pull(skb, skb->len);
				} else {
					skb_trim(skb2, status.pkt_len +
						pkt_status_sz);
					IPADBG_LOW("rx avail for %d\n",
						status.endp_dest_idx);
					if (sys->drop_packet) {
						dev_kfree_skb_any(skb2);
					} else if (status.pkt_len >
						IPA_GENERIC_AGGR_BYTE_LIMIT *
						1024) {
						IPAERR("packet size invalid\n");
						IPAERR("STATUS opcode=%d\n",
							status.status_opcode);
						IPAERR("src=%d dst=%d len=%d\n",
							status.endp_src_idx,
							status.endp_dest_idx,
							status.pkt_len);
						/* Unexpected HW status */
						ipa_assert();
					} else {
						skb2->truesize = skb2->len +
							sizeof(struct sk_buff) +
							(ALIGN(len +
							pkt_status_sz, 32) *
							unused / used_align);
						sys->ep->client_notify(
							sys->ep->priv,
							IPA_RECEIVE,
							(unsigned long)(skb2));
					}
					skb_pull(skb, len + pkt_status_sz);
				}
			} else {
				IPAERR("fail to alloc skb\n");
				if (skb->len < len) {
  2601. sys->prev_skb = NULL;
  2602. sys->len_rem = len - skb->len +
  2603. pkt_status_sz;
  2604. sys->len_pad = pad_len_byte;
  2605. skb_pull(skb, skb->len);
  2606. } else {
  2607. skb_pull(skb, len + pkt_status_sz);
  2608. }
  2609. }
  2610. /* TX comp */
  2611. ipa3_wq_write_done_status(src_pipe, tx_pkt);
  2612. IPADBG_LOW("tx comp imp for %d\n", src_pipe);
  2613. } else {
  2614. /* TX comp */
  2615. ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt);
  2616. IPADBG_LOW("tx comp exp for %d\n",
  2617. status.endp_src_idx);
  2618. skb_pull(skb, pkt_status_sz);
  2619. IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl);
  2620. IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
  2621. [IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
  2622. }
  2623. tx_pkt = NULL;
  2624. }
  2625. out:
  2626. ipa3_skb_recycle(skb);
  2627. return 0;
  2628. }
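/**
 * ipa3_join_prev_skb() - append the first @len bytes of @skb to a copy of
 * @prev_skb
 *
 * Allocates an expanded copy of @prev_skb, appends @len bytes from the
 * start of @skb to it and frees @prev_skb. Returns the joined skb, or NULL
 * if the copy-expand allocation failed (in which case @prev_skb is still
 * freed and the partial payload is lost).
 */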
static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb,
	struct sk_buff *skb, unsigned int len)
{
	struct sk_buff *skb2;

	skb2 = skb_copy_expand(prev_skb, 0, len, GFP_KERNEL);
	if (likely(skb2)) {
		memcpy(skb_put(skb2, len), skb->data, len);
	} else {
		IPAERR("copy expand failed\n");
		skb2 = NULL;
	}
	dev_kfree_skb_any(prev_skb);

	return skb2;
}

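/**
 * ipa3_wan_rx_handle_splt_pyld() - consume the continuation of a payload
 * that was split across hardware buffers
 *
 * If the remainder fits in @skb, join it with sys->prev_skb, strip the
 * status element and hand the complete packet to the WAN client; otherwise
 * join what is available and keep waiting for the next buffer.
 */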
static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
	struct ipa3_sys_context *sys)
{
	struct sk_buff *skb2;

	IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len);
	if (sys->len_rem <= skb->len) {
		if (sys->prev_skb) {
			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
				sys->len_rem);
			if (likely(skb2)) {
				IPADBG_LOW(
					"removing Status element from skb and sending to WAN client");
				skb_pull(skb2, ipahal_pkt_status_get_size());
				skb2->truesize = skb2->len +
					sizeof(struct sk_buff);
				sys->ep->client_notify(sys->ep->priv,
					IPA_RECEIVE,
					(unsigned long)(skb2));
			}
		}
		skb_pull(skb, sys->len_rem);
		sys->prev_skb = NULL;
		sys->len_rem = 0;
	} else {
		if (sys->prev_skb) {
			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
				skb->len);
			sys->prev_skb = skb2;
		}
		sys->len_rem -= skb->len;
		skb_pull(skb, skb->len);
	}
}

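/**
 * ipa3_wan_rx_pyld_hdlr() - parse an aggregated WAN RX buffer
 *
 * Walks the buffer status-by-status: each status element is followed by a
 * QMAP header whose length field (big endian) carries the padded packet
 * length, optionally followed by a checksum trailer. Each complete frame is
 * cloned and passed to the WAN client; a frame that spills into the next
 * buffer is parked in sys->prev_skb/sys->len_rem and finished later by
 * ipa3_wan_rx_handle_splt_pyld().
 */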
static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
	struct ipa3_sys_context *sys)
{
	struct ipahal_pkt_status status;
	unsigned char *skb_data;
	u32 pkt_status_sz;
	struct sk_buff *skb2;
	u16 pkt_len_with_pad;
	u32 qmap_hdr;
	int checksum_trailer_exists;
	int frame_len;
	int ep_idx;
	unsigned int used = *(unsigned int *)skb->cb;
	unsigned int used_align = ALIGN(used, 32);
	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;

	IPA_DUMP_BUFF(skb->data, 0, skb->len);
	if (skb->len == 0) {
		IPAERR("ZLT\n");
		goto bail;
	}

	if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
		sys->ep->client_notify(sys->ep->priv,
			IPA_RECEIVE, (unsigned long)(skb));
		return 0;
	}
	if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
		IPAERR("Recycle should enable only with GRO Aggr\n");
		ipa_assert();
	}

	/*
	 * payload splits across 2 buff or more,
	 * take the start of the payload from prev_skb
	 */
	if (sys->len_rem)
		ipa3_wan_rx_handle_splt_pyld(skb, sys);

	pkt_status_sz = ipahal_pkt_status_get_size();
	while (skb->len) {
		IPADBG_LOW("LEN_REM %d\n", skb->len);
		if (skb->len < pkt_status_sz) {
			IPAERR("status straddles buffer\n");
			WARN_ON(1);
			goto bail;
		}
		ipahal_pkt_status_parse(skb->data, &status);
		skb_data = skb->data;
		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
			status.status_opcode, status.endp_src_idx,
			status.endp_dest_idx, status.pkt_len);

		if (sys->status_stat) {
			sys->status_stat->status[sys->status_stat->curr] =
				status;
			sys->status_stat->curr++;
			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
				sys->status_stat->curr = 0;
		}

		if ((status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
			(status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
			(status.status_opcode !=
			IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
			IPAERR("unsupported opcode(%d)\n",
				status.status_opcode);
			skb_pull(skb, pkt_status_sz);
			continue;
		}

		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
			IPAERR("status fields invalid\n");
			WARN_ON(1);
			goto bail;
		}
		if (status.pkt_len == 0) {
			IPADBG_LOW("Skip aggr close status\n");
			skb_pull(skb, pkt_status_sz);
			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts);
			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
			continue;
		}
		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
		if (status.endp_dest_idx != ep_idx) {
			IPAERR("expected endp_dest_idx %d received %d\n",
				ep_idx, status.endp_dest_idx);
			WARN_ON(1);
			goto bail;
		}
		/* RX data */
		if (skb->len == pkt_status_sz) {
			IPAERR("Ins header in next buffer\n");
			WARN_ON(1);
			goto bail;
		}
		qmap_hdr = *(u32 *)(skb_data + pkt_status_sz);
		/*
		 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
		 * header
		 */
		/* QMAP is BE: convert the pkt_len field from BE to LE */
		pkt_len_with_pad = ntohs((qmap_hdr >> 16) & 0xffff);
		IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad);
		/* get the CHECKSUM_PROCESS bit */
		checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
			IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status);
		IPADBG_LOW("checksum_trailer_exists %d\n",
			checksum_trailer_exists);

		frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH +
			pkt_len_with_pad;
		if (checksum_trailer_exists)
			frame_len += IPA_DL_CHECKSUM_LENGTH;
		IPADBG_LOW("frame_len %d\n", frame_len);

		skb2 = skb_clone(skb, GFP_KERNEL);
		if (likely(skb2)) {
			/*
			 * the len of actual data is smaller than expected
			 * payload split across 2 buff
			 */
			if (skb->len < frame_len) {
				IPADBG_LOW("SPL skb len %d len %d\n",
					skb->len, frame_len);
				sys->prev_skb = skb2;
				sys->len_rem = frame_len - skb->len;
				skb_pull(skb, skb->len);
			} else {
				skb_trim(skb2, frame_len);
				IPADBG_LOW("rx avail for %d\n",
					status.endp_dest_idx);
				IPADBG_LOW(
					"removing Status element from skb and sending to WAN client");
				skb_pull(skb2, pkt_status_sz);
				skb2->truesize = skb2->len +
					sizeof(struct sk_buff) +
					(ALIGN(frame_len, 32) *
					unused / used_align);
				sys->ep->client_notify(sys->ep->priv,
					IPA_RECEIVE, (unsigned long)(skb2));
				skb_pull(skb, frame_len);
			}
		} else {
			IPAERR("fail to clone\n");
			if (skb->len < frame_len) {
				sys->prev_skb = NULL;
				sys->len_rem = frame_len - skb->len;
				skb_pull(skb, skb->len);
			} else {
				skb_pull(skb, frame_len);
			}
		}
	}
bail:
	sys->free_skb(skb);
	return 0;
}

static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags)
{
	return __dev_alloc_skb(len, flags);
}

static void ipa3_free_skb_rx(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

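/**
 * ipa3_lan_rx_cb() - LAN RX client notify callback
 *
 * Parses the thin status at the head of the skb, strips the status element
 * (plus the LAN RX header when there was no exception), stashes the QMAP
 * mux id and ucp flag in skb->cb per the metadata layout documented below,
 * and forwards the skb to the registered client.
 */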
void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
{
	struct sk_buff *rx_skb = (struct sk_buff *)data;
	struct ipahal_pkt_status_thin status;
	struct ipa3_ep_context *ep;
	unsigned int src_pipe;
	u32 metadata;
	u8 ucp;

	ipahal_pkt_status_parse_thin(rx_skb->data, &status);
	src_pipe = status.endp_src_idx;
	metadata = status.metadata;
	ucp = status.ucp;
	/* validate src_pipe before using it to index the EP table */
	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes)) {
		IPAERR_RL("drop pipe=%d\n", src_pipe);
		dev_kfree_skb_any(rx_skb);
		return;
	}
	ep = &ipa3_ctx->ep[src_pipe];
	if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
		skb_pull(rx_skb, ipahal_pkt_status_get_size() +
			IPA_LAN_RX_HEADER_LENGTH);
	else
		skb_pull(rx_skb, ipahal_pkt_status_get_size());

	/* Metadata Info
	 * ------------------------------------------
	 * |   3     |   2     |      1      |  0   |
	 * | fw_desc | vdev_id | qmap mux id | Resv |
	 * ------------------------------------------
	 */
	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
	*(u8 *)(rx_skb->cb + 4) = ucp;
	IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
		metadata, *(u32 *)rx_skb->cb);
	IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));

	if (likely((!atomic_read(&ep->disconnect_in_progress)) &&
		ep->valid && ep->client_notify))
		ep->client_notify(ep->priv, IPA_RECEIVE,
			(unsigned long)(rx_skb));
	else
		dev_kfree_skb_any(rx_skb);
}

static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
{
	rx_pkt->data.dma_addr = 0;
	/* skb recycle was moved to pyld_hdlr */
	INIT_LIST_HEAD(&rx_pkt->link);
	spin_lock_bh(&rx_pkt->sys->spinlock);
	list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
	spin_unlock_bh(&rx_pkt->sys->spinlock);
}

static void ipa3_recycle_rx_page_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
{
	struct ipa_rx_page_data rx_page;

	rx_page = rx_pkt->page_data;

	/* Free rx_wrapper only for tmp alloc pages */
	if (rx_page.is_tmp_alloc)
		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
}

/**
 * handle_skb_completion() - Handle event completion EOB or EOT and prep the skb
 *
 * if eob: Set skb values, put rx_pkt at the end of the list and return NULL
 *
 * if eot: Set skb values, put skb at the end of the list. Then update the
 * length and chain the skbs together while also freeing and unmapping the
 * corresponding rx pkt. Once finished return the head_skb to be sent up the
 * network stack.
 */
static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
	*notify, bool update_truesize)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt, *tmp;
	struct sk_buff *rx_skb, *next_skb = NULL;
	struct list_head *head;
	struct ipa3_sys_context *sys;

	sys = (struct ipa3_sys_context *) notify->chan_user_data;
	rx_pkt = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;

	spin_lock_bh(&rx_pkt->sys->spinlock);
	rx_pkt->sys->len--;
	spin_unlock_bh(&rx_pkt->sys->spinlock);

	if (notify->bytes_xfered)
		rx_pkt->len = notify->bytes_xfered;

	/* Drop packets when the WAN consumer channel receives an EOB event */
	if ((notify->evt_id == GSI_CHAN_EVT_EOB ||
		sys->skip_eot) &&
		sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
			sys->rx_buff_sz, DMA_FROM_DEVICE);
		sys->free_skb(rx_pkt->data.skb);
		sys->free_rx_wrapper(rx_pkt);
		sys->eob_drop_cnt++;
		if (notify->evt_id == GSI_CHAN_EVT_EOB) {
			IPADBG("EOB event on WAN consumer channel, drop\n");
			sys->skip_eot = true;
		} else {
			IPADBG("Reset skip eot flag.\n");
			sys->skip_eot = false;
		}
		return NULL;
	}

	rx_skb = rx_pkt->data.skb;
	skb_set_tail_pointer(rx_skb, rx_pkt->len);
	rx_skb->len = rx_pkt->len;

	if (update_truesize) {
		*(unsigned int *)rx_skb->cb = rx_skb->len;
		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
	}

	if (notify->veid >= GSI_VEID_MAX) {
		WARN_ON(1);
		return NULL;
	}

	head = &rx_pkt->sys->pending_pkts[notify->veid];
	INIT_LIST_HEAD(&rx_pkt->link);
	list_add_tail(&rx_pkt->link, head);

	/* Check added for handling LAN consumer packet without EOT flag */
	if (notify->evt_id == GSI_CHAN_EVT_EOT ||
		sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
		/* go over the list backward to save computations on updating length */
		list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
			rx_skb = rx_pkt->data.skb;
			list_del(&rx_pkt->link);
			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
				sys->rx_buff_sz, DMA_FROM_DEVICE);
			sys->free_rx_wrapper(rx_pkt);

			if (next_skb) {
				skb_shinfo(rx_skb)->frag_list = next_skb;
				rx_skb->len += next_skb->len;
				rx_skb->data_len += next_skb->len;
			}
			next_skb = rx_skb;
		}
	} else {
		return NULL;
	}
	return rx_skb;
}

/**
 * handle_page_completion() - Handle event completion EOB or EOT
 * and prep the skb
 *
 * if eob: Set skb values, put rx_pkt at the end of the list and return NULL
 *
 * if eot: Set skb values, put skb at the end of the list. Then update the
 * length and attach the pages to the skb frags while also
 * freeing and unmapping the corresponding rx pkt. Once finished
 * return the head_skb to be sent up the network stack.
 */
static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
	*notify, bool update_truesize)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt, *tmp;
	struct sk_buff *rx_skb;
	struct list_head *head;
	struct ipa3_sys_context *sys;
	struct ipa_rx_page_data rx_page;

	sys = (struct ipa3_sys_context *) notify->chan_user_data;
	rx_pkt = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
	rx_page = rx_pkt->page_data;

	spin_lock_bh(&rx_pkt->sys->spinlock);
	rx_pkt->sys->len--;
	spin_unlock_bh(&rx_pkt->sys->spinlock);

	/* TODO: truesize handle for EOB */
	if (update_truesize)
		IPAERR("update_truesize not supported\n");

	if (notify->veid >= GSI_VEID_MAX) {
		rx_pkt->sys->free_rx_wrapper(rx_pkt);
		if (!rx_page.is_tmp_alloc)
			init_page_count(rx_page.page);
		IPAERR("notify->veid >= GSI_VEID_MAX\n");
		return NULL;
	}

	head = &rx_pkt->sys->pending_pkts[notify->veid];
	INIT_LIST_HEAD(&rx_pkt->link);
	list_add_tail(&rx_pkt->link, head);

	/* Check added for handling LAN consumer packet without EOT flag */
	if (notify->evt_id == GSI_CHAN_EVT_EOT ||
		sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
		rx_skb = alloc_skb(0, GFP_ATOMIC);
		if (unlikely(!rx_skb)) {
			rx_pkt->sys->free_rx_wrapper(rx_pkt);
			if (!rx_page.is_tmp_alloc)
				init_page_count(rx_page.page);
			IPAERR("skb alloc failure\n");
			return NULL;
		}
		/* go over the list backward to save computations on updating length */
		list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
			rx_page = rx_pkt->page_data;
			list_del(&rx_pkt->link);
			if (rx_page.is_tmp_alloc)
				dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
					rx_pkt->len, DMA_FROM_DEVICE);
			else
				dma_sync_single_for_cpu(ipa3_ctx->pdev,
					rx_page.dma_addr,
					rx_pkt->len, DMA_FROM_DEVICE);
			rx_pkt->sys->free_rx_wrapper(rx_pkt);

			skb_add_rx_frag(rx_skb,
				skb_shinfo(rx_skb)->nr_frags,
				rx_page.page, 0,
				notify->bytes_xfered,
				PAGE_SIZE << IPA_WAN_PAGE_ORDER);
		}
	} else {
		return NULL;
	}
	return rx_skb;
}

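/*
 * Handle a single RX completion: build the skb, hand it to the payload
 * handler and replenish the ring. The coalescing consumer owns two transfer
 * rings, so its default WAN ring is replenished as well.
 */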
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify)
{
	struct sk_buff *rx_skb;
	struct ipa3_sys_context *coal_sys;
	int ipa_ep_idx;

	if (!notify) {
		IPAERR_RL("gsi_chan_xfer_notify is null\n");
		return;
	}
	rx_skb = handle_skb_completion(notify, true);

	if (rx_skb) {
		sys->pyld_hdlr(rx_skb, sys);

		/* For coalescing, we have 2 transfer rings to replenish */
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
			ipa_ep_idx = ipa3_get_ep_mapping(
				IPA_CLIENT_APPS_WAN_CONS);
			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
				IPAERR("Invalid client.\n");
				return;
			}
			coal_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
			coal_sys->repl_hdlr(coal_sys);
		}
		sys->repl_hdlr(sys);
	}
}

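/*
 * Process a batch of @num RX completions in NAPI context. In the
 * non-coalescing case the resulting skbs are chained via frag_list and
 * delivered with a single payload-handler call; the coalescing case
 * additionally replenishes the default WAN ring.
 */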
static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify, uint32_t num)
{
	struct ipa3_sys_context *wan_def_sys;
	int i, ipa_ep_idx;
	struct sk_buff *rx_skb, *first_skb = NULL, *prev_skb = NULL;

	/* non-coalescing case (SKB chaining enabled) */
	if (sys->ep->client != IPA_CLIENT_APPS_WAN_COAL_CONS) {
		for (i = 0; i < num; i++) {
			if (!ipa3_ctx->ipa_wan_skb_page)
				rx_skb = handle_skb_completion(
					&notify[i], false);
			else
				rx_skb = handle_page_completion(
					&notify[i], false);

			/* this is always true for EOTs */
			if (rx_skb) {
				if (!first_skb)
					first_skb = rx_skb;
				if (prev_skb)
					skb_shinfo(prev_skb)->frag_list =
						rx_skb;
				prev_skb = rx_skb;
			}
		}
		if (prev_skb) {
			skb_shinfo(prev_skb)->frag_list = NULL;
			sys->pyld_hdlr(first_skb, sys);
		}
	} else {
		if (!ipa3_ctx->ipa_wan_skb_page) {
			/* TODO: add chaining for coal case */
			for (i = 0; i < num; i++) {
				rx_skb = handle_skb_completion(
					&notify[i], false);
				if (rx_skb) {
					sys->pyld_hdlr(rx_skb, sys);
					/*
					 * For coalescing, we have 2 transfer
					 * rings to replenish
					 */
					ipa_ep_idx = ipa3_get_ep_mapping(
						IPA_CLIENT_APPS_WAN_CONS);
					if (ipa_ep_idx ==
						IPA_EP_NOT_ALLOCATED) {
						IPAERR("Invalid client.\n");
						return;
					}
					wan_def_sys =
						ipa3_ctx->ep[ipa_ep_idx].sys;
					wan_def_sys->repl_hdlr(wan_def_sys);
					sys->repl_hdlr(sys);
				}
			}
		} else {
			for (i = 0; i < num; i++) {
				rx_skb = handle_page_completion(
					&notify[i], false);

				/* this is always true for EOTs */
				if (rx_skb) {
					if (!first_skb)
						first_skb = rx_skb;
					if (prev_skb)
						skb_shinfo(prev_skb)->frag_list
							= rx_skb;
					prev_skb = rx_skb;
				}
			}
			if (prev_skb) {
				skb_shinfo(prev_skb)->frag_list = NULL;
				sys->pyld_hdlr(first_skb, sys);
				/*
				 * For coalescing, we have 2 transfer
				 * rings to replenish
				 */
				ipa_ep_idx = ipa3_get_ep_mapping(
					IPA_CLIENT_APPS_WAN_CONS);
				if (ipa_ep_idx ==
					IPA_EP_NOT_ALLOCATED) {
					IPAERR("Invalid client.\n");
					return;
				}
				wan_def_sys =
					ipa3_ctx->ep[ipa_ep_idx].sys;
				wan_def_sys->repl_hdlr(wan_def_sys);
			}
		}
	}
}

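/*
 * WLAN RX completion: size the skb to the transferred bytes and either
 * deliver it to the client or, when the pool is at its low watermark,
 * recycle it back to the WLAN common buffer pool; always replenish.
 */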
static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
	struct sk_buff *rx_skb;

	rx_pkt_expected = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;

	sys->len--;

	if (notify->bytes_xfered)
		rx_pkt_expected->len = notify->bytes_xfered;

	rx_skb = rx_pkt_expected->data.skb;
	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
	rx_skb->len = rx_pkt_expected->len;
	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
	sys->ep->wstats.tx_pkts_rcvd++;
	if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
		ipa3_free_skb(&rx_pkt_expected->data);
		sys->ep->wstats.tx_pkts_dropped++;
	} else {
		sys->ep->wstats.tx_pkts_sent++;
		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
			(unsigned long)(&rx_pkt_expected->data));
	}
	ipa3_replenish_wlan_rx_cache(sys);
}

static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys)
{
	IPADBG_LOW("ENTER.\n");
	if (unlikely(list_empty(&sys->head_desc_list))) {
		IPAERR("descriptor list is empty!\n");
		WARN_ON(1);
		return;
	}
	sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, 0);
	IPADBG_LOW("EXIT\n");
}

static void ipa3_wq_rx_avail(struct work_struct *work)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	struct ipa3_sys_context *sys;

	rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work);
	WARN(unlikely(rx_pkt == NULL), "rx pkt is null");
	sys = rx_pkt->sys;
	ipa3_wq_rx_common(sys, NULL);
}

static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
	struct ipa3_sys_context *sys)
{
	if (sys->ep->client_notify) {
		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
			(unsigned long)(rx_skb));
	} else {
		dev_kfree_skb_any(rx_skb);
		WARN(1, "client notify is null");
	}

	return 0;
}

static int ipa3_odl_dpl_rx_pyld_hdlr(struct sk_buff *rx_skb,
	struct ipa3_sys_context *sys)
{
	if (WARN(!sys->ep->client_notify, "sys->ep->client_notify is NULL\n")) {
		dev_kfree_skb_any(rx_skb);
	} else {
		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
			(unsigned long)(rx_skb));
		/* Recycle the SKB before reusing it */
		ipa3_skb_recycle(rx_skb);
	}
	return 0;
}

static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
{
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
}

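/*
 * Derive the RX buffer size and hardware aggregation byte limit from the
 * client-requested aggr_byte_limit: the request is rounded to a close-by
 * supported buffer base size, and the programmed limit is then clamped so
 * aggregation always fits within the actual RX buffer.
 */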
static void ipa3_set_aggr_limit(struct ipa_sys_connect_params *in,
	struct ipa3_sys_context *sys)
{
	u32 *aggr_byte_limit = &in->ipa_ep_cfg.aggr.aggr_byte_limit;
	u32 adjusted_sz = ipa_adjust_ra_buff_base_sz(*aggr_byte_limit);

	IPADBG("get close-by %u\n", adjusted_sz);
	IPADBG("set rx_buff_sz %lu\n", (unsigned long)
		IPA_GENERIC_RX_BUFF_SZ(adjusted_sz));
	/* disable ipa_status */
	sys->ep->status.status_en = false;
	sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(adjusted_sz);

	if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
		in->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;

	*aggr_byte_limit = sys->rx_buff_sz < *aggr_byte_limit ?
		IPA_ADJUST_AGGR_BYTE_LIMIT(sys->rx_buff_sz) :
		IPA_ADJUST_AGGR_BYTE_LIMIT(*aggr_byte_limit);

	IPADBG("set aggr_limit %lu\n", (unsigned long) *aggr_byte_limit);
}

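/*
 * ipa3_assign_policy() - choose the interrupt/polling policy and the
 * data-path handlers (payload handler, replenish handler, buffer sizes,
 * aggregation config) for the endpoint based on the connecting client type.
 */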
static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
	struct ipa3_sys_context *sys)
{
	bool apps_wan_cons_agg_gro_flag;
	unsigned long aggr_byte_limit;

	if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
		sys->policy = IPA_POLICY_INTR_MODE;
		sys->use_comm_evt_ring = false;
		return 0;
	}

	if (in->client == IPA_CLIENT_APPS_WAN_PROD) {
		sys->policy = IPA_POLICY_INTR_MODE;
		sys->use_comm_evt_ring = true;
		INIT_WORK(&sys->work, ipa3_send_nop_desc);
		atomic_set(&sys->workqueue_flushed, 0);

		/*
		 * enable source notification status for exception packets
		 * (i.e. QMAP commands) to be routed to modem.
		 */
		sys->ep->status.status_en = true;
		sys->ep->status.status_ep =
			ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS);
		return 0;
	}

	if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) {
		sys->policy = IPA_POLICY_NOINTR_MODE;
		return 0;
	}

	apps_wan_cons_agg_gro_flag =
		ipa3_ctx->ipa_client_apps_wan_cons_agg_gro;
	aggr_byte_limit = in->ipa_ep_cfg.aggr.aggr_byte_limit;

	if (IPA_CLIENT_IS_PROD(in->client)) {
		if (sys->ep->skip_ep_cfg) {
			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			sys->use_comm_evt_ring = true;
			atomic_set(&sys->curr_polling_state, 0);
		} else {
			sys->policy = IPA_POLICY_INTR_MODE;
			sys->use_comm_evt_ring = true;
			INIT_WORK(&sys->work, ipa3_send_nop_desc);
			atomic_set(&sys->workqueue_flushed, 0);
		}
	} else {
		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
		    in->client == IPA_CLIENT_APPS_WAN_CONS ||
		    in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
			sys->ep->status.status_en = true;
			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
				ipa3_switch_to_intr_rx_work_func);
			INIT_DELAYED_WORK(&sys->replenish_rx_work,
				ipa3_replenish_rx_work_func);
			atomic_set(&sys->curr_polling_state, 0);
			sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
				IPA_GENERIC_RX_BUFF_BASE_SZ);
			sys->get_skb = ipa3_get_skb_ipa_rx;
			sys->free_skb = ipa3_free_skb_rx;
			in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
			if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
				in->ipa_ep_cfg.aggr.aggr = IPA_COALESCE;
			else
				in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
			in->ipa_ep_cfg.aggr.aggr_time_limit =
				IPA_GENERIC_AGGR_TIME_LIMIT;
			if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
				INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
				sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
				sys->repl_hdlr =
					ipa3_replenish_rx_cache_recycle;
				sys->free_rx_wrapper =
					ipa3_recycle_rx_wrapper;
				sys->rx_pool_sz =
					ipa3_ctx->lan_rx_ring_size;
				in->ipa_ep_cfg.aggr.aggr_byte_limit =
					IPA_GENERIC_AGGR_BYTE_LIMIT;
				in->ipa_ep_cfg.aggr.aggr_pkt_limit =
					IPA_GENERIC_AGGR_PKT_LIMIT;
			} else if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
				in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
				if (ipa3_ctx->ipa_wan_skb_page
					&& in->napi_obj) {
					INIT_WORK(&sys->repl_work,
						ipa3_wq_page_repl);
					sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
					sys->free_rx_wrapper =
						ipa3_recycle_rx_page_wrapper;
					sys->repl_hdlr =
						ipa3_replenish_rx_page_recycle;
					sys->rx_pool_sz =
						ipa3_ctx->wan_rx_ring_size;
				} else {
					INIT_WORK(&sys->repl_work,
						ipa3_wq_repl_rx);
					sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
					sys->free_rx_wrapper =
						ipa3_free_rx_wrapper;
					sys->rx_pool_sz =
						ipa3_ctx->wan_rx_ring_size;
					if (nr_cpu_ids > 1) {
						sys->repl_hdlr =
							ipa3_fast_replenish_rx_cache;
					} else {
						sys->repl_hdlr =
							ipa3_replenish_rx_cache;
					}
					if (in->napi_obj && in->recycle_enabled)
						sys->repl_hdlr =
							ipa3_replenish_rx_cache_recycle;
				}
				in->ipa_ep_cfg.aggr.aggr_sw_eof_active
					= true;
				if (apps_wan_cons_agg_gro_flag)
					ipa3_set_aggr_limit(in, sys);
				else {
					in->ipa_ep_cfg.aggr.aggr_byte_limit
						= IPA_GENERIC_AGGR_BYTE_LIMIT;
					in->ipa_ep_cfg.aggr.aggr_pkt_limit
						= IPA_GENERIC_AGGR_PKT_LIMIT;
				}
			}
		} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
			IPADBG("assigning policy to client:%d",
				in->client);

			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
				ipa3_switch_to_intr_rx_work_func);
			INIT_DELAYED_WORK(&sys->replenish_rx_work,
				ipa3_replenish_rx_work_func);
			atomic_set(&sys->curr_polling_state, 0);
			sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
			sys->rx_pool_sz = in->desc_fifo_sz /
				IPA_FIFO_ELEMENT_SIZE - 1;
			if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
				sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
			sys->pyld_hdlr = NULL;
			sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
			sys->get_skb = ipa3_get_skb_ipa_rx;
			sys->free_skb = ipa3_free_skb_rx;
			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
			in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
		} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
			IPADBG("assigning policy to client:%d",
				in->client);

			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
				ipa3_switch_to_intr_rx_work_func);
			INIT_DELAYED_WORK(&sys->replenish_rx_work,
				ipa3_replenish_rx_work_func);
			atomic_set(&sys->curr_polling_state, 0);
			sys->rx_pool_sz = in->desc_fifo_sz /
				IPA_FIFO_ELEMENT_SIZE - 1;
			if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
				sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
			sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
			sys->get_skb = ipa3_get_skb_ipa_rx;
			sys->free_skb = ipa3_free_skb_rx;
			/* recycle skb for GSB use case */
			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
				sys->free_rx_wrapper =
					ipa3_free_rx_wrapper;
				sys->repl_hdlr =
					ipa3_replenish_rx_cache;
				/* Overwrite buffer size & aggr limit for GSB */
				sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
					IPA_GSB_RX_BUFF_BASE_SZ);
				in->ipa_ep_cfg.aggr.aggr_byte_limit =
					IPA_GSB_AGGR_BYTE_LIMIT;
			} else {
				sys->free_rx_wrapper =
					ipa3_free_rx_wrapper;
				sys->repl_hdlr = ipa3_replenish_rx_cache;
				sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
			}
		} else if (in->client ==
			IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
			IPADBG("assigning policy to client:%d",
				in->client);

			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
				ipa3_switch_to_intr_rx_work_func);
		} else if (in->client ==
			IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
			IPADBG("assigning policy to client:%d",
				in->client);

			sys->policy = IPA_POLICY_NOINTR_MODE;
		} else if (in->client == IPA_CLIENT_ODL_DPL_CONS) {
			IPADBG("assigning policy to ODL client:%d\n",
				in->client);
			/*
			 * Status enabling is needed for DPLv2 with
			 * IPA versions < 4.5.
			 * Don't enable ipa_status for APQ, since MDM IPA
			 * has IPA >= 4.5 with DPLv3.
			 */
			if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ &&
				ipa3_is_mhip_offload_enabled())
				sys->ep->status.status_en = false;
			else
				sys->ep->status.status_en = true;
			sys->policy = IPA_POLICY_INTR_POLL_MODE;
			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
				ipa3_switch_to_intr_rx_work_func);
			INIT_DELAYED_WORK(&sys->replenish_rx_work,
				ipa3_replenish_rx_work_func);
			atomic_set(&sys->curr_polling_state, 0);
			sys->rx_buff_sz =
				IPA_GENERIC_RX_BUFF_SZ(IPA_ODL_RX_BUFF_SZ);
			sys->pyld_hdlr = ipa3_odl_dpl_rx_pyld_hdlr;
			sys->get_skb = ipa3_get_skb_ipa_rx;
			sys->free_skb = ipa3_free_skb_rx;
			sys->free_rx_wrapper = ipa3_recycle_rx_wrapper;
			sys->repl_hdlr = ipa3_replenish_rx_cache_recycle;
			sys->rx_pool_sz = in->desc_fifo_sz /
				IPA_FIFO_ELEMENT_SIZE - 1;
		} else {
			WARN(1, "Need to install a RX pipe hdlr\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ipa3_tx_client_rx_notify_release() - Callback for the last data
 * descriptor in a burst: returns the FIFO descriptor credit and, if the
 * destination client registered a notify callback, delivers IPA_WRITE_DONE
 * so the client can release the skb
 *
 * @user1: [in] - Data Descriptor
 * @user2: [in] - endpoint idx
 *
 * This notify callback is for the destination client.
 * This function is supplied in ipa3_tx_dp_mul
 */
static void ipa3_tx_client_rx_notify_release(void *user1, int user2)
{
	struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
	int ep_idx = user2;

	IPADBG_LOW("Received data desc anchor:%pK\n", dd);

	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;

	/* wlan host driver waits till tx complete before unload */
	IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n",
		ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc));
	IPADBG_LOW("calling client notify callback with priv:%pK\n",
		ipa3_ctx->ep[ep_idx].priv);

	if (ipa3_ctx->ep[ep_idx].client_notify) {
		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
			IPA_WRITE_DONE, (unsigned long)user1);
		ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++;
	}
}

/**
 * ipa3_tx_client_rx_pkt_status() - Callback for intermediate data
 * descriptors: returns the FIFO descriptor credit without notifying
 * the client
 *
 * @user1: [in] - Data Descriptor
 * @user2: [in] - endpoint idx
 *
 * This notify callback is for the destination client.
 * This function is supplied in ipa3_tx_dp_mul
 */
static void ipa3_tx_client_rx_pkt_status(void *user1, int user2)
{
	int ep_idx = user2;

	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
}

/**
 * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets
 * @src: [in] - Client that is sending data
 * @data_desc: [in] - data descriptors from wlan
 *
 * Used to transfer data descriptors received from the WLAN1_PROD pipe
 * to IPA HW.
 *
 * The function sends the data descriptors from WLAN1_PROD one at a time
 * and sets the EOT flag on the last descriptor. Once the send is done from
 * the transport point of view, the IPA driver is notified through the
 * supplied callback - ipa_gsi_irq_tx_notify_cb().
 *
 * ipa_gsi_irq_tx_notify_cb will call the user supplied callback.
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_tx_dp_mul(enum ipa_client_type src,
	struct ipa_tx_data_desc *data_desc)
{
	/* The second byte in wlan header holds qmap id */
#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
	struct ipa_tx_data_desc *entry;
	struct ipa3_sys_context *sys;
	struct ipa3_desc desc[2];
	u32 num_desc, cnt;
	int ep_idx;

	IPADBG_LOW("Received data desc anchor:%pK\n", data_desc);

	spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);

	ep_idx = ipa3_get_ep_mapping(src);
	if (unlikely(ep_idx == -1)) {
		IPAERR("dest EP does not exist.\n");
		goto fail_send;
	}
	IPADBG_LOW("ep idx:%d\n", ep_idx);
	sys = ipa3_ctx->ep[ep_idx].sys;

	if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) {
		IPAERR("dest EP not valid.\n");
		goto fail_send;
	}
	sys->ep->wstats.rx_hd_rcvd++;

	/* Calculate the number of descriptors */
	num_desc = 0;
	list_for_each_entry(entry, &data_desc->link, link) {
		num_desc++;
	}
	IPADBG_LOW("Number of Data Descriptors:%d", num_desc);

	if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
		IPAERR("Insufficient data descriptors available\n");
		goto fail_send;
	}

	/* Assign callback only for last data descriptor */
	cnt = 0;
	list_for_each_entry(entry, &data_desc->link, link) {
		memset(desc, 0, 2 * sizeof(struct ipa3_desc));

		IPADBG_LOW("Parsing data desc :%d\n", cnt);
		cnt++;
		((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
			(u8)sys->ep->cfg.meta.qmap_id;

		/* the tag field will be populated in ipa3_send() function */
		desc[0].is_tag_status = true;
		desc[1].pyld = entry->pyld_buffer;
		desc[1].len = entry->pyld_len;
		desc[1].type = IPA_DATA_DESC_SKB;
		desc[1].user1 = data_desc;
		desc[1].user2 = ep_idx;
		IPADBG_LOW("priv:%pK pyld_buf:0x%pK pyld_len:%d\n",
			entry->priv, desc[1].pyld, desc[1].len);

		/* In case of last descriptor populate callback */
		if (cnt == num_desc) {
			IPADBG_LOW("data desc:%pK\n", data_desc);
			desc[1].callback = ipa3_tx_client_rx_notify_release;
		} else {
			desc[1].callback = ipa3_tx_client_rx_pkt_status;
		}

		IPADBG_LOW("calling ipa3_send()\n");
		if (ipa3_send(sys, 2, desc, true)) {
			IPAERR("fail to send skb\n");
			sys->ep->wstats.rx_pkt_leak += (cnt - 1);
			sys->ep->wstats.rx_dp_fail++;
			goto fail_send;
		}

		if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
			atomic_dec(&sys->ep->avail_fifo_desc);
		sys->ep->wstats.rx_pkts_rcvd++;
		IPADBG_LOW("ep=%d fifo desc=%d\n",
			ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
	}

	sys->ep->wstats.rx_hd_processed++;
	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
	return 0;

fail_send:
	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
	return -EFAULT;
}

void ipa3_free_skb(struct ipa_rx_data *data)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;

	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);

	ipa3_ctx->wc_memb.total_tx_pkts_freed++;
	rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data);

	ipa3_skb_recycle(rx_pkt->data.skb);
	(void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);

	list_add_tail(&rx_pkt->link,
		&ipa3_ctx->wc_memb.wlan_comm_desc_list);
	ipa3_ctx->wc_memb.wlan_comm_free_cnt++;

	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
}

/* Functions added to support kernel tests */

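/**
 * ipa3_sys_setup() - configure and enable an endpoint for test harness use.
 * For IPA_CLIENT_APPS_WAN_PROD an already valid EP is re-configured
 * (header, extended-header and cfg properties) instead of being rejected
 * as allocated.
 */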
int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
	unsigned long *ipa_transport_hdl,
	u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
{
	struct ipa3_ep_context *ep;
	int ipa_ep_idx;
	int result = -EINVAL;

	if (sys_in == NULL || clnt_hdl == NULL) {
		IPAERR("NULL args\n");
		goto fail_gen;
	}

	if (ipa_transport_hdl == NULL || ipa_pipe_num == NULL) {
		IPAERR("NULL args\n");
		goto fail_gen;
	}

	if (sys_in->client >= IPA_CLIENT_MAX) {
		IPAERR("bad parm client:%d\n", sys_in->client);
		goto fail_gen;
	}

	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("Invalid client :%d\n", sys_in->client);
		goto fail_gen;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);

	if (ep->valid == 1) {
		if (sys_in->client != IPA_CLIENT_APPS_WAN_PROD) {
			IPAERR("EP %d already allocated\n", ipa_ep_idx);
			goto fail_and_disable_clocks;
		} else {
			if (ipa3_cfg_ep_hdr(ipa_ep_idx,
				&sys_in->ipa_ep_cfg.hdr)) {
				IPAERR("fail to configure hdr prop of EP %d\n",
					ipa_ep_idx);
				result = -EFAULT;
				goto fail_and_disable_clocks;
			}
			if (ipa3_cfg_ep_hdr_ext(ipa_ep_idx,
				&sys_in->ipa_ep_cfg.hdr_ext)) {
				IPAERR("fail config hdr_ext prop of EP %d\n",
					ipa_ep_idx);
				result = -EFAULT;
				goto fail_and_disable_clocks;
			}
			if (ipa3_cfg_ep_cfg(ipa_ep_idx,
				&sys_in->ipa_ep_cfg.cfg)) {
				IPAERR("fail to configure cfg prop of EP %d\n",
					ipa_ep_idx);
				result = -EFAULT;
				goto fail_and_disable_clocks;
			}
			IPAERR("client %d (ep: %d) overlay ok sys=%pK\n",
				sys_in->client, ipa_ep_idx, ep->sys);
			ep->client_notify = sys_in->notify;
			ep->priv = sys_in->priv;
			*clnt_hdl = ipa_ep_idx;
			if (!ep->keep_ipa_awake)
				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);

			return 0;
		}
	}

	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));

	ep->valid = 1;
	ep->client = sys_in->client;
	ep->client_notify = sys_in->notify;
	ep->priv = sys_in->priv;
	ep->keep_ipa_awake = true;
	if (en_status) {
		ep->status.status_en = true;
		ep->status.status_ep = ipa_ep_idx;
	}

	result = ipa3_enable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("enable data path failed res=%d clnt=%d.\n",
			result, ipa_ep_idx);
		goto fail_gen2;
	}

	if (!ep->skip_ep_cfg) {
		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_gen2;
		}
		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_gen2;
		}
		IPADBG("ep configuration successful\n");
	} else {
		IPADBG("skipping ep configuration\n");
	}

	*clnt_hdl = ipa_ep_idx;

	*ipa_pipe_num = ipa_ep_idx;
	*ipa_transport_hdl = ipa3_ctx->gsi_dev_hdl;

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);

	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client,
		ipa_ep_idx, ep->sys);

	return 0;

fail_gen2:
fail_and_disable_clocks:
	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
	return result;
}

int ipa3_sys_teardown(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	ipa3_disable_data_path(clnt_hdl);
	ep->valid = 0;

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}

int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
	unsigned long gsi_ev_hdl)
{
	struct ipa3_ep_context *ep;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];

	ep->gsi_chan_hdl = gsi_ch_hdl;
	ep->gsi_evt_ring_hdl = gsi_ev_hdl;

	return 0;
}

static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_EVT_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_EVT_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_EVT_EVT_RING_EMPTY_ERR:
		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
}

static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
{
	switch (notify->evt_id) {
	case GSI_CHAN_INVALID_TRE_ERR:
		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
		break;
	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
		break;
	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
		break;
	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
		break;
	case GSI_CHAN_HWO_1_ERR:
		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
		break;
	default:
		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
	}
}

static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
	struct ipa3_tx_pkt_wrapper *tx_pkt;

	IPADBG_LOW("event %d notified\n", notify->evt_id);

	switch (notify->evt_id) {
	case GSI_CHAN_EVT_EOT:
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		tx_pkt = notify->xfer_user_data;
		tx_pkt->xmit_done = true;
		atomic_inc(&tx_pkt->sys->xmit_eot_cnt);
		tasklet_schedule(&tx_pkt->sys->tasklet);
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->evt_id);
	}
}

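/*
 * Move an RX pipe into polling: record the polling state, grab the
 * wakelock and, if IPA PM activated the clocks synchronously and a NAPI
 * object exists, schedule NAPI; otherwise fall back to the workqueue.
 * PM deactivation then happens in wq context or after the NAPI poll.
 */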
void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
{
	bool clk_off;

	atomic_set(&sys->curr_polling_state, 1);
	__ipa3_update_curr_poll_state(sys->ep->client, 1);

	ipa3_inc_acquire_wakelock();

	/*
	 * pm deactivate is done in wq context
	 * or after NAPI poll
	 */
	clk_off = ipa_pm_activate(sys->pm_hdl);
	if (!clk_off && sys->napi_obj) {
		napi_schedule(sys->napi_obj);
		return;
	}
	queue_work(sys->wq, &sys->work);
}

static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
	struct ipa3_sys_context *sys;

	if (!notify) {
		IPAERR("gsi notify is NULL.\n");
		return;
	}
	IPADBG_LOW("event %d notified\n", notify->evt_id);

	sys = (struct ipa3_sys_context *)notify->chan_user_data;
	sys->ep->xfer_notify_valid = true;
	sys->ep->xfer_notify = *notify;

	switch (notify->evt_id) {
	case GSI_CHAN_EVT_EOT:
	case GSI_CHAN_EVT_EOB:
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		if (!atomic_read(&sys->curr_polling_state)) {
			/* put the gsi channel into polling mode */
			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
				GSI_CHAN_MODE_POLL);
			__ipa_gsi_irq_rx_scedule_poll(sys);
		}
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->evt_id);
	}
}

static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
	struct ipa3_sys_context *sys;

	if (!notify) {
		IPAERR("gsi notify is NULL.\n");
		return;
	}
	IPADBG_LOW("event %d notified\n", notify->evt_id);

	sys = (struct ipa3_sys_context *)notify->chan_user_data;
	if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
		IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n");
		return;
	}
	sys->ep->xfer_notify_valid = true;
	sys->ep->xfer_notify = *notify;

	switch (notify->evt_id) {
	case GSI_CHAN_EVT_EOT:
		if (!atomic_read(&sys->curr_polling_state)) {
			/* put the gsi channel into polling mode */
			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
				GSI_CHAN_MODE_POLL);
			ipa3_inc_acquire_wakelock();
			atomic_set(&sys->curr_polling_state, 1);
			queue_work(sys->wq, &sys->work);
		}
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->evt_id);
	}
}

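/*
 * Allocate the GSI event ring shared by all endpoints that use the common
 * event ring (int_modc = 1, since moderation is driven per channel).
 */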
int ipa3_alloc_common_event_ring(void)
{
	struct gsi_evt_ring_props gsi_evt_ring_props;
	dma_addr_t evt_dma_addr;
	int result;

	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
	gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE;
	gsi_evt_ring_props.ring_base_vaddr =
		dma_alloc_coherent(ipa3_ctx->pdev,
			gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL);
	if (!gsi_evt_ring_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_evt_ring_props.ring_len);
		return -ENOMEM;
	}
	gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
	gsi_evt_ring_props.int_modt = 0;
	gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel */
	gsi_evt_ring_props.rp_update_addr = 0;
	gsi_evt_ring_props.exclusive = false;
	gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
	gsi_evt_ring_props.user_data = NULL;

	result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
		ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl);
	if (result) {
		IPAERR("gsi_alloc_evt_ring failed %d\n", result);
		return result;
	}
	ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE;

	return 0;
}

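/*
 * Allocate the event ring and transfer ring for the endpoint. The ring
 * length doubles desc_fifo_sz since GSI transfer elements are 16 B versus
 * 8 B for the legacy BAM descriptors. The WAN default consumer reuses the
 * coalescing endpoint's setup when the coalescing pipe is already valid.
 */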
static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
	struct ipa3_ep_context *ep)
{
	u32 ring_size;
	int result;
	gfp_t mem_flag = GFP_KERNEL;
	u32 coale_ep_idx;

	if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
		in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
		in->client == IPA_CLIENT_APPS_WAN_PROD)
		mem_flag = GFP_ATOMIC;

	if (!ep) {
		IPAERR("EP context is empty\n");
		return -EINVAL;
	}
	coale_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
	/*
	 * GSI ring length is calculated based on the desc_fifo_sz
	 * which was meant to define the BAM desc fifo. GSI descriptors
	 * are 16B as opposed to 8B for BAM.
	 */
	ring_size = 2 * in->desc_fifo_sz;
	ep->gsi_evt_ring_hdl = ~0;
	if (ep->sys->use_comm_evt_ring) {
		if (ipa3_ctx->gsi_evt_comm_ring_rem < ring_size) {
			IPAERR("not enough space in common event ring\n");
			IPAERR("available: %d needed: %d\n",
				ipa3_ctx->gsi_evt_comm_ring_rem,
				ring_size);
			WARN_ON(1);
			return -EFAULT;
		}
		ipa3_ctx->gsi_evt_comm_ring_rem -= (ring_size);
		ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
	} else if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
		result = ipa_gsi_setup_event_ring(ep,
			IPA_COMMON_EVENT_RING_SIZE, mem_flag);
		if (result)
			goto fail_setup_event_ring;
	} else if (in->client == IPA_CLIENT_APPS_WAN_CONS &&
		coale_ep_idx != IPA_EP_NOT_ALLOCATED &&
		ipa3_ctx->ep[coale_ep_idx].valid == 1) {
		IPADBG("Wan consumer pipe configured\n");
		result = ipa_gsi_setup_coal_def_channel(in, ep,
			&ipa3_ctx->ep[coale_ep_idx]);
		if (result) {
			IPAERR("Failed to setup default coal GSI channel\n");
			goto fail_setup_event_ring;
		}
		return result;
	} else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
		IPA_CLIENT_IS_CONS(ep->client)) {
		result = ipa_gsi_setup_event_ring(ep, ring_size, mem_flag);
		if (result)
			goto fail_setup_event_ring;
	}

	result = ipa_gsi_setup_transfer_ring(ep, ring_size,
		ep->sys, mem_flag);
	if (result)
		goto fail_setup_transfer_ring;

	if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
		gsi_config_channel_mode(ep->gsi_chan_hdl,
			GSI_CHAN_MODE_POLL);
	return 0;

fail_setup_transfer_ring:
	if (ep->gsi_mem_info.evt_ring_base_vaddr)
		dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.evt_ring_len,
			ep->gsi_mem_info.evt_ring_base_vaddr,
			ep->gsi_mem_info.evt_ring_base_addr);
fail_setup_event_ring:
	IPAERR("Return with err: %d\n", result);
	return result;
}


static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
	u32 ring_size, gfp_t mem_flag)
{
	struct gsi_evt_ring_props gsi_evt_ring_props;
	dma_addr_t evt_dma_addr;
	int result;

	evt_dma_addr = 0;
	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
	gsi_evt_ring_props.ring_len = ring_size;
	gsi_evt_ring_props.ring_base_vaddr =
		dma_alloc_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
			&evt_dma_addr, mem_flag);
	if (!gsi_evt_ring_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_evt_ring_props.ring_len);
		return -ENOMEM;
	}
	gsi_evt_ring_props.ring_base_addr = evt_dma_addr;

	/* copy mem info */
	ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
	ep->gsi_mem_info.evt_ring_base_addr =
		gsi_evt_ring_props.ring_base_addr;
	ep->gsi_mem_info.evt_ring_base_vaddr =
		gsi_evt_ring_props.ring_base_vaddr;

	if (ep->sys->napi_obj) {
		gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
		gsi_evt_ring_props.int_modc = IPA_GSI_EVT_RING_INT_MODC;
	} else {
		gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
		gsi_evt_ring_props.int_modc = 1;
	}
	IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
		ep->client,
		gsi_evt_ring_props.int_modt,
		gsi_evt_ring_props.int_modc);
	gsi_evt_ring_props.rp_update_addr = 0;
	gsi_evt_ring_props.exclusive = true;
	gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
	gsi_evt_ring_props.user_data = NULL;

	result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
		ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl);
	if (result != GSI_STATUS_SUCCESS)
		goto fail_alloc_evt_ring;

	return 0;

fail_alloc_evt_ring:
	if (ep->gsi_mem_info.evt_ring_base_vaddr)
		dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.evt_ring_len,
			ep->gsi_mem_info.evt_ring_base_vaddr,
			ep->gsi_mem_info.evt_ring_base_addr);
	IPAERR("Return with err: %d\n", result);
	return result;
}
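
/*
 * Hedged note on the moderation fields above, going by the IPADBG
 * string ("threshold cycles" / "cnt"): int_modt is a time threshold in
 * cycles and int_modc a completion-count threshold. The non-NAPI path
 * sets int_modc = 1, i.e. one interrupt per completion, while the NAPI
 * path uses IPA_GSI_EVT_RING_INT_MODC so several completions share one
 * interrupt and the poll loop drains the rest.
 */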

static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
	u32 ring_size, struct ipa3_sys_context *user_data, gfp_t mem_flag)
{
	dma_addr_t dma_addr;
	union __packed gsi_channel_scratch ch_scratch;
	struct gsi_chan_props gsi_channel_props;
	const struct ipa_gsi_ep_config *gsi_ep_info;
	int result;

	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
	if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
		gsi_channel_props.prot = GSI_CHAN_PROT_GCI;
	else
		gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
	if (IPA_CLIENT_IS_PROD(ep->client)) {
		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
	} else {
		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
		gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
	}

	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
	if (!gsi_ep_info) {
		IPAERR("Failed getting GSI EP info for client=%d\n",
			ep->client);
		result = -EINVAL;
		goto fail_get_gsi_ep_info;
	} else {
		gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
	}

	gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
	gsi_channel_props.ring_len = ring_size;
	gsi_channel_props.ring_base_vaddr =
		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
			&dma_addr, mem_flag);
	if (!gsi_channel_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_channel_props.ring_len);
		result = -ENOMEM;
		goto fail_alloc_channel_ring;
	}
	gsi_channel_props.ring_base_addr = dma_addr;

	/* copy mem info */
	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
	ep->gsi_mem_info.chan_ring_base_addr =
		gsi_channel_props.ring_base_addr;
	ep->gsi_mem_info.chan_ring_base_vaddr =
		gsi_channel_props.ring_base_vaddr;

	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
		gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
	else
		gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
	if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
		gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
	else
		gsi_channel_props.low_weight = 1;
	gsi_channel_props.db_in_bytes = 1;
	gsi_channel_props.prefetch_mode = gsi_ep_info->prefetch_mode;
	gsi_channel_props.empty_lvl_threshold = gsi_ep_info->prefetch_threshold;
	gsi_channel_props.chan_user_data = user_data;
	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
	if (IPA_CLIENT_IS_PROD(ep->client))
		gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
	else
		gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb;
	if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
		gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb;

	if (IPA_CLIENT_IS_CONS(ep->client))
		gsi_channel_props.cleanup_cb = free_rx_pkt;

	/* overwrite the cleanup_cb for page recycling */
	if (ipa3_ctx->ipa_wan_skb_page &&
		(IPA_CLIENT_IS_WAN_CONS(ep->client)))
		gsi_channel_props.cleanup_cb = free_rx_page;

	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
		&ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to alloc GSI chan.\n");
		goto fail_alloc_channel;
	}

	memset(&ch_scratch, 0, sizeof(ch_scratch));
	/*
	 * Update scratch for MCS smart prefetch:
	 * Starting with IPA 4.5, smart prefetch is implemented by H/W.
	 * On IPA 4.0/4.1/4.2 we do not use MCS smart prefetch,
	 * so keep the fields zero.
	 */
	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
		ch_scratch.gpi.max_outstanding_tre =
			gsi_ep_info->ipa_if_tlv * GSI_CHAN_RE_SIZE_16B;
		ch_scratch.gpi.outstanding_threshold =
			2 * GSI_CHAN_RE_SIZE_16B;
	}
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
		ch_scratch.gpi.dl_nlo_channel = 0;
	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("failed to write scratch %d\n", result);
		goto fail_write_channel_scratch;
	}
	return 0;

fail_write_channel_scratch:
	if (gsi_dealloc_channel(ep->gsi_chan_hdl)
		!= GSI_STATUS_SUCCESS) {
		IPAERR("Failed to dealloc GSI chan.\n");
		WARN_ON(1);
	}
fail_alloc_channel:
	dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.chan_ring_len,
		ep->gsi_mem_info.chan_ring_base_vaddr,
		ep->gsi_mem_info.chan_ring_base_addr);
fail_alloc_channel_ring:
fail_get_gsi_ep_info:
	if (ep->gsi_evt_ring_hdl != ~0) {
		gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
		ep->gsi_evt_ring_hdl = ~0;
	}
	return result;
}
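
/*
 * Worked example for the pre-4.0 scratch programming above (the TLV
 * size is hypothetical, assuming ipa_if_tlv counts TLV FIFO entries):
 * with ipa_if_tlv = 8 and 16 B ring elements,
 * max_outstanding_tre = 8 * 16 = 128 and outstanding_threshold =
 * 2 * 16 = 32, i.e. the channel never posts more outstanding TREs than
 * fit in the endpoint's TLV FIFO.
 */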

static int ipa_gsi_setup_coal_def_channel(struct ipa_sys_connect_params *in,
	struct ipa3_ep_context *ep, struct ipa3_ep_context *coal_ep)
{
	u32 ring_size;
	int result;

	ring_size = 2 * in->desc_fifo_sz;

	/* copy event ring handle */
	ep->gsi_evt_ring_hdl = coal_ep->gsi_evt_ring_hdl;

	result = ipa_gsi_setup_transfer_ring(ep, ring_size,
		coal_ep->sys, GFP_ATOMIC);
	if (result) {
		if (ep->gsi_mem_info.evt_ring_base_vaddr)
			dma_free_coherent(ipa3_ctx->pdev,
				ep->gsi_mem_info.chan_ring_len,
				ep->gsi_mem_info.chan_ring_base_vaddr,
				ep->gsi_mem_info.chan_ring_base_addr);
		IPAERR("Destroying WAN_COAL_CONS evt_ring\n");
		if (ep->gsi_evt_ring_hdl != ~0) {
			gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
			ep->gsi_evt_ring_hdl = ~0;
		}
		IPAERR("Return with err: %d\n", result);
		return result;
	}
	return 0;
}

static int ipa_populate_tag_field(struct ipa3_desc *desc,
	struct ipa3_tx_pkt_wrapper *tx_pkt,
	struct ipahal_imm_cmd_pyld **tag_pyld_ret)
{
	struct ipahal_imm_cmd_pyld *tag_pyld;
	struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0};

	/* populate tag field only if it is NULL */
	if (desc->pyld == NULL) {
		tag_cmd.tag = pointer_to_tag_wa(tx_pkt);
		tag_pyld = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true);
		if (unlikely(!tag_pyld)) {
			IPAERR("Failed to construct ip_packet_tag_status\n");
			return -EFAULT;
		}
		/*
		 * This is for a 32-bit pointer; it will need special
		 * handling if a 64-bit pointer is used.
		 */
		IPADBG_LOW("tx_pkt sent in tag: 0x%pK\n", tx_pkt);
		desc->pyld = tag_pyld->data;
		desc->opcode = tag_pyld->opcode;
		desc->len = tag_pyld->len;
		desc->user1 = tag_pyld;
		desc->type = IPA_IMM_CMD_DESC;
		desc->callback = ipa3_tag_destroy_imm;

		*tag_pyld_ret = tag_pyld;
	}
	return 0;
}

static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify)
{
	int unused_var;

	return ipa_poll_gsi_n_pkt(sys, notify, 1, &unused_var);
}

static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify,
	int expected_num, int *actual_num)
{
	int ret;
	int idx = 0;
	int poll_num = 0;

	if (!actual_num || expected_num <= 0 ||
		expected_num > IPA_WAN_NAPI_MAX_FRAMES) {
		IPAERR("bad params actual_num=%pK expected_num=%d\n",
			actual_num, expected_num);
		return GSI_STATUS_INVALID_PARAMS;
	}

	if (sys->ep->xfer_notify_valid) {
		*notify = sys->ep->xfer_notify;
		sys->ep->xfer_notify_valid = false;
		idx++;
	}
	if (expected_num == idx) {
		*actual_num = idx;
		return GSI_STATUS_SUCCESS;
	}

	ret = gsi_poll_n_channel(sys->ep->gsi_chan_hdl,
		&notify[idx], expected_num - idx, &poll_num);
	if (ret == GSI_STATUS_POLL_EMPTY) {
		if (idx) {
			*actual_num = idx;
			return GSI_STATUS_SUCCESS;
		}
		*actual_num = 0;
		return ret;
	} else if (ret != GSI_STATUS_SUCCESS) {
		if (idx) {
			*actual_num = idx;
			return GSI_STATUS_SUCCESS;
		}
		*actual_num = 0;
		IPAERR("Poll channel err: %d\n", ret);
		return ret;
	}

	*actual_num = idx + poll_num;
	return ret;
}
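
/*
 * Illustrative usage sketch (not part of the driver): a caller that
 * drains up to "budget" completions in one shot, mirroring the poll
 * loops below. "sys" and "budget" are assumed valid here.
 *
 *	struct gsi_chan_xfer_notify notify[IPA_WAN_NAPI_MAX_FRAMES];
 *	int num, ret;
 *
 *	ret = ipa_poll_gsi_n_pkt(sys, notify, budget, &num);
 *	if (ret == GSI_STATUS_POLL_EMPTY)
 *		;	 nothing pending: switch back to interrupt mode
 *	else if (ret == GSI_STATUS_SUCCESS)
 *		;	 process notify[0..num-1]; num may be < budget
 *
 * Note that a completion cached in ep->xfer_notify (the event that
 * moved the channel to polling mode) is always consumed first.
 */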

/**
 * ipa3_lan_rx_poll() - Poll the LAN rx packets from IPA HW.
 * This function is executed in the softirq context.
 *
 * If the input budget is zero, the driver switches back to
 * interrupt mode.
 *
 * Return: number of polled packets; 0 (zero) on error
 */
int ipa3_lan_rx_poll(u32 clnt_hdl, int weight)
{
	struct ipa3_ep_context *ep;
	int ret;
	int cnt = 0;
	int remain_aggr_weight;
	struct gsi_chan_xfer_notify notify;

	if (unlikely(clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0)) {
		IPAERR("bad param 0x%x\n", clnt_hdl);
		return cnt;
	}
	remain_aggr_weight = weight / IPA_LAN_AGGR_PKT_CNT;
	if (unlikely(remain_aggr_weight > IPA_LAN_NAPI_MAX_FRAMES)) {
		IPAERR("NAPI weight is higher than expected\n");
		IPAERR("expected %d got %d\n",
			IPA_LAN_NAPI_MAX_FRAMES, remain_aggr_weight);
		return cnt;
	}
	ep = &ipa3_ctx->ep[clnt_hdl];

start_poll:
	while (remain_aggr_weight > 0 &&
		atomic_read(&ep->sys->curr_polling_state)) {
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		ret = ipa_poll_gsi_pkt(ep->sys, &notify);
		if (ret)
			break;

		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
			ipa3_dma_memcpy_notify(ep->sys);
		else if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
			ipa3_wlan_wq_rx_common(ep->sys, &notify);
		else
			ipa3_wq_rx_common(ep->sys, &notify);

		remain_aggr_weight--;
		if (ep->sys->len == 0) {
			if (remain_aggr_weight == 0)
				cnt--;
			break;
		}
	}
	cnt += weight - remain_aggr_weight * IPA_LAN_AGGR_PKT_CNT;
	if (cnt < weight) {
		napi_complete(ep->sys->napi_obj);
		ret = ipa3_rx_switch_to_intr_mode(ep->sys);
		if (ret == -GSI_STATUS_PENDING_IRQ &&
			napi_reschedule(ep->sys->napi_obj))
			goto start_poll;
		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
	}
	return cnt;
}

/**
 * ipa3_rx_poll() - Poll the WAN rx packets from IPA HW. This
 * function is executed in the softirq context.
 *
 * If the input budget is zero, the driver switches back to
 * interrupt mode.
 *
 * Return: number of polled packets; 0 (zero) on error
 */
int ipa3_rx_poll(u32 clnt_hdl, int weight)
{
	struct ipa3_ep_context *ep;
	int ret;
	int cnt = 0;
	int num = 0;
	int remain_aggr_weight;
	struct ipa_active_client_logging_info log;
	struct gsi_chan_xfer_notify notify[IPA_WAN_NAPI_MAX_FRAMES];

	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad param 0x%x\n", clnt_hdl);
		return cnt;
	}
	remain_aggr_weight = weight / IPA_WAN_AGGR_PKT_CNT;
	if (remain_aggr_weight > IPA_WAN_NAPI_MAX_FRAMES) {
		IPAERR("NAPI weight is higher than expected\n");
		IPAERR("expected %d got %d\n",
			IPA_WAN_NAPI_MAX_FRAMES, remain_aggr_weight);
		return -EINVAL;
	}
	ep = &ipa3_ctx->ep[clnt_hdl];

start_poll:
	while (remain_aggr_weight > 0 &&
		atomic_read(&ep->sys->curr_polling_state)) {
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		if (ipa3_ctx->enable_napi_chain) {
			ret = ipa_poll_gsi_n_pkt(ep->sys, notify,
				remain_aggr_weight, &num);
		} else {
			ret = ipa_poll_gsi_n_pkt(ep->sys, notify,
				1, &num);
		}
		if (ret)
			break;

		trace_ipa3_rx_poll_num(num);
		ipa3_rx_napi_chain(ep->sys, notify, num);
		remain_aggr_weight -= num;

		trace_ipa3_rx_poll_cnt(ep->sys->len);
		if (ep->sys->len == 0) {
			if (remain_aggr_weight == 0)
				cnt--;
			break;
		}
	}
	cnt += weight - remain_aggr_weight * IPA_WAN_AGGR_PKT_CNT;
	/* call repl_hdlr before napi_reschedule / napi_complete */
	ep->sys->repl_hdlr(ep->sys);

	/*
	 * If not enough descriptors could be replenished, keep the pipe
	 * in polling mode until the minimum number of descriptors has
	 * been replenished.
	 */
	if (cnt < weight && ep->sys->len > IPA_DEFAULT_SYS_YELLOW_WM) {
		napi_complete(ep->sys->napi_obj);
		ret = ipa3_rx_switch_to_intr_mode(ep->sys);
		if (ret == -GSI_STATUS_PENDING_IRQ &&
			napi_reschedule(ep->sys->napi_obj))
			goto start_poll;
		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
	} else {
		cnt = weight;
		IPADBG_LOW("Client = %d not replenished free descriptors\n",
			ep->client);
	}
	return cnt;
}
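
/*
 * Illustrative budget accounting for the poll loop above (symbolic,
 * since the aggregation constants are defined elsewhere): with NAPI
 * weight W and IPA_WAN_AGGR_PKT_CNT = K, the loop runs at most W / K
 * iterations, each of which may complete one aggregate of up to K
 * packets. Afterwards cnt = W - remaining_iterations * K, so a fully
 * consumed budget yields cnt == W and keeps the endpoint in polling
 * mode, while cnt < W allows the switch back to interrupt mode.
 */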

static unsigned long tag_to_pointer_wa(uint64_t tag)
{
	return 0xFFFF000000000000 | (unsigned long) tag;
}

static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	u16 temp;

	/* Add this check even though it may cost some throughput */
	if (BITS_PER_LONG == 64) {
		temp = (u16) (~((unsigned long) tx_pkt &
			0xFFFF000000000000) >> 48);
		if (temp) {
			IPAERR("The 16-bit prefix is not all 1s (%pK)\n",
				tx_pkt);
			/*
			 * All addresses passed to the HW must start with
			 * 0xFFFF so the prefix can be restored later.
			 */
			ipa_assert();
		}
	}
	return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF;
}
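
/*
 * Illustrative round trip (hypothetical pointer value), assuming a
 * 64-bit kernel pointer in the canonical 0xFFFF... range:
 *
 *	tx_pkt                    = 0xFFFF884012345678
 *	pointer_to_tag_wa(tx_pkt) = 0x0000884012345678  (48-bit tag to HW)
 *	tag_to_pointer_wa(tag)    = 0xFFFF884012345678  (prefix restored)
 *
 * The ipa_assert() above fires when the top 16 bits are not all 1s,
 * because such a pointer could not be reconstructed from its tag.
 */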

/**
 * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20
 *
 * A hardware limitation requires avoiding the use of GSI physical
 * channel 20. This function allocates GSI physical channel 20 and
 * holds it to prevent anyone else from using it.
 *
 * Return codes: 0 on success, negative on failure
 */
int ipa_gsi_ch20_wa(void)
{
	struct gsi_chan_props gsi_channel_props;
	dma_addr_t dma_addr;
	int result;
	int i;
	unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC];
	unsigned long chan_hdl_to_keep;

	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
	gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
	gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
	gsi_channel_props.evt_ring_hdl = ~0;
	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
	gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size;
	gsi_channel_props.ring_base_vaddr =
		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
			&dma_addr, 0);
	if (!gsi_channel_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_channel_props.ring_len);
		return -ENOMEM;
	}
	gsi_channel_props.ring_base_addr = dma_addr;
	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
		gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
	else
		gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
	gsi_channel_props.db_in_bytes = 1;
	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
	gsi_channel_props.low_weight = 1;
	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
	gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;

	/* first allocate channels up to channel 20 */
	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
		gsi_channel_props.ch_id = i;
		result = gsi_alloc_channel(&gsi_channel_props,
			ipa3_ctx->gsi_dev_hdl,
			&chan_hdl[i]);
		if (result != GSI_STATUS_SUCCESS) {
			IPAERR("failed to alloc channel %d err %d\n",
				i, result);
			return result;
		}
	}

	/* allocate channel 20 */
	gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN;
	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
		&chan_hdl_to_keep);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("failed to alloc channel %d err %d\n",
			IPA_GSI_CH_20_WA_VIRT_CHAN, result);
		return result;
	}

	/* release all other channels */
	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
		result = gsi_dealloc_channel(chan_hdl[i]);
		if (result != GSI_STATUS_SUCCESS) {
			IPAERR("failed to dealloc channel %d err %d\n",
				i, result);
			return result;
		}
	}

	/* DMA memory shall not be freed as it is used by channel 20 */
	return 0;
}

/**
 * ipa_adjust_ra_buff_base_sz()
 *
 * Return value: the largest power of two that is smaller than the
 * input value after it has been adjusted upward by IPA_MTU and
 * IPA_GENERIC_RX_BUFF_LIMIT
 */
static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
{
	aggr_byte_limit += IPA_MTU;
	aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
	aggr_byte_limit--;
	aggr_byte_limit |= aggr_byte_limit >> 1;
	aggr_byte_limit |= aggr_byte_limit >> 2;
	aggr_byte_limit |= aggr_byte_limit >> 4;
	aggr_byte_limit |= aggr_byte_limit >> 8;
	aggr_byte_limit |= aggr_byte_limit >> 16;
	aggr_byte_limit++;
	return aggr_byte_limit >> 1;
}
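
/*
 * Worked example for the bit trick above (illustrative input value):
 * suppose the adjusted limit is 10752 (0x2A00) after the MTU and
 * RX-buffer additions.
 *
 *	10752 - 1             = 0x29FF
 *	OR in shifted copies  = 0x3FFF  (all bits below the MSB set)
 *	+ 1                   = 0x4000  (next power of two, 16384)
 *	>> 1                  = 0x2000  (8192)
 *
 * 8192 is the largest power of two still smaller than 10752. The
 * shift-and-OR cascade is the classic branch-free round-up-to-power-
 * of-two; the final shift halves it to round down instead.
 */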