ipa_dp.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/msm_gsi.h>
#include "ipa_i.h"
#include "ipa_trace.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define IPA_WAN_AGGR_PKT_CNT 5
#define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_MIN_SLEEP_RX 1010
#define POLLING_MAX_SLEEP_RX 1050
#define POLLING_INACTIVITY_TX 40
#define POLLING_MIN_SLEEP_TX 400
#define POLLING_MAX_SLEEP_TX 500
#define SUSPEND_MIN_SLEEP_RX 1000
#define SUSPEND_MAX_SLEEP_RX 1005

/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_MTU 1500
#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
#define IPA_GENERIC_AGGR_TIME_LIMIT 500 /* 0.5msec */
#define IPA_GENERIC_AGGR_PKT_LIMIT 0

#define IPA_GSB_AGGR_BYTE_LIMIT 14
#define IPA_GSB_RX_BUFF_BASE_SZ 16384

#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
		(X) + NET_SKB_PAD) +\
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
		(IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
#define IPA_GENERIC_RX_BUFF_LIMIT (\
		IPA_REAL_GENERIC_RX_BUFF_SZ(\
		IPA_GENERIC_RX_BUFF_BASE_SZ) -\
		IPA_GENERIC_RX_BUFF_BASE_SZ)

/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
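
/*
 * For illustration: with IPA_GENERIC_RX_BUFF_BASE_SZ = 8192 and
 * IPA_MTU = 1500, IPA_ADJUST_AGGR_BYTE_LIMIT(8192) = (8192 - 1500) / 1000
 * = 6 KB, which matches the hard-coded IPA_GENERIC_AGGR_BYTE_LIMIT above.
 */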
#define IPA_RX_BUFF_CLIENT_HEADROOM 256

#define IPA_WLAN_RX_POOL_SZ 100
#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
#define IPA_WLAN_RX_BUFF_SZ 2048
#define IPA_WLAN_COMM_RX_POOL_LOW 100
#define IPA_WLAN_COMM_RX_POOL_HIGH 900

#define IPA_ODU_RX_BUFF_SZ 2048
#define IPA_ODU_RX_POOL_SZ 64

#define IPA_ODL_RX_BUFF_SZ (16 * 1024)

#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
#define IPA_GSI_EVT_RING_INT_MODT (16) /* 0.5ms under 32KHz clock */
#define IPA_GSI_EVT_RING_INT_MODC (20)

#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
/* The below virtual channel cannot be used by any entity */
#define IPA_GSI_CH_20_WA_VIRT_CHAN 29

#define IPA_DEFAULT_SYS_YELLOW_WM 32
#define IPA_REPL_XFER_THRESH 20
#define IPA_REPL_XFER_MAX 36

#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)

#define IPA_APPS_BW_FOR_PM 700

#define IPA_SEND_MAX_DESC (20)

#define IPA_EOT_THRESH 32

#define IPA_QMAP_ID_BYTE 0

static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_work_func(struct work_struct *work);
static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_wq_handle_rx(struct work_struct *work);
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify);
static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify, uint32_t num);
static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify);
static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
	struct ipa3_sys_context *sys);
static void ipa3_cleanup_rx(struct ipa3_sys_context *sys);
static void ipa3_wq_rx_avail(struct work_struct *work);
static void ipa3_alloc_wlan_rx_common_cache(u32 size);
static void ipa3_cleanup_wlan_rx_common_cache(void);
static void ipa3_wq_repl_rx(struct work_struct *work);
static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys);
static int ipa_gsi_setup_coal_def_channel(struct ipa_sys_connect_params *in,
	struct ipa3_ep_context *ep, struct ipa3_ep_context *coal_ep);
static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
	struct ipa3_ep_context *ep);
static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
	u32 ring_size, gfp_t mem_flag);
static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
	u32 ring_size, struct ipa3_sys_context *user_data, gfp_t mem_flag);
static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl);
static int ipa_populate_tag_field(struct ipa3_desc *desc,
	struct ipa3_tx_pkt_wrapper *tx_pkt,
	struct ipahal_imm_cmd_pyld **tag_pyld_ret);
static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify);
static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify, int expected_num,
	int *actual_num);
static unsigned long tag_to_pointer_wa(uint64_t tag);
static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);

static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
	struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	struct ipa3_tx_pkt_wrapper *next_pkt;
	int i, cnt;

	if (unlikely(tx_pkt == NULL)) {
		IPAERR("tx_pkt is NULL\n");
		return;
	}

	cnt = tx_pkt->cnt;
	IPADBG_LOW("cnt: %d\n", cnt);
	for (i = 0; i < cnt; i++) {
		spin_lock_bh(&sys->spinlock);
		if (unlikely(list_empty(&sys->head_desc_list))) {
			spin_unlock_bh(&sys->spinlock);
			return;
		}
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		sys->len--;
		spin_unlock_bh(&sys->spinlock);
		if (!tx_pkt->no_unmap_dma) {
			if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
				dma_unmap_single(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			} else {
				dma_unmap_page(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			}
		}
		if (tx_pkt->callback)
			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);

		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}
}

static void ipa3_wq_write_done_status(int src_pipe,
	struct ipa3_tx_pkt_wrapper *tx_pkt)
{
	struct ipa3_sys_context *sys;

	WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes);

	if (!ipa3_ctx->ep[src_pipe].status.status_en)
		return;

	sys = ipa3_ctx->ep[src_pipe].sys;
	if (!sys)
		return;

	ipa3_wq_write_done_common(sys, tx_pkt);
}

/**
 * ipa3_tasklet_write_done() - called (eventually) when a Tx operation
 * is complete
 * @data: user pointer pointing to the ipa3_sys_context
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
 * - iterate over all packets and validate that
 *   the order of the sent packets is the same as expected
 * - delete all the tx packet descriptors from the system
 *   pipe context (not needed anymore)
 */
static void ipa3_tasklet_write_done(unsigned long data)
{
	struct ipa3_sys_context *sys;
	struct ipa3_tx_pkt_wrapper *this_pkt;
	bool xmit_done = false;

	sys = (struct ipa3_sys_context *)data;
	spin_lock_bh(&sys->spinlock);
	while (atomic_add_unless(&sys->xmit_eot_cnt, -1, 0)) {
		while (!list_empty(&sys->head_desc_list)) {
			this_pkt = list_first_entry(&sys->head_desc_list,
				struct ipa3_tx_pkt_wrapper, link);
			xmit_done = this_pkt->xmit_done;
			spin_unlock_bh(&sys->spinlock);
			ipa3_wq_write_done_common(sys, this_pkt);
			spin_lock_bh(&sys->spinlock);
			if (xmit_done)
				break;
		}
	}
	spin_unlock_bh(&sys->spinlock);
}

static void ipa3_send_nop_desc(struct work_struct *work)
{
	struct ipa3_sys_context *sys = container_of(work,
		struct ipa3_sys_context, work);
	struct gsi_xfer_elem nop_xfer;
	struct ipa3_tx_pkt_wrapper *tx_pkt;

	IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);

	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
	if (!tx_pkt) {
		queue_work(sys->wq, &sys->work);
		return;
	}

	INIT_LIST_HEAD(&tx_pkt->link);
	tx_pkt->cnt = 1;
	tx_pkt->no_unmap_dma = true;
	tx_pkt->sys = sys;
	spin_lock_bh(&sys->spinlock);
	if (unlikely(!sys->nop_pending)) {
		spin_unlock_bh(&sys->spinlock);
		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		return;
	}
	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
	sys->nop_pending = false;

	memset(&nop_xfer, 0, sizeof(nop_xfer));
	nop_xfer.type = GSI_XFER_ELEM_NOP;
	nop_xfer.flags = GSI_XFER_FLAG_EOT;
	nop_xfer.xfer_user_data = tx_pkt;
	if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) {
		spin_unlock_bh(&sys->spinlock);
		IPAERR("gsi_queue_xfer for ch:%lu failed\n",
			sys->ep->gsi_chan_hdl);
		queue_work(sys->wq, &sys->work);
		return;
	}
	spin_unlock_bh(&sys->spinlock);

	/* make sure TAG process is sent before clocks are gated */
	ipa3_ctx->tag_process_before_gating = true;
}
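
/*
 * Note on the NOP flow: when ipa3_send() below queues descriptors without
 * an EOT flag, it marks sys->nop_pending and arms sys->db_timer. When the
 * timer fires, ipa3_ring_doorbell_timer_fn() queues sys->work, whose
 * handler (ipa3_send_nop_desc() above) pushes a NOP transfer with EOT set
 * so the HW raises a completion for the already-queued packets.
 */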

/**
 * ipa3_send() - Send multiple descriptors in one HW transaction
 * @sys: system pipe context
 * @num_desc: number of packets
 * @desc: packets to send (may be immediate command or data)
 * @in_atomic: whether caller is in atomic context
 *
 * This function is used for GPI connections.
 * - an ipa3_tx_pkt_wrapper is used for each ipa
 *   descriptor (allocated from the wrappers cache)
 * - The wrapper struct will be configured for each ipa-desc payload and will
 *   contain information which will be later used by the user callbacks
 * - Each packet (command or data) that is sent is also saved in the
 *   ipa3_sys_context so it can later be verified that all data was sent
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa3_send(struct ipa3_sys_context *sys,
	u32 num_desc,
	struct ipa3_desc *desc,
	bool in_atomic)
{
	struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first = NULL;
	struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
	struct ipa3_tx_pkt_wrapper *next_pkt;
	struct gsi_xfer_elem gsi_xfer[IPA_SEND_MAX_DESC];
	int i = 0;
	int j;
	int result;
	u32 mem_flag = GFP_ATOMIC;
	const struct ipa_gsi_ep_config *gsi_ep_cfg;
	bool send_nop = false;
	unsigned int max_desc;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	gsi_ep_cfg = ipa3_get_gsi_ep_info(sys->ep->client);
	if (unlikely(!gsi_ep_cfg)) {
		IPAERR("failed to get gsi EP config for client=%d\n",
			sys->ep->client);
		return -EFAULT;
	}
	if (unlikely(num_desc > IPA_SEND_MAX_DESC)) {
		IPAERR("max descriptors reached need=%d max=%d\n",
			num_desc, IPA_SEND_MAX_DESC);
		WARN_ON(1);
		return -EPERM;
	}

	max_desc = gsi_ep_cfg->ipa_if_tlv;
	if (gsi_ep_cfg->prefetch_mode == GSI_SMART_PRE_FETCH ||
		gsi_ep_cfg->prefetch_mode == GSI_FREE_PRE_FETCH)
		max_desc -= gsi_ep_cfg->prefetch_threshold;
	if (unlikely(num_desc > max_desc)) {
		IPAERR("Too many chained descriptors need=%d max=%d\n",
			num_desc, max_desc);
		WARN_ON(1);
		return -EPERM;
	}

	/* initialize only the xfers we use */
	memset(gsi_xfer, 0, sizeof(gsi_xfer[0]) * num_desc);

	spin_lock_bh(&sys->spinlock);

	for (i = 0; i < num_desc; i++) {
		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
			GFP_ATOMIC);
		if (!tx_pkt) {
			IPAERR("failed to alloc tx wrapper\n");
			result = -ENOMEM;
			goto failure;
		}
		INIT_LIST_HEAD(&tx_pkt->link);

		if (i == 0) {
			tx_pkt_first = tx_pkt;
			tx_pkt->cnt = num_desc;
		}

		/* populate tag field */
		if (desc[i].is_tag_status) {
			if (ipa_populate_tag_field(&desc[i], tx_pkt,
				&tag_pyld_ret)) {
				IPAERR("Failed to populate tag field\n");
				result = -EFAULT;
				goto failure_dma_map;
			}
		}

		tx_pkt->type = desc[i].type;

		if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
			tx_pkt->mem.base = desc[i].pyld;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					dma_map_single(ipa3_ctx->pdev,
					tx_pkt->mem.base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			} else {
				tx_pkt->mem.phys_base =
					desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		} else {
			tx_pkt->mem.base = desc[i].frag;
			tx_pkt->mem.size = desc[i].len;

			if (!desc[i].dma_address_valid) {
				tx_pkt->mem.phys_base =
					skb_frag_dma_map(ipa3_ctx->pdev,
					desc[i].frag,
					0, tx_pkt->mem.size,
					DMA_TO_DEVICE);
			} else {
				tx_pkt->mem.phys_base =
					desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
			}
		}
		if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
			IPAERR("failed to do dma map.\n");
			result = -EFAULT;
			goto failure_dma_map;
		}

		tx_pkt->sys = sys;
		tx_pkt->callback = desc[i].callback;
		tx_pkt->user1 = desc[i].user1;
		tx_pkt->user2 = desc[i].user2;
		tx_pkt->xmit_done = false;

		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

		gsi_xfer[i].addr = tx_pkt->mem.phys_base;

		/*
		 * Special treatment for immediate commands, where
		 * the structure of the descriptor is different
		 */
		if (desc[i].type == IPA_IMM_CMD_DESC) {
			gsi_xfer[i].len = desc[i].opcode;
			gsi_xfer[i].type =
				GSI_XFER_ELEM_IMME_CMD;
		} else {
			gsi_xfer[i].len = desc[i].len;
			gsi_xfer[i].type =
				GSI_XFER_ELEM_DATA;
		}

		if (i == (num_desc - 1)) {
			if (!sys->use_comm_evt_ring ||
			    (sys->pkt_sent % IPA_EOT_THRESH == 0)) {
				gsi_xfer[i].flags |=
					GSI_XFER_FLAG_EOT;
				gsi_xfer[i].flags |=
					GSI_XFER_FLAG_BEI;
			} else {
				send_nop = true;
			}
			gsi_xfer[i].xfer_user_data =
				tx_pkt_first;
		} else {
			gsi_xfer[i].flags |=
				GSI_XFER_FLAG_CHAIN;
		}
	}

	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
		gsi_xfer, true);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR_RL("GSI xfer failed.\n");
		result = -EFAULT;
		goto failure;
	}

	if (send_nop && !sys->nop_pending)
		sys->nop_pending = true;
	else
		send_nop = false;

	sys->pkt_sent++;
	spin_unlock_bh(&sys->spinlock);

	/* set the timer for sending the NOP descriptor */
	if (send_nop) {
		ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);

		IPADBG_LOW("scheduling timer for ch %lu\n",
			sys->ep->gsi_chan_hdl);
		hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
	}

	/* make sure TAG process is sent before clocks are gated */
	ipa3_ctx->tag_process_before_gating = true;

	return 0;

failure_dma_map:
	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);

failure:
	ipahal_destroy_imm_cmd(tag_pyld_ret);
	tx_pkt = tx_pkt_first;
	for (j = 0; j < i; j++) {
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);

		if (!tx_pkt->no_unmap_dma) {
			if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
				dma_unmap_single(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size, DMA_TO_DEVICE);
			} else {
				dma_unmap_page(ipa3_ctx->pdev,
					tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
			}
		}
		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}

	spin_unlock_bh(&sys->spinlock);
	return result;
}

/**
 * ipa3_send_one() - Send a single descriptor
 * @sys: system pipe context
 * @desc: descriptor to send
 * @in_atomic: whether caller is in atomic context
 *
 * - Allocate a tx_packet wrapper
 * - transfer data to the IPA
 * - once the transfer completes, the descriptor's callback (if any) is
 *   invoked from the Tx completion path in deferred context
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
	bool in_atomic)
{
	return ipa3_send(sys, 1, desc, in_atomic);
}
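
/*
 * Illustrative sketch (not wired into the driver): how a caller might hand
 * one linear skb to a producer pipe via ipa3_send_one(). The sys context,
 * the choice of IPA_DATA_DESC_SKB and the completion handling are
 * assumptions made for the example.
 */
#if 0
static int example_send_skb(struct ipa3_sys_context *sys, struct sk_buff *skb)
{
	struct ipa3_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.type = IPA_DATA_DESC_SKB;
	desc.pyld = skb->data;		/* linear data only */
	desc.len = skb->len;
	desc.callback = NULL;		/* or a Tx-complete callback */
	desc.user1 = skb;

	/* true: caller may be in atomic context */
	return ipa3_send_one(sys, &desc, true);
}
#endif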

/**
 * ipa3_transport_irq_cmd_ack - callback function which will be called by
 * the transport driver after an immediate command is complete.
 * @user1: pointer to the descriptor of the transfer
 * @user2: not used
 *
 * Complete the immediate command's completion object; this will release the
 * thread which waits on this completion object (ipa3_send_cmd())
 */
static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
{
	struct ipa3_desc *desc = (struct ipa3_desc *)user1;

	if (WARN(!desc, "desc is NULL"))
		return;

	IPADBG_LOW("got ack for cmd=%d\n", desc->opcode);
	complete(&desc->xfer_done);
}

/**
 * ipa3_transport_irq_cmd_ack_free - callback function which will be
 * called by the transport driver after an immediate command is complete.
 * This function will also free the completion object once it is done.
 * @tag_comp: pointer to the completion object
 * @ignored: parameter not used
 *
 * Complete the immediate command's completion object; this will release the
 * thread which waits on this completion object (ipa3_send_cmd_timeout())
 */
static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored)
{
	struct ipa3_tag_completion *comp = tag_comp;

	if (!comp) {
		IPAERR("comp is NULL\n");
		return;
	}

	complete(&comp->comp);
	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);
}

/**
 * ipa3_send_cmd - send immediate commands
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 *
 * Function will block till the command gets an ACK from the IPA HW; the
 * caller needs to free any resources it allocated after the function returns.
 * The callback in ipa3_desc should not be set by the caller
 * for this function.
 */
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
{
	struct ipa3_desc *desc;
	int i, result = 0;
	struct ipa3_sys_context *sys;
	int ep_idx;

	for (i = 0; i < num_desc; i++)
		IPADBG("sending imm cmd %d\n", descr[i].opcode);

	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}

	sys = ipa3_ctx->ep[ep_idx].sys;
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (num_desc == 1) {
		init_completion(&descr->xfer_done);

		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa3_transport_irq_cmd_ack;
		descr->user1 = descr;
		if (ipa3_send_one(sys, descr, true)) {
			IPAERR("fail to send immediate command\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&descr->xfer_done);
	} else {
		desc = &descr[num_desc - 1];
		init_completion(&desc->xfer_done);

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa3_transport_irq_cmd_ack;
		desc->user1 = desc;
		if (ipa3_send(sys, num_desc, descr, true)) {
			IPAERR("fail to send multiple immediate command set\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&desc->xfer_done);
	}

bail:
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	return result;
}
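
/*
 * Illustrative sketch (not wired into the driver): the minimal shape of a
 * blocking immediate-command send with ipa3_send_cmd(), assuming the caller
 * has already built an ipahal payload. The ipahal payload field names
 * (opcode/len/data) used here are assumptions for the example.
 */
#if 0
static int example_send_imm_cmd(struct ipahal_imm_cmd_pyld *cmd_pyld)
{
	struct ipa3_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.type = IPA_IMM_CMD_DESC;
	desc.opcode = cmd_pyld->opcode;
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	/* callback/user1 are left clear; ipa3_send_cmd() owns them */

	return ipa3_send_cmd(1, &desc);
}
#endif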

/**
 * ipa3_send_cmd_timeout - send immediate commands with a limited time
 * waiting for an ACK from the IPA HW
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 * @timeout: milliseconds to wait for an ACK from the IPA HW
 *
 * Function will block till the command gets an ACK from the IPA HW or the
 * timeout expires. The caller needs to free any resources it allocated after
 * the function returns.
 * The callback in ipa3_desc should not be set by the caller
 * for this function.
 */
int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
{
	struct ipa3_desc *desc;
	int i, result = 0;
	struct ipa3_sys_context *sys;
	int ep_idx;
	int completed;
	struct ipa3_tag_completion *comp;

	for (i = 0; i < num_desc; i++)
		IPADBG("sending imm cmd %d\n", descr[i].opcode);

	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}

	comp = kzalloc(sizeof(*comp), GFP_ATOMIC);
	if (!comp)
		return -ENOMEM;

	init_completion(&comp->comp);

	/* completion needs to be released from both here and in ack callback */
	atomic_set(&comp->cnt, 2);

	sys = ipa3_ctx->ep[ep_idx].sys;

	if (num_desc == 1) {
		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa3_transport_irq_cmd_ack_free;
		descr->user1 = comp;
		if (ipa3_send_one(sys, descr, true)) {
			IPAERR("fail to send immediate command\n");
			kfree(comp);
			result = -EFAULT;
			goto bail;
		}
	} else {
		desc = &descr[num_desc - 1];

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa3_transport_irq_cmd_ack_free;
		desc->user1 = comp;
		if (ipa3_send(sys, num_desc, descr, true)) {
			IPAERR("fail to send multiple immediate command set\n");
			kfree(comp);
			result = -EFAULT;
			goto bail;
		}
	}

	completed = wait_for_completion_timeout(
		&comp->comp, msecs_to_jiffies(timeout));
	if (!completed)
		IPADBG("timeout waiting for imm-cmd ACK\n");

	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);

bail:
	return result;
}

/**
 * ipa3_handle_rx_core() - The core functionality of packet reception. This
 * function is called from multiple code paths.
 *
 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
 * endpoint. The function runs as long as there are packets in the pipe.
 * For each packet:
 * - Disconnect the packet from the system pipe linked list
 * - Unmap the packet's skb, making it non-DMAable
 * - Free the packet from the cache
 * - Prepare a proper skb
 * - Call the endpoint's notify function, passing the skb in the parameters
 * - Replenish the rx cache
 */
static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
	bool in_poll_state)
{
	int ret;
	int cnt = 0;
	struct gsi_chan_xfer_notify notify = { 0 };

	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
		!atomic_read(&sys->curr_polling_state))) {
		if (cnt && !process_all)
			break;

		ret = ipa_poll_gsi_pkt(sys, &notify);
		if (ret)
			break;

		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
			ipa3_dma_memcpy_notify(sys);
		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
			ipa3_wlan_wq_rx_common(sys, &notify);
		else
			ipa3_wq_rx_common(sys, &notify);

		++cnt;
	}
	return cnt;
}

/**
 * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
 */
static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
{
	int ret;

	atomic_set(&sys->curr_polling_state, 0);
	ipa3_dec_release_wakelock();
	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
		GSI_CHAN_MODE_CALLBACK);
	if ((ret != GSI_STATUS_SUCCESS) &&
		!atomic_read(&sys->curr_polling_state)) {
		if (ret == -GSI_STATUS_PENDING_IRQ) {
			ipa3_inc_acquire_wakelock();
			atomic_set(&sys->curr_polling_state, 1);
		} else {
			IPAERR("Failed to switch to intr mode.\n");
		}
	}

	return ret;
}
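
/*
 * Note: -GSI_STATUS_PENDING_IRQ above means an interrupt fired (i.e. more
 * data arrived) while the channel was being switched back to callback mode.
 * In that case the wakelock is re-taken, the polling state is restored, and
 * the caller (ipa3_handle_rx() below) goes back to polling instead of
 * sleeping.
 */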

/**
 * ipa3_handle_rx() - handle packet reception. This function is executed in
 * the context of a work queue.
 * @sys: system pipe context of the endpoint being polled
 *
 * ipa3_handle_rx_core() is run in polling mode. After all packets have been
 * received, the driver switches back to interrupt mode.
 */
static void ipa3_handle_rx(struct ipa3_sys_context *sys)
{
	int inactive_cycles;
	int cnt;
	int ret;

	ipa_pm_activate_sync(sys->pm_hdl);
start_poll:
	inactive_cycles = 0;
	do {
		cnt = ipa3_handle_rx_core(sys, true, true);
		if (cnt == 0)
			inactive_cycles++;
		else
			inactive_cycles = 0;

		trace_idle_sleep_enter3(sys->ep->client);
		usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
		trace_idle_sleep_exit3(sys->ep->client);

		/*
		 * if pipe is out of buffers there is no point polling for
		 * completed descs; release the worker so delayed work can
		 * run in a timely manner
		 */
		if (sys->len == 0)
			break;

	} while (inactive_cycles <= POLLING_INACTIVITY_RX);

	trace_poll_to_intr3(sys->ep->client);
	ret = ipa3_rx_switch_to_intr_mode(sys);
	if (ret == -GSI_STATUS_PENDING_IRQ)
		goto start_poll;

	ipa_pm_deferred_deactivate(sys->pm_hdl);
}

static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa3_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);

	if (sys->napi_obj) {
		/* interrupt mode is done in ipa3_rx_poll context */
		ipa_assert();
	} else
		ipa3_handle_rx(sys);
}

enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param)
{
	struct ipa3_sys_context *sys = container_of(param,
		struct ipa3_sys_context, db_timer);

	queue_work(sys->wq, &sys->work);
	return HRTIMER_NORESTART;
}

static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
{
	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)p;

	switch (event) {
	case IPA_PM_CLIENT_ACTIVATED:
		/*
		 * this event is ignored as the sync version of activation
		 * will be used.
		 */
		break;
	case IPA_PM_REQUEST_WAKEUP:
		/*
		 * pipe will be unsuspended as part of
		 * enabling IPA clocks
		 */
		IPADBG("calling wakeup for client %d\n", sys->ep->client);
		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_WAN");
			usleep_range(SUSPEND_MIN_SLEEP_RX,
				SUSPEND_MAX_SLEEP_RX);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_WAN");
		} else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LAN");
			usleep_range(SUSPEND_MIN_SLEEP_RX,
				SUSPEND_MAX_SLEEP_RX);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LAN");
		} else if (sys->ep->client == IPA_CLIENT_ODL_DPL_CONS) {
			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_ODL");
			usleep_range(SUSPEND_MIN_SLEEP_RX,
				SUSPEND_MAX_SLEEP_RX);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_ODL");
		} else
			IPAERR("Unexpected event %d for client %d\n",
				event, sys->ep->client);
		break;
	default:
		IPAERR("Unexpected event %d for client %d\n",
			event, sys->ep->client);
		WARN_ON(1);
		return;
	}
}

/**
 * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
 * IPA EP configuration
 * @sys_in:	[in] input needed to setup the pipe and configure EP
 * @clnt_hdl:	[out] client handle
 *
 * - configure the end-point registers with the supplied
 *   parameters from the user.
 * - Creates a GPI connection with IPA.
 * - allocate descriptor FIFO
 *
 * Returns:	0 on success, negative on failure
 */
int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int i, ipa_ep_idx, wan_handle;
	int result = -EINVAL;
	struct ipahal_reg_coal_qmap_cfg qmap_cfg;
	struct ipahal_reg_coal_evict_lru evict_lru;
	char buff[IPA_RESOURCE_NAME_MAX];
	struct ipa_ep_cfg ep_cfg_copy;

	if (sys_in == NULL || clnt_hdl == NULL) {
		IPAERR("NULL args\n");
		goto fail_gen;
	}

	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
		IPAERR("bad parm client:%d fifo_sz:%d\n",
			sys_in->client, sys_in->desc_fifo_sz);
		goto fail_gen;
	}

	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
		IPAERR("Invalid client.\n");
		goto fail_gen;
	}

	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid == 1) {
		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
		goto fail_gen;
	}

	/* save the input config parameters */
	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
		ep_cfg_copy = sys_in->ipa_ep_cfg;

	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));

	if (!ep->sys) {
		struct ipa_pm_register_params pm_reg;

		memset(&pm_reg, 0, sizeof(pm_reg));
		ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
		if (!ep->sys) {
			IPAERR("failed to alloc sys ctx for client %d\n",
				sys_in->client);
			result = -ENOMEM;
			goto fail_and_disable_clocks;
		}
		ep->sys->ep = ep;
		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
			sys_in->client);
		ep->sys->wq = alloc_workqueue(buff,
			WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
		if (!ep->sys->wq) {
			IPAERR("failed to create wq for client %d\n",
				sys_in->client);
			result = -EFAULT;
			goto fail_wq;
		}

		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
			sys_in->client);
		ep->sys->repl_wq = alloc_workqueue(buff,
			WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
		if (!ep->sys->repl_wq) {
			IPAERR("failed to create rep wq for client %d\n",
				sys_in->client);
			result = -EFAULT;
			goto fail_wq2;
		}

		INIT_LIST_HEAD(&ep->sys->head_desc_list);
		INIT_LIST_HEAD(&ep->sys->rcycl_list);
		spin_lock_init(&ep->sys->spinlock);
		hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;

		/* create IPA PM resources for handling polling mode */
		if (IPA_CLIENT_IS_CONS(sys_in->client)) {
			pm_reg.name = ipa_clients_strings[sys_in->client];
			pm_reg.callback = ipa_pm_sys_pipe_cb;
			pm_reg.user_data = ep->sys;
			pm_reg.group = IPA_PM_GROUP_APPS;
			result = ipa_pm_register(&pm_reg, &ep->sys->pm_hdl);
			if (result) {
				IPAERR("failed to create IPA PM client %d\n",
					result);
				goto fail_pm;
			}

			if (IPA_CLIENT_IS_APPS_CONS(sys_in->client)) {
				result = ipa_pm_associate_ipa_cons_to_client(
					ep->sys->pm_hdl, sys_in->client);
				if (result) {
					IPAERR("failed to associate\n");
					goto fail_gen2;
				}
			}

			result = ipa_pm_set_throughput(ep->sys->pm_hdl,
				IPA_APPS_BW_FOR_PM);
			if (result) {
				IPAERR("failed to set profile IPA PM client\n");
				goto fail_gen2;
			}
		}
	} else {
		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
	}

	atomic_set(&ep->sys->xmit_eot_cnt, 0);
	tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
		(unsigned long) ep->sys);

	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
	if (ipa3_assign_policy(sys_in, ep->sys)) {
		IPAERR("failed to assign policy for client %d\n",
			sys_in->client);
		result = -ENOMEM;
		goto fail_gen2;
	}

	ep->valid = 1;
	ep->client = sys_in->client;
	ep->client_notify = sys_in->notify;
	ep->sys->napi_obj = sys_in->napi_obj;
	ep->priv = sys_in->priv;
	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
	atomic_set(&ep->avail_fifo_desc,
		((sys_in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE) - 1));

	if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
	    ep->sys->status_stat == NULL) {
		ep->sys->status_stat =
			kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL);
		if (!ep->sys->status_stat)
			goto fail_gen2;
	}

	if (!ep->skip_ep_cfg) {
		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_gen2;
		}
		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_gen2;
		}
		IPADBG("ep %d configuration successful\n", ipa_ep_idx);
	} else {
		IPADBG("skipping ep %d configuration\n", ipa_ep_idx);
	}

	result = ipa_gsi_setup_channel(sys_in, ep);
	if (result) {
		IPAERR("Failed to setup GSI channel\n");
		goto fail_gen2;
	}

	*clnt_hdl = ipa_ep_idx;

	if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
		ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
		if (!ep->sys->repl) {
			IPAERR("failed to alloc repl for client %d\n",
				sys_in->client);
			result = -ENOMEM;
			goto fail_gen2;
		}
		atomic_set(&ep->sys->repl->pending, 0);
		ep->sys->repl->capacity = ep->sys->rx_pool_sz + 1;

		ep->sys->repl->cache = kcalloc(ep->sys->repl->capacity,
			sizeof(void *), GFP_KERNEL);
		if (!ep->sys->repl->cache) {
			IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
			ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
			ep->sys->repl->capacity = 0;
		} else {
			atomic_set(&ep->sys->repl->head_idx, 0);
			atomic_set(&ep->sys->repl->tail_idx, 0);
			ipa3_wq_repl_rx(&ep->sys->repl_work);
		}
	}

	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
		ipa3_replenish_rx_cache(ep->sys);
		for (i = 0; i < GSI_VEID_MAX; i++)
			INIT_LIST_HEAD(&ep->sys->pending_pkts[i]);
	}

	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
		ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
		atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
	}

	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
			sys_in->client == IPA_CLIENT_APPS_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa3_install_dflt_flt_rules(ipa_ep_idx);
	}

	result = ipa3_enable_data_path(ipa_ep_idx);
	if (result) {
		IPAERR("enable data path failed res=%d ep=%d.\n", result,
			ipa_ep_idx);
		goto fail_repl;
	}

	result = gsi_start_channel(ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("gsi_start_channel failed res=%d ep=%d.\n", result,
			ipa_ep_idx);
		goto fail_gen3;
	}

	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);

	IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client,
		ipa_ep_idx, ep->sys);

	/* configure the registers and setup the default pipe */
	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
		evict_lru.coal_vp_lru_thrshld = 0;
		evict_lru.coal_eviction_en = true;
		ipahal_write_reg_fields(IPA_COAL_EVICT_LRU, &evict_lru);

		qmap_cfg.mux_id_byte_sel = IPA_QMAP_ID_BYTE;
		ipahal_write_reg_fields(IPA_COAL_QMAP_CFG, &qmap_cfg);

		sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
		sys_in->ipa_ep_cfg = ep_cfg_copy;
		result = ipa3_setup_sys_pipe(sys_in, &wan_handle);
		if (result) {
			IPAERR("failed to setup default coalescing pipe\n");
			goto fail_repl;
		}
	}

	return 0;

fail_gen3:
	ipa3_disable_data_path(ipa_ep_idx);
fail_repl:
	ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
	ep->sys->repl->capacity = 0;
	kfree(ep->sys->repl);
fail_gen2:
	ipa_pm_deregister(ep->sys->pm_hdl);
fail_pm:
	destroy_workqueue(ep->sys->repl_wq);
fail_wq2:
	destroy_workqueue(ep->sys->wq);
fail_wq:
	kfree(ep->sys);
	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
fail_and_disable_clocks:
	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
	return result;
}
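
/*
 * Illustrative sketch (not wired into the driver): how a client module might
 * bring up and later tear down an APPS LAN consumer pipe with
 * ipa3_setup_sys_pipe() above and ipa3_teardown_sys_pipe() below. The FIFO
 * size, the notify callback signature (the usual ipa_notify_cb convention)
 * and the callback body are assumptions made for the example.
 */
#if 0
static void example_lan_rx_notify(void *priv, enum ipa_dp_evt_type evt,
	unsigned long data)
{
	/* for IPA_RECEIVE, data carries the received skb */
}

static int example_lan_cons_setup(u32 *hdl)
{
	struct ipa_sys_connect_params sys_in;

	memset(&sys_in, 0, sizeof(sys_in));
	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
	sys_in.desc_fifo_sz = 0x800;		/* assumed FIFO size */
	sys_in.notify = example_lan_rx_notify;
	sys_in.priv = NULL;

	return ipa3_setup_sys_pipe(&sys_in, hdl);
}

static void example_lan_cons_teardown(u32 hdl)
{
	ipa3_teardown_sys_pipe(hdl);
}
#endif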

/**
 * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP
 * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
 *
 * Returns:	0 on success, negative on failure
 */
int ipa3_teardown_sys_pipe(u32 clnt_hdl)
{
	struct ipa3_ep_context *ep;
	int empty;
	int result;
	int i;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa3_ctx->ep[clnt_hdl];
	if (!ep->keep_ipa_awake)
		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	ipa3_disable_data_path(clnt_hdl);

	if (IPA_CLIENT_IS_PROD(ep->client)) {
		do {
			spin_lock_bh(&ep->sys->spinlock);
			empty = list_empty(&ep->sys->head_desc_list);
			spin_unlock_bh(&ep->sys->spinlock);
			if (!empty)
				usleep_range(95, 105);
			else
				break;
		} while (1);
	}

	/* channel stop might fail on timeout if IPA is busy */
	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
		result = ipa3_stop_gsi_channel(clnt_hdl);
		if (result == GSI_STATUS_SUCCESS)
			break;

		if (result != -GSI_STATUS_AGAIN &&
			result != -GSI_STATUS_TIMED_OUT)
			break;
	}

	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("GSI stop chan err: %d.\n", result);
		ipa_assert();
		return result;
	}

	if (ep->sys->napi_obj) {
		do {
			usleep_range(95, 105);
		} while (atomic_read(&ep->sys->curr_polling_state));
	}

	if (IPA_CLIENT_IS_CONS(ep->client))
		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
	flush_workqueue(ep->sys->wq);

	/* tear down the default pipe before we reset the channel */
	if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
		if (i == IPA_EP_NOT_ALLOCATED) {
			IPAERR("failed to get idx");
			return i;
		}

		result = ipa3_teardown_coal_def_pipe(i);
		if (result) {
			IPAERR("failed to teardown default coal pipe\n");
			return result;
		}
	}

	result = ipa3_reset_gsi_channel(clnt_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to reset chan: %d.\n", result);
		ipa_assert();
		return result;
	}
	dma_free_coherent(ipa3_ctx->pdev,
		ep->gsi_mem_info.chan_ring_len,
		ep->gsi_mem_info.chan_ring_base_vaddr,
		ep->gsi_mem_info.chan_ring_base_addr);
	result = gsi_dealloc_channel(ep->gsi_chan_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("Failed to dealloc chan: %d.\n", result);
		ipa_assert();
		return result;
	}

	/* free event ring only when it is present */
	if (ep->sys->use_comm_evt_ring) {
		ipa3_ctx->gsi_evt_comm_ring_rem +=
			ep->gsi_mem_info.chan_ring_len;
	} else if (ep->gsi_evt_ring_hdl != ~0) {
		result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
		if (WARN(result != GSI_STATUS_SUCCESS, "reset evt %d", result))
			return result;

		dma_free_coherent(ipa3_ctx->pdev,
			ep->gsi_mem_info.evt_ring_len,
			ep->gsi_mem_info.evt_ring_base_vaddr,
			ep->gsi_mem_info.evt_ring_base_addr);
		result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
		if (WARN(result != GSI_STATUS_SUCCESS, "deall evt %d", result))
			return result;
	}
	if (ep->sys->repl_wq)
		flush_workqueue(ep->sys->repl_wq);
	if (IPA_CLIENT_IS_CONS(ep->client))
		ipa3_cleanup_rx(ep->sys);

	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
			ep->client == IPA_CLIENT_APPS_WAN_PROD)
			IPADBG("modem cfg emb pipe flt\n");
		else
			ipa3_delete_dflt_flt_rules(clnt_hdl);
	}

	if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
		atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt);

	memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats));

	if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt))
		ipa3_cleanup_wlan_rx_common_cache();

	ep->valid = 0;

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}
  1141. /**
  1142. * ipa3_teardown_coal_def_pipe() - Teardown the APPS_WAN_COAL_CONS
* default GPI pipe and cleanup the IPA EP;
* called during teardown of the coalescing pipe, after its GSI channel has
* been stopped but before it is reset.
  1145. * @clnt_hdl: [in] the handle obtained from ipa3_setup_sys_pipe
  1146. *
  1147. * Returns: 0 on success, negative on failure
  1148. */
  1149. static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl)
  1150. {
  1151. struct ipa3_ep_context *ep;
  1152. int result;
  1153. int i;
  1154. ep = &ipa3_ctx->ep[clnt_hdl];
  1155. ipa3_disable_data_path(clnt_hdl);
  1156. /* channel stop might fail on timeout if IPA is busy */
  1157. for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
  1158. result = ipa3_stop_gsi_channel(clnt_hdl);
  1159. if (result == GSI_STATUS_SUCCESS)
  1160. break;
  1161. if (result != -GSI_STATUS_AGAIN &&
  1162. result != -GSI_STATUS_TIMED_OUT)
  1163. break;
  1164. }
  1165. if (result != GSI_STATUS_SUCCESS) {
  1166. IPAERR("GSI stop chan err: %d.\n", result);
  1167. ipa_assert();
  1168. return result;
  1169. }
  1170. result = ipa3_reset_gsi_channel(clnt_hdl);
  1171. if (result != GSI_STATUS_SUCCESS) {
  1172. IPAERR("Failed to reset chan: %d.\n", result);
  1173. ipa_assert();
  1174. return result;
  1175. }
  1176. dma_free_coherent(ipa3_ctx->pdev,
  1177. ep->gsi_mem_info.chan_ring_len,
  1178. ep->gsi_mem_info.chan_ring_base_vaddr,
  1179. ep->gsi_mem_info.chan_ring_base_addr);
  1180. result = gsi_dealloc_channel(ep->gsi_chan_hdl);
  1181. if (result != GSI_STATUS_SUCCESS) {
  1182. IPAERR("Failed to dealloc chan: %d.\n", result);
  1183. ipa_assert();
  1184. return result;
  1185. }
  1186. if (IPA_CLIENT_IS_CONS(ep->client))
  1187. cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
  1188. flush_workqueue(ep->sys->wq);
  1189. if (ep->sys->repl_wq)
  1190. flush_workqueue(ep->sys->repl_wq);
  1191. if (IPA_CLIENT_IS_CONS(ep->client))
  1192. ipa3_cleanup_rx(ep->sys);
  1193. ep->valid = 0;
  1194. IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
  1195. return 0;
  1196. }
  1197. /**
* ipa3_tx_comp_usr_notify_release() - Callback that invokes the user-supplied
* callback to release the skb, or releases it on its own if no callback was
* supplied.
* @user1: [in] opaque pointer to the transmitted skb
* @user2: [in] IPA endpoint index whose client is notified
*
* This notification callback is for the destination client.
  1205. */
  1206. static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
  1207. {
  1208. struct sk_buff *skb = (struct sk_buff *)user1;
  1209. int ep_idx = user2;
  1210. IPADBG_LOW("skb=%pK ep=%d\n", skb, ep_idx);
  1211. IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl);
  1212. if (ipa3_ctx->ep[ep_idx].client_notify)
  1213. ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
  1214. IPA_WRITE_DONE, (unsigned long)skb);
  1215. else
  1216. dev_kfree_skb_any(skb);
  1217. }
  1218. void ipa3_tx_cmd_comp(void *user1, int user2)
  1219. {
  1220. ipahal_destroy_imm_cmd(user1);
  1221. }
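/*
 * Usage sketch (illustrative only): a typical SW data-path sender passes a
 * CONS destination and frees the skb itself only on error; on success the
 * skb is released from the completion path (client callback or
 * dev_kfree_skb_any(), see ipa3_tx_comp_usr_notify_release()).
 *
 *	if (ipa3_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL))
 *		dev_kfree_skb_any(skb);
 */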
  1222. /**
  1223. * ipa3_tx_dp() - Data-path tx handler
  1224. * @dst: [in] which IPA destination to route tx packets to
  1225. * @skb: [in] the packet to send
  1226. * @metadata: [in] TX packet meta-data
  1227. *
* Data-path TX handler. It is used both for the SW data-path, which bypasses
* most IPA HW blocks, and for the regular HW data-path (WLAN AMPDU traffic
* only). If dst is a "valid" CONS type, the SW data-path is used. If dst is
* the WLAN_AMPDU PROD type, the HW data-path for WLAN AMPDU is used. Anything
* else is an error. On error, the client must free the skb as needed. On
* success, the IPA driver will later invoke the client callback if one was
* supplied; that callback should free the skb. If no callback was supplied,
* the IPA driver frees the skb internally.
  1236. *
* The function will use two descriptors for this send command
* (for A5_WLAN_AMPDU_PROD only one descriptor will be sent):
* the first descriptor is used to inform the IPA hardware that
* apps need to push data into the IPA (IP_PACKET_INIT immediate command).
* Once the send is done from the transport's point of view, the IPA driver
* is notified via the supplied callback.
  1243. *
  1244. * Returns: 0 on success, negative on failure
  1245. */
  1246. int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
  1247. struct ipa_tx_meta *meta)
  1248. {
  1249. struct ipa3_desc *desc;
  1250. struct ipa3_desc _desc[3];
  1251. int dst_ep_idx;
  1252. struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
  1253. struct ipa3_sys_context *sys;
  1254. int src_ep_idx;
  1255. int num_frags, f;
  1256. const struct ipa_gsi_ep_config *gsi_ep;
  1257. int data_idx;
  1258. unsigned int max_desc;
  1259. if (unlikely(!ipa3_ctx)) {
  1260. IPAERR("IPA3 driver was not initialized\n");
  1261. return -EINVAL;
  1262. }
  1263. if (skb->len == 0) {
  1264. IPAERR("packet size is 0\n");
  1265. return -EINVAL;
  1266. }
  1267. /*
  1268. * USB_CONS: PKT_INIT ep_idx = dst pipe
  1269. * Q6_CONS: PKT_INIT ep_idx = sender pipe
  1270. * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
  1271. *
  1272. * LAN TX: all PKT_INIT
  1273. * WAN TX: PKT_INIT (cmd) + HW (data)
  1274. *
  1275. */
  1276. if (IPA_CLIENT_IS_CONS(dst)) {
  1277. src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
  1278. if (-1 == src_ep_idx) {
  1279. IPAERR("Client %u is not mapped\n",
  1280. IPA_CLIENT_APPS_LAN_PROD);
  1281. goto fail_gen;
  1282. }
  1283. dst_ep_idx = ipa3_get_ep_mapping(dst);
  1284. } else {
  1285. src_ep_idx = ipa3_get_ep_mapping(dst);
  1286. if (-1 == src_ep_idx) {
  1287. IPAERR("Client %u is not mapped\n", dst);
  1288. goto fail_gen;
  1289. }
  1290. if (meta && meta->pkt_init_dst_ep_valid)
  1291. dst_ep_idx = meta->pkt_init_dst_ep;
  1292. else
  1293. dst_ep_idx = -1;
  1294. }
  1295. sys = ipa3_ctx->ep[src_ep_idx].sys;
  1296. if (!sys || !sys->ep->valid) {
  1297. IPAERR_RL("pipe %d not valid\n", src_ep_idx);
  1298. goto fail_pipe_not_valid;
  1299. }
  1300. num_frags = skb_shinfo(skb)->nr_frags;
  1301. /*
  1302. * make sure TLV FIFO supports the needed frags.
  1303. * 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS.
  1304. * 1 descriptor needed for the linear portion of skb.
  1305. */
  1306. gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client);
  1307. if (unlikely(gsi_ep == NULL)) {
  1308. IPAERR("failed to get EP %d GSI info\n", src_ep_idx);
  1309. goto fail_gen;
  1310. }
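/*
 * The number of descriptors that can be chained is limited by the EP's
 * TLV FIFO depth; for smart/free prefetch modes the prefetch threshold
 * is subtracted as well, since those entries are effectively unusable
 * for chaining.
 */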
  1311. max_desc = gsi_ep->ipa_if_tlv;
  1312. if (gsi_ep->prefetch_mode == GSI_SMART_PRE_FETCH ||
  1313. gsi_ep->prefetch_mode == GSI_FREE_PRE_FETCH)
  1314. max_desc -= gsi_ep->prefetch_threshold;
  1315. if (num_frags + 3 > max_desc) {
  1316. if (skb_linearize(skb)) {
  1317. IPAERR("Failed to linear skb with %d frags\n",
  1318. num_frags);
  1319. goto fail_gen;
  1320. }
  1321. num_frags = 0;
  1322. }
  1323. if (num_frags) {
  1324. /* 1 desc for tag to resolve status out-of-order issue;
  1325. * 1 desc is needed for the linear portion of skb;
  1326. * 1 desc may be needed for the PACKET_INIT;
  1327. * 1 desc for each frag
  1328. */
  1329. desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
  1330. if (!desc) {
  1331. IPAERR("failed to alloc desc array\n");
  1332. goto fail_gen;
  1333. }
  1334. } else {
  1335. memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
  1336. desc = &_desc[0];
  1337. }
  1338. if (dst_ep_idx != -1) {
  1339. int skb_idx;
  1340. /* SW data path */
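/*
 * Descriptor layout for the SW path: an optional TAG_STATUS descriptor
 * (no-interrupt policy only), the IP_PACKET_INIT immediate command, the
 * linear part of the skb, and then one descriptor per page fragment.
 */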
  1341. data_idx = 0;
  1342. if (sys->policy == IPA_POLICY_NOINTR_MODE) {
  1343. /*
  1344. * For non-interrupt mode channel (where there is no
  1345. * event ring) TAG STATUS are used for completion
  1346. * notification. IPA will generate a status packet with
  1347. * tag info as a result of the TAG STATUS command.
  1348. */
  1349. desc[data_idx].is_tag_status = true;
  1350. data_idx++;
  1351. }
  1352. desc[data_idx].opcode = ipa3_ctx->pkt_init_imm_opcode;
  1353. desc[data_idx].dma_address_valid = true;
  1354. desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
  1355. desc[data_idx].type = IPA_IMM_CMD_DESC;
  1356. desc[data_idx].callback = NULL;
  1357. data_idx++;
  1358. desc[data_idx].pyld = skb->data;
  1359. desc[data_idx].len = skb_headlen(skb);
  1360. desc[data_idx].type = IPA_DATA_DESC_SKB;
  1361. desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
  1362. desc[data_idx].user1 = skb;
  1363. desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid &&
  1364. meta->pkt_init_dst_ep_remote) ?
  1365. src_ep_idx :
  1366. dst_ep_idx;
  1367. if (meta && meta->dma_address_valid) {
  1368. desc[data_idx].dma_address_valid = true;
  1369. desc[data_idx].dma_address = meta->dma_address;
  1370. }
  1371. skb_idx = data_idx;
  1372. data_idx++;
  1373. for (f = 0; f < num_frags; f++) {
  1374. desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f];
  1375. desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED;
  1376. desc[data_idx + f].len =
  1377. skb_frag_size(desc[data_idx + f].frag);
  1378. }
  1379. /* don't free skb till frag mappings are released */
  1380. if (num_frags) {
  1381. desc[data_idx + f - 1].callback =
  1382. desc[skb_idx].callback;
  1383. desc[data_idx + f - 1].user1 = desc[skb_idx].user1;
  1384. desc[data_idx + f - 1].user2 = desc[skb_idx].user2;
  1385. desc[skb_idx].callback = NULL;
  1386. }
  1387. if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
  1388. IPAERR_RL("fail to send skb %pK num_frags %u SWP\n",
  1389. skb, num_frags);
  1390. goto fail_send;
  1391. }
  1392. IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
  1393. } else {
  1394. /* HW data path */
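/*
 * Descriptor layout for the HW path: an optional TAG_STATUS descriptor
 * (no-interrupt policy only), the linear part of the skb, and then one
 * descriptor per page fragment; no PACKET_INIT command is needed here.
 */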
  1395. data_idx = 0;
  1396. if (sys->policy == IPA_POLICY_NOINTR_MODE) {
  1397. /*
  1398. * For non-interrupt mode channel (where there is no
  1399. * event ring) TAG STATUS are used for completion
  1400. * notification. IPA will generate a status packet with
  1401. * tag info as a result of the TAG STATUS command.
  1402. */
  1403. desc[data_idx].is_tag_status = true;
  1404. data_idx++;
  1405. }
  1406. desc[data_idx].pyld = skb->data;
  1407. desc[data_idx].len = skb_headlen(skb);
  1408. desc[data_idx].type = IPA_DATA_DESC_SKB;
  1409. desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
  1410. desc[data_idx].user1 = skb;
  1411. desc[data_idx].user2 = src_ep_idx;
  1412. if (meta && meta->dma_address_valid) {
  1413. desc[data_idx].dma_address_valid = true;
  1414. desc[data_idx].dma_address = meta->dma_address;
  1415. }
  1416. if (num_frags == 0) {
  1417. if (ipa3_send(sys, data_idx + 1, desc, true)) {
  1418. IPAERR("fail to send skb %pK HWP\n", skb);
  1419. goto fail_mem;
  1420. }
  1421. } else {
  1422. for (f = 0; f < num_frags; f++) {
  1423. desc[data_idx+f+1].frag =
  1424. &skb_shinfo(skb)->frags[f];
  1425. desc[data_idx+f+1].type =
  1426. IPA_DATA_DESC_SKB_PAGED;
  1427. desc[data_idx+f+1].len =
  1428. skb_frag_size(desc[data_idx+f+1].frag);
  1429. }
  1430. /* don't free skb till frag mappings are released */
  1431. desc[data_idx+f].callback = desc[data_idx].callback;
  1432. desc[data_idx+f].user1 = desc[data_idx].user1;
  1433. desc[data_idx+f].user2 = desc[data_idx].user2;
  1434. desc[data_idx].callback = NULL;
  1435. if (ipa3_send(sys, num_frags + data_idx + 1,
  1436. desc, true)) {
  1437. IPAERR("fail to send skb %pK num_frags %u\n",
  1438. skb, num_frags);
  1439. goto fail_mem;
  1440. }
  1441. }
  1442. IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
  1443. }
  1444. if (num_frags) {
  1445. kfree(desc);
  1446. IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
  1447. }
  1448. return 0;
  1449. fail_send:
  1450. ipahal_destroy_imm_cmd(cmd_pyld);
  1451. fail_mem:
  1452. if (num_frags)
  1453. kfree(desc);
  1454. fail_gen:
  1455. return -EFAULT;
  1456. fail_pipe_not_valid:
  1457. return -EPIPE;
  1458. }
  1459. static void ipa3_wq_handle_rx(struct work_struct *work)
  1460. {
  1461. struct ipa3_sys_context *sys;
  1462. sys = container_of(work, struct ipa3_sys_context, work);
  1463. if (sys->napi_obj) {
  1464. ipa_pm_activate_sync(sys->pm_hdl);
  1465. napi_schedule(sys->napi_obj);
  1466. } else
  1467. ipa3_handle_rx(sys);
  1468. }
  1469. static void ipa3_wq_repl_rx(struct work_struct *work)
  1470. {
  1471. struct ipa3_sys_context *sys;
  1472. void *ptr;
  1473. struct ipa3_rx_pkt_wrapper *rx_pkt;
  1474. gfp_t flag = GFP_KERNEL;
  1475. u32 next;
  1476. u32 curr;
  1477. sys = container_of(work, struct ipa3_sys_context, repl_work);
  1478. atomic_set(&sys->repl->pending, 0);
  1479. curr = atomic_read(&sys->repl->tail_idx);
  1480. begin:
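/*
 * Producer side of the repl ring: allocate wrappers and skbs, DMA-map
 * them, and publish them at tail_idx until the ring is one slot short
 * of head_idx (i.e. full).
 */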
  1481. while (1) {
  1482. next = (curr + 1) % sys->repl->capacity;
  1483. if (next == atomic_read(&sys->repl->head_idx))
  1484. goto fail_kmem_cache_alloc;
  1485. rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
  1486. flag);
  1487. if (!rx_pkt)
  1488. goto fail_kmem_cache_alloc;
  1489. INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
  1490. rx_pkt->sys = sys;
  1491. rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
  1492. if (rx_pkt->data.skb == NULL)
  1493. goto fail_skb_alloc;
  1494. ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
  1495. rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
  1496. sys->rx_buff_sz,
  1497. DMA_FROM_DEVICE);
  1498. if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
  1499. pr_err_ratelimited("%s dma map fail %pK for %pK sys=%pK\n",
  1500. __func__, (void *)rx_pkt->data.dma_addr,
  1501. ptr, sys);
  1502. goto fail_dma_mapping;
  1503. }
  1504. sys->repl->cache[curr] = rx_pkt;
  1505. curr = next;
  1506. /* ensure write is done before setting tail index */
  1507. mb();
  1508. atomic_set(&sys->repl->tail_idx, next);
  1509. }
  1510. return;
  1511. fail_dma_mapping:
  1512. sys->free_skb(rx_pkt->data.skb);
  1513. fail_skb_alloc:
  1514. kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
  1515. fail_kmem_cache_alloc:
  1516. if (atomic_read(&sys->repl->tail_idx) ==
  1517. atomic_read(&sys->repl->head_idx)) {
  1518. if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
  1519. sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
  1520. IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
  1521. else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
  1522. IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
  1523. pr_err_ratelimited("%s sys=%pK repl ring empty\n",
  1524. __func__, sys);
  1525. goto begin;
  1526. }
  1527. }
  1528. static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
  1529. {
  1530. struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
  1531. struct ipa3_rx_pkt_wrapper *tmp;
  1532. int ret;
  1533. struct gsi_xfer_elem gsi_xfer_elem_one;
  1534. u32 rx_len_cached = 0;
  1535. IPADBG_LOW("\n");
  1536. spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
  1537. rx_len_cached = sys->len;
  1538. if (rx_len_cached < sys->rx_pool_sz) {
  1539. list_for_each_entry_safe(rx_pkt, tmp,
  1540. &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
  1541. list_del(&rx_pkt->link);
  1542. if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0)
  1543. ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
  1544. rx_pkt->len = 0;
  1545. rx_pkt->sys = sys;
  1546. memset(&gsi_xfer_elem_one, 0,
  1547. sizeof(gsi_xfer_elem_one));
  1548. gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
  1549. gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
  1550. gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
  1551. gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
  1552. gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
  1553. gsi_xfer_elem_one.xfer_user_data = rx_pkt;
  1554. ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
  1555. &gsi_xfer_elem_one, true);
  1556. if (ret) {
  1557. IPAERR("failed to provide buffer: %d\n", ret);
  1558. goto fail_provide_rx_buffer;
  1559. }
  1560. rx_len_cached = ++sys->len;
  1561. if (rx_len_cached >= sys->rx_pool_sz) {
  1562. spin_unlock_bh(
  1563. &ipa3_ctx->wc_memb.wlan_spinlock);
  1564. return;
  1565. }
  1566. }
  1567. }
  1568. spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
  1569. if (rx_len_cached < sys->rx_pool_sz &&
  1570. ipa3_ctx->wc_memb.wlan_comm_total_cnt <
  1571. IPA_WLAN_COMM_RX_POOL_HIGH) {
  1572. ipa3_replenish_rx_cache(sys);
  1573. ipa3_ctx->wc_memb.wlan_comm_total_cnt +=
  1574. (sys->rx_pool_sz - rx_len_cached);
  1575. }
  1576. return;
  1577. fail_provide_rx_buffer:
  1578. list_del(&rx_pkt->link);
  1579. spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
  1580. }
  1581. static void ipa3_cleanup_wlan_rx_common_cache(void)
  1582. {
  1583. struct ipa3_rx_pkt_wrapper *rx_pkt;
  1584. struct ipa3_rx_pkt_wrapper *tmp;
  1585. spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
  1586. list_for_each_entry_safe(rx_pkt, tmp,
  1587. &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
  1588. list_del(&rx_pkt->link);
  1589. dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
  1590. IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
  1591. dev_kfree_skb_any(rx_pkt->data.skb);
  1592. kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
  1593. ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
  1594. ipa3_ctx->wc_memb.wlan_comm_total_cnt--;
  1595. }
  1596. ipa3_ctx->wc_memb.total_tx_pkts_freed = 0;
  1597. if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0)
  1598. IPAERR("wlan comm buff free cnt: %d\n",
  1599. ipa3_ctx->wc_memb.wlan_comm_free_cnt);
  1600. if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0)
  1601. IPAERR("wlan comm buff total cnt: %d\n",
  1602. ipa3_ctx->wc_memb.wlan_comm_total_cnt);
  1603. spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
  1604. }
  1605. static void ipa3_alloc_wlan_rx_common_cache(u32 size)
  1606. {
  1607. void *ptr;
  1608. struct ipa3_rx_pkt_wrapper *rx_pkt;
  1609. int rx_len_cached = 0;
  1610. gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
  1611. rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt;
  1612. while (rx_len_cached < size) {
  1613. rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
  1614. flag);
  1615. if (!rx_pkt)
  1616. goto fail_kmem_cache_alloc;
  1617. INIT_LIST_HEAD(&rx_pkt->link);
  1618. INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
  1619. rx_pkt->data.skb =
  1620. ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
  1621. flag);
  1622. if (rx_pkt->data.skb == NULL) {
  1623. IPAERR("failed to alloc skb\n");
  1624. goto fail_skb_alloc;
  1625. }
  1626. ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
  1627. rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
  1628. IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
  1629. if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
  1630. IPAERR("dma_map_single failure %pK for %pK\n",
  1631. (void *)rx_pkt->data.dma_addr, ptr);
  1632. goto fail_dma_mapping;
  1633. }
  1634. spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
  1635. list_add_tail(&rx_pkt->link,
  1636. &ipa3_ctx->wc_memb.wlan_comm_desc_list);
  1637. rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
  1638. ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
  1639. spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
  1640. }
  1641. return;
  1642. fail_dma_mapping:
  1643. dev_kfree_skb_any(rx_pkt->data.skb);
  1644. fail_skb_alloc:
  1645. kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
  1646. fail_kmem_cache_alloc:
  1647. return;
  1648. }
  1649. /**
  1650. * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.
  1651. *
* The function allocates buffers from the rx_pkt_wrapper_cache cache until
* the system pipe's RX pool holds rx_pool_sz buffers.
* - Allocate a buffer in the cache
* - Initialize the packet's link
* - Initialize the packet's work struct
* - Allocate the packet's socket buffer (skb)
* - Fill the packet's skb with data
* - Make the packet DMAable
* - Add the packet to the system pipe linked list
  1661. */
  1662. static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
  1663. {
  1664. void *ptr;
  1665. struct ipa3_rx_pkt_wrapper *rx_pkt;
  1666. int ret;
  1667. int idx = 0;
  1668. int rx_len_cached = 0;
  1669. struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
  1670. gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
  1671. rx_len_cached = sys->len;
  1672. /* start replenish only when buffers go lower than the threshold */
  1673. if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
  1674. return;
  1675. while (rx_len_cached < sys->rx_pool_sz) {
  1676. rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
  1677. flag);
  1678. if (!rx_pkt)
  1679. goto fail_kmem_cache_alloc;
  1680. INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
  1681. rx_pkt->sys = sys;
  1682. rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
  1683. if (rx_pkt->data.skb == NULL) {
  1684. IPAERR("failed to alloc skb\n");
  1685. goto fail_skb_alloc;
  1686. }
  1687. ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
  1688. rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
  1689. sys->rx_buff_sz,
  1690. DMA_FROM_DEVICE);
  1691. if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
  1692. IPAERR("dma_map_single failure %pK for %pK\n",
  1693. (void *)rx_pkt->data.dma_addr, ptr);
  1694. goto fail_dma_mapping;
  1695. }
  1696. gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
  1697. gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
  1698. gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
  1699. gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
  1700. gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
  1701. gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
  1702. gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
  1703. idx++;
  1704. rx_len_cached++;
  1705. /*
* gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
  1707. * If this size is reached we need to queue the xfers.
  1708. */
  1709. if (idx == IPA_REPL_XFER_MAX) {
  1710. ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
  1711. gsi_xfer_elem_array, false);
  1712. if (ret != GSI_STATUS_SUCCESS) {
  1713. /* we don't expect this will happen */
  1714. IPAERR("failed to provide buffer: %d\n", ret);
  1715. WARN_ON(1);
  1716. break;
  1717. }
  1718. idx = 0;
  1719. }
  1720. }
  1721. goto done;
  1722. fail_dma_mapping:
  1723. sys->free_skb(rx_pkt->data.skb);
  1724. fail_skb_alloc:
  1725. kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
  1726. fail_kmem_cache_alloc:
  1727. if (rx_len_cached == 0)
  1728. queue_delayed_work(sys->wq, &sys->replenish_rx_work,
  1729. msecs_to_jiffies(1));
  1730. done:
  1731. /* only ring doorbell once here */
  1732. ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
  1733. gsi_xfer_elem_array, true);
  1734. if (ret == GSI_STATUS_SUCCESS) {
  1735. sys->len = rx_len_cached;
  1736. } else {
  1737. /* we don't expect this will happen */
  1738. IPAERR("failed to provide buffer: %d\n", ret);
  1739. WARN_ON(1);
  1740. }
  1741. }
  1742. static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
  1743. {
  1744. void *ptr;
  1745. struct ipa3_rx_pkt_wrapper *rx_pkt;
  1746. int ret;
  1747. int idx = 0;
  1748. int rx_len_cached = 0;
  1749. struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
  1750. gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
  1751. /* start replenish only when buffers go lower than the threshold */
  1752. if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
  1753. return;
  1754. rx_len_cached = sys->len;
  1755. while (rx_len_cached < sys->rx_pool_sz) {
  1756. if (list_empty(&sys->rcycl_list)) {
  1757. rx_pkt = kmem_cache_zalloc(
  1758. ipa3_ctx->rx_pkt_wrapper_cache, flag);
  1759. if (!rx_pkt)
  1760. goto fail_kmem_cache_alloc;
  1761. INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
  1762. rx_pkt->sys = sys;
  1763. rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
  1764. if (rx_pkt->data.skb == NULL) {
  1765. IPAERR("failed to alloc skb\n");
  1766. kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
  1767. rx_pkt);
  1768. goto fail_kmem_cache_alloc;
  1769. }
  1770. ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
  1771. rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
  1772. ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
  1773. if (dma_mapping_error(ipa3_ctx->pdev,
  1774. rx_pkt->data.dma_addr)) {
  1775. IPAERR("dma_map_single failure %pK for %pK\n",
  1776. (void *)rx_pkt->data.dma_addr, ptr);
  1777. goto fail_dma_mapping;
  1778. }
  1779. } else {
  1780. spin_lock_bh(&sys->spinlock);
  1781. rx_pkt = list_first_entry(&sys->rcycl_list,
  1782. struct ipa3_rx_pkt_wrapper, link);
  1783. list_del(&rx_pkt->link);
  1784. spin_unlock_bh(&sys->spinlock);
  1785. ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
  1786. rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
  1787. ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
  1788. if (dma_mapping_error(ipa3_ctx->pdev,
  1789. rx_pkt->data.dma_addr)) {
  1790. IPAERR("dma_map_single failure %pK for %pK\n",
  1791. (void *)rx_pkt->data.dma_addr, ptr);
  1792. goto fail_dma_mapping;
  1793. }
  1794. }
  1795. gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
  1796. gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
  1797. gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
  1798. gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
  1799. gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
  1800. gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
  1801. gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
  1802. idx++;
  1803. rx_len_cached++;
  1804. /*
* gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
  1806. * If this size is reached we need to queue the xfers.
  1807. */
  1808. if (idx == IPA_REPL_XFER_MAX) {
  1809. ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
  1810. gsi_xfer_elem_array, false);
  1811. if (ret != GSI_STATUS_SUCCESS) {
  1812. /* we don't expect this will happen */
  1813. IPAERR("failed to provide buffer: %d\n", ret);
  1814. WARN_ON(1);
  1815. break;
  1816. }
  1817. idx = 0;
  1818. }
  1819. }
  1820. goto done;
  1821. fail_dma_mapping:
  1822. spin_lock_bh(&sys->spinlock);
  1823. list_add_tail(&rx_pkt->link, &sys->rcycl_list);
  1824. INIT_LIST_HEAD(&rx_pkt->link);
  1825. spin_unlock_bh(&sys->spinlock);
  1826. fail_kmem_cache_alloc:
  1827. if (rx_len_cached == 0)
  1828. queue_delayed_work(sys->wq, &sys->replenish_rx_work,
  1829. msecs_to_jiffies(1));
  1830. done:
  1831. /* only ring doorbell once here */
  1832. ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
  1833. gsi_xfer_elem_array, true);
  1834. if (ret == GSI_STATUS_SUCCESS) {
  1835. sys->len = rx_len_cached;
  1836. } else {
  1837. /* we don't expect this will happen */
  1838. IPAERR("failed to provide buffer: %d\n", ret);
  1839. WARN_ON(1);
  1840. }
  1841. }
  1842. static inline void __trigger_repl_work(struct ipa3_sys_context *sys)
  1843. {
  1844. int tail, head, avail;
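/*
 * The repl cache is a head/tail ring of pre-allocated RX buffers; "avail"
 * approximates how many are left. When it drops below a quarter of the
 * capacity, kick the replenish worker unless one is already pending.
 */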
  1845. if (atomic_read(&sys->repl->pending))
  1846. return;
  1847. tail = atomic_read(&sys->repl->tail_idx);
  1848. head = atomic_read(&sys->repl->head_idx);
  1849. avail = (tail - head) % sys->repl->capacity;
  1850. if (avail < sys->repl->capacity / 4) {
  1851. atomic_set(&sys->repl->pending, 1);
  1852. queue_work(sys->repl_wq, &sys->repl_work);
  1853. }
  1854. }
  1855. static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
  1856. {
  1857. struct ipa3_rx_pkt_wrapper *rx_pkt;
  1858. int ret;
  1859. int rx_len_cached = 0;
  1860. struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
  1861. u32 curr;
  1862. int idx = 0;
  1863. /* start replenish only when buffers go lower than the threshold */
  1864. if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
  1865. return;
  1866. spin_lock_bh(&sys->spinlock);
  1867. rx_len_cached = sys->len;
  1868. curr = atomic_read(&sys->repl->head_idx);
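/*
 * Consumer side of the repl ring: hand pre-allocated buffers at head_idx
 * to the GSI channel until the RX pool is full or the ring drains
 * (head catches up with tail).
 */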
  1869. while (rx_len_cached < sys->rx_pool_sz) {
  1870. if (curr == atomic_read(&sys->repl->tail_idx))
  1871. break;
  1872. rx_pkt = sys->repl->cache[curr];
  1873. gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
  1874. gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
  1875. gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
  1876. gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
  1877. gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
  1878. gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
  1879. gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
  1880. rx_len_cached++;
  1881. curr = (++curr == sys->repl->capacity) ? 0 : curr;
  1882. idx++;
  1883. /*
* gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
  1885. * If this size is reached we need to queue the xfers.
  1886. */
  1887. if (idx == IPA_REPL_XFER_MAX) {
  1888. ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
  1889. gsi_xfer_elem_array, false);
  1890. if (ret != GSI_STATUS_SUCCESS) {
  1891. /* we don't expect this will happen */
  1892. IPAERR("failed to provide buffer: %d\n", ret);
  1893. WARN_ON(1);
  1894. break;
  1895. }
  1896. idx = 0;
  1897. }
  1898. }
  1899. /* only ring doorbell once here */
  1900. ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
  1901. gsi_xfer_elem_array, true);
  1902. if (ret == GSI_STATUS_SUCCESS) {
  1903. /* ensure write is done before setting head index */
  1904. mb();
  1905. atomic_set(&sys->repl->head_idx, curr);
  1906. sys->len = rx_len_cached;
  1907. } else {
  1908. /* we don't expect this will happen */
  1909. IPAERR("failed to provide buffer: %d\n", ret);
  1910. WARN_ON(1);
  1911. }
  1912. spin_unlock_bh(&sys->spinlock);
  1913. __trigger_repl_work(sys);
  1914. if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
  1915. if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
  1916. sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
  1917. IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
  1918. else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
  1919. IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
  1920. else
  1921. WARN_ON(1);
  1922. queue_delayed_work(sys->wq, &sys->replenish_rx_work,
  1923. msecs_to_jiffies(1));
  1924. }
  1925. }
  1926. static void ipa3_replenish_rx_work_func(struct work_struct *work)
  1927. {
  1928. struct delayed_work *dwork;
  1929. struct ipa3_sys_context *sys;
  1930. dwork = container_of(work, struct delayed_work, work);
  1931. sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work);
  1932. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  1933. sys->repl_hdlr(sys);
  1934. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  1935. }
  1936. /**
  1937. * free_rx_pkt() - function to free the skb and rx_pkt_wrapper
  1938. *
  1939. * @chan_user_data: ipa_sys_context used for skb size and skb_free func
* @xfer_user_data: rx_pkt wrapper to be freed
  1941. *
  1942. */
  1943. static void free_rx_pkt(void *chan_user_data, void *xfer_user_data)
  1944. {
  1945. struct ipa3_rx_pkt_wrapper *rx_pkt = (struct ipa3_rx_pkt_wrapper *)
  1946. xfer_user_data;
  1947. struct ipa3_sys_context *sys = (struct ipa3_sys_context *)
  1948. chan_user_data;
  1949. dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
  1950. sys->rx_buff_sz, DMA_FROM_DEVICE);
  1951. sys->free_skb(rx_pkt->data.skb);
  1952. kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
  1953. }
  1954. /**
  1955. * ipa3_cleanup_rx() - release RX queue resources
  1956. *
  1957. */
  1958. static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
  1959. {
  1960. struct ipa3_rx_pkt_wrapper *rx_pkt;
  1961. struct ipa3_rx_pkt_wrapper *r;
  1962. u32 head;
  1963. u32 tail;
  1964. /*
  1965. * buffers not consumed by gsi are cleaned up using cleanup callback
  1966. * provided to gsi
  1967. */
  1968. list_for_each_entry_safe(rx_pkt, r,
  1969. &sys->rcycl_list, link) {
  1970. list_del(&rx_pkt->link);
  1971. dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
  1972. sys->rx_buff_sz, DMA_FROM_DEVICE);
  1973. sys->free_skb(rx_pkt->data.skb);
  1974. kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
  1975. }
  1976. if (sys->repl) {
  1977. head = atomic_read(&sys->repl->head_idx);
  1978. tail = atomic_read(&sys->repl->tail_idx);
  1979. while (head != tail) {
  1980. rx_pkt = sys->repl->cache[head];
  1981. dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
  1982. sys->rx_buff_sz, DMA_FROM_DEVICE);
  1983. sys->free_skb(rx_pkt->data.skb);
  1984. kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
  1985. head = (head + 1) % sys->repl->capacity;
  1986. }
  1987. kfree(sys->repl->cache);
  1988. kfree(sys->repl);
  1989. }
  1990. }
  1991. static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
  1992. {
  1993. struct sk_buff *skb2 = NULL;
  1994. skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
  1995. if (likely(skb2)) {
  1996. /* Set the data pointer */
  1997. skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
  1998. memcpy(skb2->data, skb->data, len);
  1999. skb2->len = len;
  2000. skb_set_tail_pointer(skb2, len);
  2001. }
  2002. return skb2;
  2003. }
  2004. static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
  2005. struct ipa3_sys_context *sys)
  2006. {
  2007. int rc = 0;
  2008. struct ipahal_pkt_status status;
  2009. u32 pkt_status_sz;
  2010. struct sk_buff *skb2;
  2011. int pad_len_byte;
  2012. int len;
  2013. unsigned char *buf;
  2014. int src_pipe;
  2015. unsigned int used = *(unsigned int *)skb->cb;
  2016. unsigned int used_align = ALIGN(used, 32);
  2017. unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
  2018. struct ipa3_tx_pkt_wrapper *tx_pkt = NULL;
  2019. unsigned long ptr;
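/*
 * "used" is written into skb->cb by the completion handler; together with
 * used_align/unused it apportions the original RX buffer size across the
 * per-packet clones when estimating skb2->truesize below.
 */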
  2020. IPA_DUMP_BUFF(skb->data, 0, skb->len);
  2021. if (skb->len == 0) {
  2022. IPAERR("ZLT packet arrived to AP\n");
  2023. goto out;
  2024. }
  2025. if (sys->len_partial) {
  2026. IPADBG_LOW("len_partial %d\n", sys->len_partial);
  2027. buf = skb_push(skb, sys->len_partial);
  2028. memcpy(buf, sys->prev_skb->data, sys->len_partial);
  2029. sys->len_partial = 0;
  2030. sys->free_skb(sys->prev_skb);
  2031. sys->prev_skb = NULL;
  2032. goto begin;
  2033. }
  2034. /* this pipe has TX comp (status only) + mux-ed LAN RX data
  2035. * (status+data)
  2036. */
  2037. if (sys->len_rem) {
  2038. IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
  2039. sys->len_pad);
  2040. if (sys->len_rem <= skb->len) {
  2041. if (sys->prev_skb) {
  2042. skb2 = skb_copy_expand(sys->prev_skb, 0,
  2043. sys->len_rem, GFP_KERNEL);
  2044. if (likely(skb2)) {
  2045. memcpy(skb_put(skb2, sys->len_rem),
  2046. skb->data, sys->len_rem);
  2047. skb_trim(skb2,
  2048. skb2->len - sys->len_pad);
  2049. skb2->truesize = skb2->len +
  2050. sizeof(struct sk_buff);
  2051. if (sys->drop_packet)
  2052. dev_kfree_skb_any(skb2);
  2053. else
  2054. sys->ep->client_notify(
  2055. sys->ep->priv,
  2056. IPA_RECEIVE,
  2057. (unsigned long)(skb2));
  2058. } else {
  2059. IPAERR("copy expand failed\n");
  2060. }
  2061. dev_kfree_skb_any(sys->prev_skb);
  2062. }
  2063. skb_pull(skb, sys->len_rem);
  2064. sys->prev_skb = NULL;
  2065. sys->len_rem = 0;
  2066. sys->len_pad = 0;
  2067. } else {
  2068. if (sys->prev_skb) {
  2069. skb2 = skb_copy_expand(sys->prev_skb, 0,
  2070. skb->len, GFP_KERNEL);
  2071. if (likely(skb2)) {
  2072. memcpy(skb_put(skb2, skb->len),
  2073. skb->data, skb->len);
  2074. } else {
  2075. IPAERR("copy expand failed\n");
  2076. }
  2077. dev_kfree_skb_any(sys->prev_skb);
  2078. sys->prev_skb = skb2;
  2079. }
  2080. sys->len_rem -= skb->len;
  2081. goto out;
  2082. }
  2083. }
  2084. begin:
  2085. pkt_status_sz = ipahal_pkt_status_get_size();
  2086. while (skb->len) {
  2087. sys->drop_packet = false;
  2088. IPADBG_LOW("LEN_REM %d\n", skb->len);
  2089. if (skb->len < pkt_status_sz) {
  2090. WARN_ON(sys->prev_skb != NULL);
  2091. IPADBG_LOW("status straddles buffer\n");
  2092. sys->prev_skb = skb_copy(skb, GFP_KERNEL);
  2093. sys->len_partial = skb->len;
  2094. goto out;
  2095. }
  2096. ipahal_pkt_status_parse(skb->data, &status);
  2097. IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
  2098. status.status_opcode, status.endp_src_idx,
  2099. status.endp_dest_idx, status.pkt_len);
  2100. if (sys->status_stat) {
  2101. sys->status_stat->status[sys->status_stat->curr] =
  2102. status;
  2103. sys->status_stat->curr++;
  2104. if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
  2105. sys->status_stat->curr = 0;
  2106. }
  2107. if ((status.status_opcode !=
  2108. IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
  2109. (status.status_opcode !=
  2110. IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
  2111. (status.status_opcode !=
  2112. IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) &&
  2113. (status.status_opcode !=
  2114. IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
  2115. IPAERR("unsupported opcode(%d)\n",
  2116. status.status_opcode);
  2117. skb_pull(skb, pkt_status_sz);
  2118. continue;
  2119. }
  2120. IPA_STATS_EXCP_CNT(status.exception,
  2121. ipa3_ctx->stats.rx_excp_pkts);
  2122. if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
  2123. status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
  2124. IPAERR("status fields invalid\n");
  2125. IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
  2126. status.status_opcode, status.endp_src_idx,
  2127. status.endp_dest_idx, status.pkt_len);
  2128. WARN_ON(1);
  2129. /* HW gave an unexpected status */
  2130. ipa_assert();
  2131. }
  2132. if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
  2133. IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) {
  2134. struct ipa3_tag_completion *comp;
  2135. IPADBG_LOW("TAG packet arrived\n");
  2136. if (status.tag_info == IPA_COOKIE) {
  2137. skb_pull(skb, pkt_status_sz);
  2138. if (skb->len < sizeof(comp)) {
  2139. IPAERR("TAG arrived without packet\n");
  2140. goto out;
  2141. }
  2142. memcpy(&comp, skb->data, sizeof(comp));
  2143. skb_pull(skb, sizeof(comp));
  2144. complete(&comp->comp);
  2145. if (atomic_dec_return(&comp->cnt) == 0)
  2146. kfree(comp);
  2147. continue;
  2148. } else {
  2149. ptr = tag_to_pointer_wa(status.tag_info);
  2150. tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr;
  2151. IPADBG_LOW("tx_pkt recv = %pK\n", tx_pkt);
  2152. }
  2153. }
  2154. if (status.pkt_len == 0) {
  2155. IPADBG_LOW("Skip aggr close status\n");
  2156. skb_pull(skb, pkt_status_sz);
  2157. IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close);
  2158. IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
  2159. [IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
  2160. continue;
  2161. }
  2162. if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) {
  2163. /* RX data */
  2164. src_pipe = status.endp_src_idx;
  2165. /*
  2166. * A packet which is received back to the AP after
  2167. * there was no route match.
  2168. */
  2169. if (status.exception ==
  2170. IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
  2171. ipahal_is_rule_miss_id(status.rt_rule_id))
  2172. sys->drop_packet = true;
  2173. if (skb->len == pkt_status_sz &&
  2174. status.exception ==
  2175. IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
  2176. WARN_ON(sys->prev_skb != NULL);
  2177. IPADBG_LOW("Ins header in next buffer\n");
  2178. sys->prev_skb = skb_copy(skb, GFP_KERNEL);
  2179. sys->len_partial = skb->len;
  2180. goto out;
  2181. }
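/* payloads are padded to a 4-byte boundary; recover the implied pad length */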
  2182. pad_len_byte = ((status.pkt_len + 3) & ~3) -
  2183. status.pkt_len;
  2184. len = status.pkt_len + pad_len_byte;
  2185. IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
  2186. status.pkt_len, len);
  2187. if (status.exception ==
  2188. IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) {
  2189. IPADBG_LOW(
  2190. "Dropping packet on DeAggr Exception\n");
  2191. sys->drop_packet = true;
  2192. }
  2193. skb2 = ipa3_skb_copy_for_client(skb,
  2194. min(status.pkt_len + pkt_status_sz, skb->len));
  2195. if (likely(skb2)) {
  2196. if (skb->len < len + pkt_status_sz) {
  2197. IPADBG_LOW("SPL skb len %d len %d\n",
  2198. skb->len, len);
  2199. sys->prev_skb = skb2;
  2200. sys->len_rem = len - skb->len +
  2201. pkt_status_sz;
  2202. sys->len_pad = pad_len_byte;
  2203. skb_pull(skb, skb->len);
  2204. } else {
  2205. skb_trim(skb2, status.pkt_len +
  2206. pkt_status_sz);
  2207. IPADBG_LOW("rx avail for %d\n",
  2208. status.endp_dest_idx);
  2209. if (sys->drop_packet) {
  2210. dev_kfree_skb_any(skb2);
  2211. } else if (status.pkt_len >
  2212. IPA_GENERIC_AGGR_BYTE_LIMIT *
  2213. 1024) {
  2214. IPAERR("packet size invalid\n");
  2215. IPAERR("STATUS opcode=%d\n",
  2216. status.status_opcode);
  2217. IPAERR("src=%d dst=%d len=%d\n",
  2218. status.endp_src_idx,
  2219. status.endp_dest_idx,
  2220. status.pkt_len);
  2221. /* Unexpected HW status */
  2222. ipa_assert();
  2223. } else {
  2224. skb2->truesize = skb2->len +
  2225. sizeof(struct sk_buff) +
  2226. (ALIGN(len +
  2227. pkt_status_sz, 32) *
  2228. unused / used_align);
  2229. sys->ep->client_notify(
  2230. sys->ep->priv,
  2231. IPA_RECEIVE,
  2232. (unsigned long)(skb2));
  2233. }
  2234. skb_pull(skb, len + pkt_status_sz);
  2235. }
  2236. } else {
  2237. IPAERR("fail to alloc skb\n");
  2238. if (skb->len < len) {
  2239. sys->prev_skb = NULL;
  2240. sys->len_rem = len - skb->len +
  2241. pkt_status_sz;
  2242. sys->len_pad = pad_len_byte;
  2243. skb_pull(skb, skb->len);
  2244. } else {
  2245. skb_pull(skb, len + pkt_status_sz);
  2246. }
  2247. }
  2248. /* TX comp */
  2249. ipa3_wq_write_done_status(src_pipe, tx_pkt);
  2250. IPADBG_LOW("tx comp imp for %d\n", src_pipe);
  2251. } else {
  2252. /* TX comp */
  2253. ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt);
  2254. IPADBG_LOW("tx comp exp for %d\n",
  2255. status.endp_src_idx);
  2256. skb_pull(skb, pkt_status_sz);
  2257. IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl);
  2258. IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
  2259. [IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
  2260. }
  2261. tx_pkt = NULL;
  2262. }
  2263. out:
  2264. ipa3_skb_recycle(skb);
  2265. return rc;
  2266. }
  2267. static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb,
  2268. struct sk_buff *skb, unsigned int len)
  2269. {
  2270. struct sk_buff *skb2;
  2271. skb2 = skb_copy_expand(prev_skb, 0,
  2272. len, GFP_KERNEL);
  2273. if (likely(skb2)) {
  2274. memcpy(skb_put(skb2, len),
  2275. skb->data, len);
  2276. } else {
  2277. IPAERR("copy expand failed\n");
  2278. skb2 = NULL;
  2279. }
  2280. dev_kfree_skb_any(prev_skb);
  2281. return skb2;
  2282. }
  2283. static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
  2284. struct ipa3_sys_context *sys)
  2285. {
  2286. struct sk_buff *skb2;
  2287. IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len);
  2288. if (sys->len_rem <= skb->len) {
  2289. if (sys->prev_skb) {
  2290. skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
  2291. sys->len_rem);
  2292. if (likely(skb2)) {
  2293. IPADBG_LOW(
  2294. "removing Status element from skb and sending to WAN client");
  2295. skb_pull(skb2, ipahal_pkt_status_get_size());
  2296. skb2->truesize = skb2->len +
  2297. sizeof(struct sk_buff);
  2298. sys->ep->client_notify(sys->ep->priv,
  2299. IPA_RECEIVE,
  2300. (unsigned long)(skb2));
  2301. }
  2302. }
  2303. skb_pull(skb, sys->len_rem);
  2304. sys->prev_skb = NULL;
  2305. sys->len_rem = 0;
  2306. } else {
  2307. if (sys->prev_skb) {
  2308. skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
  2309. skb->len);
  2310. sys->prev_skb = skb2;
  2311. }
  2312. sys->len_rem -= skb->len;
  2313. skb_pull(skb, skb->len);
  2314. }
  2315. }
  2316. static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
  2317. struct ipa3_sys_context *sys)
  2318. {
  2319. struct ipahal_pkt_status status;
  2320. unsigned char *skb_data;
  2321. u32 pkt_status_sz;
  2322. struct sk_buff *skb2;
  2323. u16 pkt_len_with_pad;
  2324. u32 qmap_hdr;
  2325. int checksum_trailer_exists;
  2326. int frame_len;
  2327. int ep_idx;
  2328. unsigned int used = *(unsigned int *)skb->cb;
  2329. unsigned int used_align = ALIGN(used, 32);
  2330. unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
  2331. IPA_DUMP_BUFF(skb->data, 0, skb->len);
  2332. if (skb->len == 0) {
  2333. IPAERR("ZLT\n");
  2334. goto bail;
  2335. }
  2336. if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
  2337. sys->ep->client_notify(sys->ep->priv,
  2338. IPA_RECEIVE, (unsigned long)(skb));
  2339. return 0;
  2340. }
  2341. if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
  2342. IPAERR("Recycle should enable only with GRO Aggr\n");
  2343. ipa_assert();
  2344. }
  2345. /*
* the payload splits across 2 or more buffers;
* take the start of the payload from prev_skb
  2348. */
  2349. if (sys->len_rem)
  2350. ipa3_wan_rx_handle_splt_pyld(skb, sys);
  2351. pkt_status_sz = ipahal_pkt_status_get_size();
  2352. while (skb->len) {
  2353. IPADBG_LOW("LEN_REM %d\n", skb->len);
  2354. if (skb->len < pkt_status_sz) {
  2355. IPAERR("status straddles buffer\n");
  2356. WARN_ON(1);
  2357. goto bail;
  2358. }
  2359. ipahal_pkt_status_parse(skb->data, &status);
  2360. skb_data = skb->data;
  2361. IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
  2362. status.status_opcode, status.endp_src_idx,
  2363. status.endp_dest_idx, status.pkt_len);
  2364. if (sys->status_stat) {
  2365. sys->status_stat->status[sys->status_stat->curr] =
  2366. status;
  2367. sys->status_stat->curr++;
  2368. if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
  2369. sys->status_stat->curr = 0;
  2370. }
  2371. if ((status.status_opcode !=
  2372. IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
  2373. (status.status_opcode !=
  2374. IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
  2375. (status.status_opcode !=
  2376. IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
  2377. IPAERR("unsupported opcode(%d)\n",
  2378. status.status_opcode);
  2379. skb_pull(skb, pkt_status_sz);
  2380. continue;
  2381. }
  2382. IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
  2383. if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
  2384. status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
  2385. IPAERR("status fields invalid\n");
  2386. WARN_ON(1);
  2387. goto bail;
  2388. }
  2389. if (status.pkt_len == 0) {
  2390. IPADBG_LOW("Skip aggr close status\n");
  2391. skb_pull(skb, pkt_status_sz);
  2392. IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts);
  2393. IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
  2394. continue;
  2395. }
  2396. ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
  2397. if (status.endp_dest_idx != ep_idx) {
  2398. IPAERR("expected endp_dest_idx %d received %d\n",
  2399. ep_idx, status.endp_dest_idx);
  2400. WARN_ON(1);
  2401. goto bail;
  2402. }
  2403. /* RX data */
  2404. if (skb->len == pkt_status_sz) {
  2405. IPAERR("Ins header in next buffer\n");
  2406. WARN_ON(1);
  2407. goto bail;
  2408. }
  2409. qmap_hdr = *(u32 *)(skb_data + pkt_status_sz);
  2410. /*
  2411. * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
  2412. * header
  2413. */
  2414. /*QMAP is BE: convert the pkt_len field from BE to LE*/
  2415. pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
  2416. IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad);
  2417. /*get the CHECKSUM_PROCESS bit*/
  2418. checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
  2419. IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status);
  2420. IPADBG_LOW("checksum_trailer_exists %d\n",
  2421. checksum_trailer_exists);
  2422. frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH +
  2423. pkt_len_with_pad;
  2424. if (checksum_trailer_exists)
  2425. frame_len += IPA_DL_CHECKSUM_LENGTH;
  2426. IPADBG_LOW("frame_len %d\n", frame_len);
  2427. skb2 = skb_clone(skb, GFP_KERNEL);
  2428. if (likely(skb2)) {
  2429. /*
* the length of the actual data is smaller than expected;
* the payload is split across 2 or more buffers
  2432. */
  2433. if (skb->len < frame_len) {
  2434. IPADBG_LOW("SPL skb len %d len %d\n",
  2435. skb->len, frame_len);
  2436. sys->prev_skb = skb2;
  2437. sys->len_rem = frame_len - skb->len;
  2438. skb_pull(skb, skb->len);
  2439. } else {
  2440. skb_trim(skb2, frame_len);
  2441. IPADBG_LOW("rx avail for %d\n",
  2442. status.endp_dest_idx);
  2443. IPADBG_LOW(
  2444. "removing Status element from skb and sending to WAN client");
  2445. skb_pull(skb2, pkt_status_sz);
  2446. skb2->truesize = skb2->len +
  2447. sizeof(struct sk_buff) +
  2448. (ALIGN(frame_len, 32) *
  2449. unused / used_align);
  2450. sys->ep->client_notify(sys->ep->priv,
  2451. IPA_RECEIVE, (unsigned long)(skb2));
  2452. skb_pull(skb, frame_len);
  2453. }
  2454. } else {
  2455. IPAERR("fail to clone\n");
  2456. if (skb->len < frame_len) {
  2457. sys->prev_skb = NULL;
  2458. sys->len_rem = frame_len - skb->len;
  2459. skb_pull(skb, skb->len);
  2460. } else {
  2461. skb_pull(skb, frame_len);
  2462. }
  2463. }
  2464. }
  2465. bail:
  2466. sys->free_skb(skb);
  2467. return 0;
  2468. }
  2469. static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags)
  2470. {
  2471. return __dev_alloc_skb(len, flags);
  2472. }
  2473. static void ipa3_free_skb_rx(struct sk_buff *skb)
  2474. {
  2475. dev_kfree_skb_any(skb);
  2476. }
  2477. void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
  2478. {
  2479. struct sk_buff *rx_skb = (struct sk_buff *)data;
  2480. struct ipahal_pkt_status status;
  2481. struct ipa3_ep_context *ep;
  2482. unsigned int src_pipe;
  2483. u32 metadata;
  2484. u8 ucp;
  2485. ipahal_pkt_status_parse(rx_skb->data, &status);
  2486. src_pipe = status.endp_src_idx;
  2487. metadata = status.metadata;
  2488. ucp = status.ucp;
  2489. ep = &ipa3_ctx->ep[src_pipe];
  2490. if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
  2491. !ep->valid ||
  2492. !ep->client_notify)) {
  2493. IPAERR_RL("drop pipe=%d ep_valid=%d client_notify=%pK\n",
  2494. src_pipe, ep->valid, ep->client_notify);
  2495. dev_kfree_skb_any(rx_skb);
  2496. return;
  2497. }
  2498. if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
  2499. skb_pull(rx_skb, ipahal_pkt_status_get_size() +
  2500. IPA_LAN_RX_HEADER_LENGTH);
  2501. else
  2502. skb_pull(rx_skb, ipahal_pkt_status_get_size());
  2503. /* Metadata Info
  2504. * ------------------------------------------
  2505. * | 3 | 2 | 1 | 0 |
  2506. * | fw_desc | vdev_id | qmap mux id | Resv |
  2507. * ------------------------------------------
  2508. */
  2509. *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
  2510. *(u8 *)(rx_skb->cb + 4) = ucp;
  2511. IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
  2512. metadata, *(u32 *)rx_skb->cb);
  2513. IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
  2514. ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
  2515. }
  2516. static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
  2517. {
  2518. rx_pkt->data.dma_addr = 0;
  2519. /* skb recycle was moved to pyld_hdlr */
  2520. INIT_LIST_HEAD(&rx_pkt->link);
  2521. spin_lock_bh(&rx_pkt->sys->spinlock);
  2522. list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
  2523. spin_unlock_bh(&rx_pkt->sys->spinlock);
  2524. }
  2525. /**
  2526. * handle_skb_completion()- Handle event completion EOB or EOT and prep the skb
  2527. *
  2528. * if eob: Set skb values, put rx_pkt at the end of the list and return NULL
  2529. *
  2530. * if eot: Set skb values, put skb at the end of the list. Then update the
  2531. * length and chain the skbs together while also freeing and unmapping the
  2532. * corresponding rx pkt. Once finished return the head_skb to be sent up the
  2533. * network stack.
  2534. */
  2535. static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
  2536. *notify, bool update_truesize)
  2537. {
  2538. struct ipa3_rx_pkt_wrapper *rx_pkt, *tmp;
  2539. struct sk_buff *rx_skb, *next_skb = NULL;
  2540. struct list_head *head;
  2541. struct ipa3_sys_context *sys;
  2542. sys = (struct ipa3_sys_context *) notify->chan_user_data;
  2543. rx_pkt = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
  2544. spin_lock_bh(&rx_pkt->sys->spinlock);
  2545. rx_pkt->sys->len--;
  2546. spin_unlock_bh(&rx_pkt->sys->spinlock);
  2547. if (notify->bytes_xfered)
  2548. rx_pkt->len = notify->bytes_xfered;
  2549. rx_skb = rx_pkt->data.skb;
  2550. skb_set_tail_pointer(rx_skb, rx_pkt->len);
  2551. rx_skb->len = rx_pkt->len;
  2552. if (update_truesize) {
  2553. *(unsigned int *)rx_skb->cb = rx_skb->len;
  2554. rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
  2555. }
  2556. if (notify->veid >= GSI_VEID_MAX) {
  2557. WARN_ON(1);
  2558. return NULL;
  2559. }
/* Assert when the WAN consumer channel receives an EOB event */
  2561. if (notify->evt_id == GSI_CHAN_EVT_EOB &&
  2562. sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
  2563. IPAERR("EOB event received on WAN consumer channel\n");
  2564. ipa_assert();
  2565. }
  2566. head = &rx_pkt->sys->pending_pkts[notify->veid];
  2567. INIT_LIST_HEAD(&rx_pkt->link);
  2568. list_add_tail(&rx_pkt->link, head);
  2569. /* Check added for handling LAN consumer packet without EOT flag */
  2570. if (notify->evt_id == GSI_CHAN_EVT_EOT ||
  2571. sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
  2572. /* go over the list backward to save computations on updating length */
  2573. list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
  2574. rx_skb = rx_pkt->data.skb;
  2575. list_del(&rx_pkt->link);
  2576. dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
  2577. sys->rx_buff_sz, DMA_FROM_DEVICE);
  2578. sys->free_rx_wrapper(rx_pkt);
  2579. if (next_skb) {
  2580. skb_shinfo(rx_skb)->frag_list = next_skb;
  2581. rx_skb->len += next_skb->len;
  2582. rx_skb->data_len += next_skb->len;
  2583. }
  2584. next_skb = rx_skb;
  2585. }
  2586. } else {
  2587. return NULL;
  2588. }
  2589. return rx_skb;
  2590. }
  2591. static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
  2592. struct gsi_chan_xfer_notify *notify)
  2593. {
  2594. struct sk_buff *rx_skb;
  2595. struct ipa3_sys_context *coal_sys;
  2596. int ipa_ep_idx;
  2597. if (!notify) {
  2598. IPAERR_RL("gsi_chan_xfer_notify is null\n");
  2599. return;
  2600. }
  2601. rx_skb = handle_skb_completion(notify, true);
  2602. if (rx_skb) {
  2603. sys->pyld_hdlr(rx_skb, sys);
  2604. /* For coalescing, we have 2 transfer rings to replenish */
  2605. if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
  2606. ipa_ep_idx = ipa3_get_ep_mapping(
  2607. IPA_CLIENT_APPS_WAN_CONS);
  2608. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
  2609. IPAERR("Invalid client.\n");
  2610. return;
  2611. }
  2612. coal_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
  2613. coal_sys->repl_hdlr(coal_sys);
  2614. }
  2615. sys->repl_hdlr(sys);
  2616. }
  2617. }
  2618. static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys,
  2619. struct gsi_chan_xfer_notify *notify, uint32_t num)
  2620. {
  2621. struct ipa3_sys_context *wan_def_sys;
  2622. int i, ipa_ep_idx;
  2623. struct sk_buff *rx_skb, *first_skb = NULL, *prev_skb = NULL;
  2624. /* non-coalescing case (SKB chaining enabled) */
  2625. if (sys->ep->client != IPA_CLIENT_APPS_WAN_COAL_CONS) {
  2626. for (i = 0; i < num; i++) {
  2627. rx_skb = handle_skb_completion(&notify[i], false);
  2628. /* this is always true for EOTs */
  2629. if (rx_skb) {
  2630. if (!first_skb)
  2631. first_skb = rx_skb;
  2632. if (prev_skb)
  2633. skb_shinfo(prev_skb)->frag_list =
  2634. rx_skb;
  2635. prev_skb = rx_skb;
  2636. }
  2637. }
  2638. if (prev_skb) {
  2639. skb_shinfo(prev_skb)->frag_list = NULL;
  2640. sys->pyld_hdlr(first_skb, sys);
  2641. }
  2642. /* TODO: add chaining for coal case */
  2643. } else {
  2644. for (i = 0; i < num; i++) {
  2645. rx_skb = handle_skb_completion(&notify[i], false);
  2646. if (rx_skb) {
  2647. sys->pyld_hdlr(rx_skb, sys);
  2648. /*
  2649. * For coalescing, we have 2 transfer rings to replenish
  2650. */
  2651. ipa_ep_idx = ipa3_get_ep_mapping(
  2652. IPA_CLIENT_APPS_WAN_CONS);
  2653. if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
  2654. IPAERR("Invalid client.\n");
  2655. return;
  2656. }
  2657. wan_def_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
  2658. wan_def_sys->repl_hdlr(wan_def_sys);
  2659. }
  2660. }
  2661. }
  2662. }
  2663. static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
  2664. struct gsi_chan_xfer_notify *notify)
  2665. {
  2666. struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
  2667. struct sk_buff *rx_skb;
  2668. rx_pkt_expected = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
  2669. sys->len--;
  2670. if (notify->bytes_xfered)
  2671. rx_pkt_expected->len = notify->bytes_xfered;
  2672. rx_skb = rx_pkt_expected->data.skb;
  2673. skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
  2674. rx_skb->len = rx_pkt_expected->len;
  2675. rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
  2676. sys->ep->wstats.tx_pkts_rcvd++;
  2677. if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
  2678. ipa3_free_skb(&rx_pkt_expected->data);
  2679. sys->ep->wstats.tx_pkts_dropped++;
  2680. } else {
  2681. sys->ep->wstats.tx_pkts_sent++;
  2682. sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
  2683. (unsigned long)(&rx_pkt_expected->data));
  2684. }
  2685. ipa3_replenish_wlan_rx_cache(sys);
  2686. }
  2687. static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys)
  2688. {
  2689. IPADBG_LOW("ENTER.\n");
  2690. if (unlikely(list_empty(&sys->head_desc_list))) {
  2691. IPAERR("descriptor list is empty!\n");
  2692. WARN_ON(1);
  2693. return;
  2694. }
  2695. sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, 0);
  2696. IPADBG_LOW("EXIT\n");
  2697. }
  2698. static void ipa3_wq_rx_avail(struct work_struct *work)
  2699. {
  2700. struct ipa3_rx_pkt_wrapper *rx_pkt;
  2701. struct ipa3_sys_context *sys;
  2702. rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work);
  2703. WARN(unlikely(rx_pkt == NULL), "rx pkt is null");
  2704. sys = rx_pkt->sys;
  2705. ipa3_wq_rx_common(sys, 0);
  2706. }
  2707. static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
  2708. struct ipa3_sys_context *sys)
  2709. {
  2710. if (sys->ep->client_notify) {
  2711. sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
  2712. (unsigned long)(rx_skb));
  2713. } else {
  2714. dev_kfree_skb_any(rx_skb);
  2715. WARN(1, "client notify is null");
  2716. }
  2717. return 0;
  2718. }
  2719. static int ipa3_odl_dpl_rx_pyld_hdlr(struct sk_buff *rx_skb,
  2720. struct ipa3_sys_context *sys)
  2721. {
  2722. if (WARN(!sys->ep->client_notify, "sys->ep->client_notify is NULL\n"))
  2723. dev_kfree_skb_any(rx_skb);
  2724. else
  2725. sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
  2726. (unsigned long)(rx_skb));
  2727. return 0;
  2728. }
  2729. static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rk_pkt)
  2730. {
  2731. kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rk_pkt);
  2732. }
  2733. static void ipa3_set_aggr_limit(struct ipa_sys_connect_params *in,
  2734. struct ipa3_sys_context *sys)
  2735. {
  2736. u32 *aggr_byte_limit = &in->ipa_ep_cfg.aggr.aggr_byte_limit;
  2737. u32 adjusted_sz = ipa_adjust_ra_buff_base_sz(*aggr_byte_limit);
  2738. IPADBG("get close-by %u\n", adjusted_sz);
  2739. IPADBG("set rx_buff_sz %lu\n", (unsigned long)
  2740. IPA_GENERIC_RX_BUFF_SZ(adjusted_sz));
  2741. /* disable ipa_status */
  2742. sys->ep->status.status_en = false;
  2743. sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(adjusted_sz);
  2744. if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
  2745. in->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
  2746. *aggr_byte_limit = sys->rx_buff_sz < *aggr_byte_limit ?
  2747. IPA_ADJUST_AGGR_BYTE_LIMIT(sys->rx_buff_sz) :
  2748. IPA_ADJUST_AGGR_BYTE_LIMIT(*aggr_byte_limit);
  2749. IPADBG("set aggr_limit %lu\n", (unsigned long) *aggr_byte_limit);
  2750. }
  2751. static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
  2752. struct ipa3_sys_context *sys)
  2753. {
  2754. bool apps_wan_cons_agg_gro_flag;
  2755. unsigned long aggr_byte_limit;
  2756. if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
  2757. sys->policy = IPA_POLICY_INTR_MODE;
  2758. sys->use_comm_evt_ring = false;
  2759. return 0;
  2760. }
  2761. if (in->client == IPA_CLIENT_APPS_WAN_PROD) {
  2762. sys->policy = IPA_POLICY_INTR_MODE;
  2763. sys->use_comm_evt_ring = true;
  2764. INIT_WORK(&sys->work, ipa3_send_nop_desc);
  2765. /*
  2766. * enable source notification status for exception packets
  2767. * (i.e. QMAP commands) to be routed to modem.
  2768. */
  2769. sys->ep->status.status_en = true;
  2770. sys->ep->status.status_ep =
  2771. ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS);
  2772. return 0;
  2773. }
  2774. if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) {
  2775. sys->policy = IPA_POLICY_NOINTR_MODE;
  2776. return 0;
  2777. }
  2778. apps_wan_cons_agg_gro_flag =
  2779. ipa3_ctx->ipa_client_apps_wan_cons_agg_gro;
  2780. aggr_byte_limit = in->ipa_ep_cfg.aggr.aggr_byte_limit;
  2781. if (IPA_CLIENT_IS_PROD(in->client)) {
  2782. if (sys->ep->skip_ep_cfg) {
  2783. sys->policy = IPA_POLICY_INTR_POLL_MODE;
  2784. sys->use_comm_evt_ring = true;
  2785. atomic_set(&sys->curr_polling_state, 0);
  2786. } else {
  2787. sys->policy = IPA_POLICY_INTR_MODE;
  2788. sys->use_comm_evt_ring = true;
  2789. INIT_WORK(&sys->work, ipa3_send_nop_desc);
  2790. }
  2791. } else {
  2792. if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
  2793. in->client == IPA_CLIENT_APPS_WAN_CONS ||
  2794. in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
  2795. sys->ep->status.status_en = true;
  2796. sys->policy = IPA_POLICY_INTR_POLL_MODE;
  2797. INIT_WORK(&sys->work, ipa3_wq_handle_rx);
  2798. INIT_DELAYED_WORK(&sys->switch_to_intr_work,
  2799. ipa3_switch_to_intr_rx_work_func);
  2800. INIT_DELAYED_WORK(&sys->replenish_rx_work,
  2801. ipa3_replenish_rx_work_func);
  2802. INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
  2803. atomic_set(&sys->curr_polling_state, 0);
  2804. sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
  2805. IPA_GENERIC_RX_BUFF_BASE_SZ);
  2806. sys->get_skb = ipa3_get_skb_ipa_rx;
  2807. sys->free_skb = ipa3_free_skb_rx;
  2808. in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
  2809. if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
  2810. in->ipa_ep_cfg.aggr.aggr = IPA_COALESCE;
  2811. else
  2812. in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
  2813. in->ipa_ep_cfg.aggr.aggr_time_limit =
  2814. IPA_GENERIC_AGGR_TIME_LIMIT;
  2815. if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
  2816. sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
  2817. sys->repl_hdlr =
  2818. ipa3_replenish_rx_cache_recycle;
  2819. sys->free_rx_wrapper =
  2820. ipa3_recycle_rx_wrapper;
  2821. sys->rx_pool_sz =
  2822. ipa3_ctx->lan_rx_ring_size;
  2823. in->ipa_ep_cfg.aggr.aggr_byte_limit =
  2824. IPA_GENERIC_AGGR_BYTE_LIMIT;
  2825. in->ipa_ep_cfg.aggr.aggr_pkt_limit =
  2826. IPA_GENERIC_AGGR_PKT_LIMIT;
  2827. } else if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
  2828. in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
  2829. sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
  2830. sys->free_rx_wrapper = ipa3_free_rx_wrapper;
  2831. sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size;
  2832. if (nr_cpu_ids > 1) {
  2833. sys->repl_hdlr =
  2834. ipa3_fast_replenish_rx_cache;
  2835. } else {
  2836. sys->repl_hdlr =
  2837. ipa3_replenish_rx_cache;
  2838. }
  2839. if (in->napi_obj && in->recycle_enabled)
  2840. sys->repl_hdlr =
  2841. ipa3_replenish_rx_cache_recycle;
  2842. in->ipa_ep_cfg.aggr.aggr_sw_eof_active
  2843. = true;
  2844. if (apps_wan_cons_agg_gro_flag) {
  2845. ipa3_set_aggr_limit(in, sys);
  2846. } else {
  2847. in->ipa_ep_cfg.aggr.aggr_byte_limit =
  2848. IPA_GENERIC_AGGR_BYTE_LIMIT;
  2849. in->ipa_ep_cfg.aggr.aggr_pkt_limit =
  2850. IPA_GENERIC_AGGR_PKT_LIMIT;
  2851. }
  2852. }
  2853. } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
  2854. IPADBG("assigning policy to client:%d",
  2855. in->client);
  2856. sys->policy = IPA_POLICY_INTR_POLL_MODE;
  2857. INIT_WORK(&sys->work, ipa3_wq_handle_rx);
  2858. INIT_DELAYED_WORK(&sys->switch_to_intr_work,
  2859. ipa3_switch_to_intr_rx_work_func);
  2860. INIT_DELAYED_WORK(&sys->replenish_rx_work,
  2861. ipa3_replenish_rx_work_func);
  2862. atomic_set(&sys->curr_polling_state, 0);
  2863. sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
  2864. sys->rx_pool_sz = in->desc_fifo_sz /
  2865. IPA_FIFO_ELEMENT_SIZE - 1;
  2866. if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
  2867. sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
  2868. sys->pyld_hdlr = NULL;
  2869. sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
  2870. sys->get_skb = ipa3_get_skb_ipa_rx;
  2871. sys->free_skb = ipa3_free_skb_rx;
  2872. sys->free_rx_wrapper = ipa3_free_rx_wrapper;
  2873. in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
  2874. } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
  2875. IPADBG("assigning policy to client:%d",
  2876. in->client);
  2877. sys->policy = IPA_POLICY_INTR_POLL_MODE;
  2878. INIT_WORK(&sys->work, ipa3_wq_handle_rx);
  2879. INIT_DELAYED_WORK(&sys->switch_to_intr_work,
  2880. ipa3_switch_to_intr_rx_work_func);
  2881. INIT_DELAYED_WORK(&sys->replenish_rx_work,
  2882. ipa3_replenish_rx_work_func);
  2883. atomic_set(&sys->curr_polling_state, 0);
  2884. sys->rx_pool_sz = in->desc_fifo_sz /
  2885. IPA_FIFO_ELEMENT_SIZE - 1;
  2886. if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
  2887. sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
  2888. sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
  2889. sys->get_skb = ipa3_get_skb_ipa_rx;
  2890. sys->free_skb = ipa3_free_skb_rx;
  2891. /* recycle skb for GSB use case */
  2892. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
  2893. sys->free_rx_wrapper =
  2894. ipa3_free_rx_wrapper;
  2895. sys->repl_hdlr =
  2896. ipa3_replenish_rx_cache;
  2897. /* Overwrite buffer size & aggr limit for GSB */
  2898. sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
  2899. IPA_GSB_RX_BUFF_BASE_SZ);
  2900. in->ipa_ep_cfg.aggr.aggr_byte_limit =
  2901. IPA_GSB_AGGR_BYTE_LIMIT;
  2902. } else {
  2903. sys->free_rx_wrapper =
  2904. ipa3_free_rx_wrapper;
  2905. sys->repl_hdlr = ipa3_replenish_rx_cache;
  2906. sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
  2907. }
  2908. } else if (in->client ==
  2909. IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
  2910. IPADBG("assigning policy to client:%d",
  2911. in->client);
  2912. sys->policy = IPA_POLICY_INTR_POLL_MODE;
  2913. INIT_WORK(&sys->work, ipa3_wq_handle_rx);
  2914. INIT_DELAYED_WORK(&sys->switch_to_intr_work,
  2915. ipa3_switch_to_intr_rx_work_func);
  2916. } else if (in->client ==
  2917. IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
  2918. IPADBG("assigning policy to client:%d",
  2919. in->client);
  2920. sys->policy = IPA_POLICY_NOINTR_MODE;
  2921. } else if (in->client == IPA_CLIENT_ODL_DPL_CONS) {
  2922. IPADBG("assigning policy to ODL client:%d\n",
  2923. in->client);
  2924. sys->ep->status.status_en = true;
  2925. sys->policy = IPA_POLICY_INTR_POLL_MODE;
  2926. INIT_WORK(&sys->work, ipa3_wq_handle_rx);
  2927. INIT_DELAYED_WORK(&sys->switch_to_intr_work,
  2928. ipa3_switch_to_intr_rx_work_func);
  2929. INIT_DELAYED_WORK(&sys->replenish_rx_work,
  2930. ipa3_replenish_rx_work_func);
  2931. atomic_set(&sys->curr_polling_state, 0);
  2932. sys->rx_buff_sz =
  2933. IPA_GENERIC_RX_BUFF_SZ(IPA_ODL_RX_BUFF_SZ);
  2934. sys->pyld_hdlr = ipa3_odl_dpl_rx_pyld_hdlr;
  2935. sys->get_skb = ipa3_get_skb_ipa_rx;
  2936. sys->free_skb = ipa3_free_skb_rx;
  2937. sys->free_rx_wrapper = ipa3_recycle_rx_wrapper;
  2938. sys->repl_hdlr = ipa3_replenish_rx_cache_recycle;
  2939. sys->rx_pool_sz = in->desc_fifo_sz /
  2940. IPA_FIFO_ELEMENT_SIZE - 1;
  2941. } else {
  2942. WARN(1, "Need to install a RX pipe hdlr\n");
  2943. return -EINVAL;
  2944. }
  2945. }
  2946. return 0;
  2947. }
  2948. /**
  2949. * ipa3_tx_client_rx_notify_release() - Callback function
2950. * which will call the user-supplied callback function so the
2951. * client can release the skb once the last descriptor of the
2952. * chain has completed
  2953. *
  2954. * @user1: [in] - Data Descriptor
  2955. * @user2: [in] - endpoint idx
  2956. *
  2957. * This notified callback is for the destination client
  2958. * This function is supplied in ipa3_tx_dp_mul
  2959. */
  2960. static void ipa3_tx_client_rx_notify_release(void *user1, int user2)
  2961. {
  2962. struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
  2963. int ep_idx = user2;
  2964. IPADBG_LOW("Received data desc anchor:%pK\n", dd);
  2965. atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
  2966. ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
  2967. /* wlan host driver waits till tx complete before unload */
  2968. IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n",
  2969. ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc));
  2970. IPADBG_LOW("calling client notify callback with priv:%pK\n",
  2971. ipa3_ctx->ep[ep_idx].priv);
  2972. if (ipa3_ctx->ep[ep_idx].client_notify) {
  2973. ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
  2974. IPA_WRITE_DONE, (unsigned long)user1);
  2975. ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++;
  2976. }
  2977. }
  2978. /**
  2979. * ipa3_tx_client_rx_pkt_status() - Callback function
2980. * which increments the available fifo descriptor count for
2981. * each completed (non-final) descriptor in the chain
  2982. *
  2983. * @user1: [in] - Data Descriptor
  2984. * @user2: [in] - endpoint idx
  2985. *
  2986. * This notified callback is for the destination client
  2987. * This function is supplied in ipa3_tx_dp_mul
  2988. */
  2989. static void ipa3_tx_client_rx_pkt_status(void *user1, int user2)
  2990. {
  2991. int ep_idx = user2;
  2992. atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
  2993. ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
  2994. }
  2995. /**
  2996. * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets
  2997. * @src: [in] - Client that is sending data
  2998. * @ipa_tx_data_desc: [in] data descriptors from wlan
  2999. *
3000. * This is used to transfer data descriptors received from the
3001. * WLAN1_PROD pipe to IPA HW
3002. *
3003. * The function will send data descriptors from WLAN1_PROD (one
3004. * at a time) and set the EOT flag on the last descriptor. Once the
3005. * send is done from the transport point-of-view, the IPA driver will
3006. * get notified by the supplied callback - ipa_gsi_irq_tx_notify_cb()
  3007. *
  3008. * ipa_gsi_irq_tx_notify_cb will call to the user supplied callback
  3009. *
  3010. * Returns: 0 on success, negative on failure
  3011. */
  3012. int ipa3_tx_dp_mul(enum ipa_client_type src,
  3013. struct ipa_tx_data_desc *data_desc)
  3014. {
  3015. /* The second byte in wlan header holds qmap id */
  3016. #define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
  3017. struct ipa_tx_data_desc *entry;
  3018. struct ipa3_sys_context *sys;
  3019. struct ipa3_desc desc[2];
  3020. u32 num_desc, cnt;
  3021. int ep_idx;
  3022. IPADBG_LOW("Received data desc anchor:%pK\n", data_desc);
  3023. spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
  3024. ep_idx = ipa3_get_ep_mapping(src);
  3025. if (unlikely(ep_idx == -1)) {
  3026. IPAERR("dest EP does not exist.\n");
  3027. goto fail_send;
  3028. }
  3029. IPADBG_LOW("ep idx:%d\n", ep_idx);
  3030. sys = ipa3_ctx->ep[ep_idx].sys;
  3031. if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) {
  3032. IPAERR("dest EP not valid.\n");
  3033. goto fail_send;
  3034. }
  3035. sys->ep->wstats.rx_hd_rcvd++;
  3036. /* Calculate the number of descriptors */
  3037. num_desc = 0;
  3038. list_for_each_entry(entry, &data_desc->link, link) {
  3039. num_desc++;
  3040. }
  3041. IPADBG_LOW("Number of Data Descriptors:%d", num_desc);
  3042. if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
  3043. IPAERR("Insufficient data descriptors available\n");
  3044. goto fail_send;
  3045. }
  3046. /* Assign callback only for last data descriptor */
  3047. cnt = 0;
  3048. list_for_each_entry(entry, &data_desc->link, link) {
  3049. memset(desc, 0, 2 * sizeof(struct ipa3_desc));
  3050. IPADBG_LOW("Parsing data desc :%d\n", cnt);
  3051. cnt++;
  3052. ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
  3053. (u8)sys->ep->cfg.meta.qmap_id;
  3054. /* the tag field will be populated in ipa3_send() function */
  3055. desc[0].is_tag_status = true;
  3056. desc[1].pyld = entry->pyld_buffer;
  3057. desc[1].len = entry->pyld_len;
  3058. desc[1].type = IPA_DATA_DESC_SKB;
  3059. desc[1].user1 = data_desc;
  3060. desc[1].user2 = ep_idx;
  3061. IPADBG_LOW("priv:%pK pyld_buf:0x%pK pyld_len:%d\n",
  3062. entry->priv, desc[1].pyld, desc[1].len);
  3063. /* In case of last descriptor populate callback */
  3064. if (cnt == num_desc) {
  3065. IPADBG_LOW("data desc:%pK\n", data_desc);
  3066. desc[1].callback = ipa3_tx_client_rx_notify_release;
  3067. } else {
  3068. desc[1].callback = ipa3_tx_client_rx_pkt_status;
  3069. }
  3070. IPADBG_LOW("calling ipa3_send()\n");
  3071. if (ipa3_send(sys, 2, desc, true)) {
  3072. IPAERR("fail to send skb\n");
  3073. sys->ep->wstats.rx_pkt_leak += (cnt-1);
  3074. sys->ep->wstats.rx_dp_fail++;
  3075. goto fail_send;
  3076. }
  3077. if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
  3078. atomic_dec(&sys->ep->avail_fifo_desc);
  3079. sys->ep->wstats.rx_pkts_rcvd++;
  3080. IPADBG_LOW("ep=%d fifo desc=%d\n",
  3081. ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
  3082. }
  3083. sys->ep->wstats.rx_hd_processed++;
  3084. spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
  3085. return 0;
  3086. fail_send:
  3087. spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
  3088. return -EFAULT;
  3089. }
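/*
 * Usage sketch (hypothetical caller, not part of the driver): a WLAN
 * glue layer would anchor its per-frame descriptors on a single
 * ipa_tx_data_desc and pass the anchor to ipa3_tx_dp_mul(). The buffer
 * names (frame_va, frame_len) below are illustrative only.
 *
 *	struct ipa_tx_data_desc anchor, d[2];
 *	int i;
 *
 *	INIT_LIST_HEAD(&anchor.link);
 *	for (i = 0; i < 2; i++) {
 *		d[i].pyld_buffer = frame_va[i];
 *		d[i].pyld_len = frame_len[i];
 *		list_add_tail(&d[i].link, &anchor.link);
 *	}
 *	if (ipa3_tx_dp_mul(IPA_CLIENT_WLAN1_PROD, &anchor))
 *		pr_err("tx_dp_mul failed, caller keeps ownership\n");
 *	// On success the anchor is handed back through
 *	// ipa3_tx_client_rx_notify_release() (IPA_WRITE_DONE) once the
 *	// last descriptor completes.
 */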
  3090. void ipa3_free_skb(struct ipa_rx_data *data)
  3091. {
  3092. struct ipa3_rx_pkt_wrapper *rx_pkt;
  3093. spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
  3094. ipa3_ctx->wc_memb.total_tx_pkts_freed++;
  3095. rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data);
  3096. ipa3_skb_recycle(rx_pkt->data.skb);
  3097. (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
  3098. list_add_tail(&rx_pkt->link,
  3099. &ipa3_ctx->wc_memb.wlan_comm_desc_list);
  3100. ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
  3101. spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
  3102. }
  3103. /* Functions added to support kernel tests */
  3104. int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
  3105. unsigned long *ipa_transport_hdl,
  3106. u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
  3107. {
  3108. struct ipa3_ep_context *ep;
  3109. int ipa_ep_idx;
  3110. int result = -EINVAL;
  3111. if (sys_in == NULL || clnt_hdl == NULL) {
  3112. IPAERR("NULL args\n");
  3113. goto fail_gen;
  3114. }
  3115. if (ipa_transport_hdl == NULL || ipa_pipe_num == NULL) {
  3116. IPAERR("NULL args\n");
  3117. goto fail_gen;
  3118. }
  3119. if (sys_in->client >= IPA_CLIENT_MAX) {
  3120. IPAERR("bad parm client:%d\n", sys_in->client);
  3121. goto fail_gen;
  3122. }
  3123. ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
  3124. if (ipa_ep_idx == -1) {
  3125. IPAERR("Invalid client :%d\n", sys_in->client);
  3126. goto fail_gen;
  3127. }
  3128. ep = &ipa3_ctx->ep[ipa_ep_idx];
  3129. IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
  3130. if (ep->valid == 1) {
  3131. if (sys_in->client != IPA_CLIENT_APPS_WAN_PROD) {
  3132. IPAERR("EP %d already allocated\n", ipa_ep_idx);
  3133. goto fail_and_disable_clocks;
  3134. } else {
  3135. if (ipa3_cfg_ep_hdr(ipa_ep_idx,
  3136. &sys_in->ipa_ep_cfg.hdr)) {
  3137. IPAERR("fail to configure hdr prop of EP %d\n",
  3138. ipa_ep_idx);
  3139. result = -EFAULT;
  3140. goto fail_and_disable_clocks;
  3141. }
  3142. if (ipa3_cfg_ep_hdr_ext(ipa_ep_idx,
  3143. &sys_in->ipa_ep_cfg.hdr_ext)) {
  3144. IPAERR("fail config hdr_ext prop of EP %d\n",
  3145. ipa_ep_idx);
  3146. result = -EFAULT;
  3147. goto fail_and_disable_clocks;
  3148. }
  3149. if (ipa3_cfg_ep_cfg(ipa_ep_idx,
  3150. &sys_in->ipa_ep_cfg.cfg)) {
  3151. IPAERR("fail to configure cfg prop of EP %d\n",
  3152. ipa_ep_idx);
  3153. result = -EFAULT;
  3154. goto fail_and_disable_clocks;
  3155. }
  3156. IPAERR("client %d (ep: %d) overlay ok sys=%pK\n",
  3157. sys_in->client, ipa_ep_idx, ep->sys);
  3158. ep->client_notify = sys_in->notify;
  3159. ep->priv = sys_in->priv;
  3160. *clnt_hdl = ipa_ep_idx;
  3161. if (!ep->keep_ipa_awake)
  3162. IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
  3163. return 0;
  3164. }
  3165. }
  3166. memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
  3167. ep->valid = 1;
  3168. ep->client = sys_in->client;
  3169. ep->client_notify = sys_in->notify;
  3170. ep->priv = sys_in->priv;
  3171. ep->keep_ipa_awake = true;
  3172. if (en_status) {
  3173. ep->status.status_en = true;
  3174. ep->status.status_ep = ipa_ep_idx;
  3175. }
  3176. result = ipa3_enable_data_path(ipa_ep_idx);
  3177. if (result) {
  3178. IPAERR("enable data path failed res=%d clnt=%d.\n",
  3179. result, ipa_ep_idx);
  3180. goto fail_gen2;
  3181. }
  3182. if (!ep->skip_ep_cfg) {
  3183. if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
  3184. IPAERR("fail to configure EP.\n");
  3185. goto fail_gen2;
  3186. }
  3187. if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
  3188. IPAERR("fail to configure status of EP.\n");
  3189. goto fail_gen2;
  3190. }
  3191. IPADBG("ep configuration successful\n");
  3192. } else {
  3193. IPADBG("skipping ep configuration\n");
  3194. }
  3195. *clnt_hdl = ipa_ep_idx;
  3196. *ipa_pipe_num = ipa_ep_idx;
  3197. *ipa_transport_hdl = ipa3_ctx->gsi_dev_hdl;
  3198. if (!ep->keep_ipa_awake)
  3199. IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
  3200. ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
  3201. IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client,
  3202. ipa_ep_idx, ep->sys);
  3203. return 0;
  3204. fail_gen2:
  3205. fail_and_disable_clocks:
  3206. IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
  3207. fail_gen:
  3208. return result;
  3209. }
  3210. int ipa3_sys_teardown(u32 clnt_hdl)
  3211. {
  3212. struct ipa3_ep_context *ep;
  3213. if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
  3214. ipa3_ctx->ep[clnt_hdl].valid == 0) {
  3215. IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
  3216. return -EINVAL;
  3217. }
  3218. ep = &ipa3_ctx->ep[clnt_hdl];
  3219. if (!ep->keep_ipa_awake)
  3220. IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
  3221. ipa3_disable_data_path(clnt_hdl);
  3222. ep->valid = 0;
  3223. IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
  3224. IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
  3225. return 0;
  3226. }
  3227. int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
  3228. unsigned long gsi_ev_hdl)
  3229. {
  3230. struct ipa3_ep_context *ep;
  3231. if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
  3232. ipa3_ctx->ep[clnt_hdl].valid == 0) {
  3233. IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
  3234. return -EINVAL;
  3235. }
  3236. ep = &ipa3_ctx->ep[clnt_hdl];
  3237. ep->gsi_chan_hdl = gsi_ch_hdl;
  3238. ep->gsi_evt_ring_hdl = gsi_ev_hdl;
  3239. return 0;
  3240. }
  3241. static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
  3242. {
  3243. switch (notify->evt_id) {
  3244. case GSI_EVT_OUT_OF_BUFFERS_ERR:
  3245. IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
  3246. break;
  3247. case GSI_EVT_OUT_OF_RESOURCES_ERR:
  3248. IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
  3249. break;
  3250. case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
  3251. IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
  3252. break;
  3253. case GSI_EVT_EVT_RING_EMPTY_ERR:
  3254. IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
  3255. break;
  3256. default:
  3257. IPAERR("Unexpected err evt: %d\n", notify->evt_id);
  3258. }
  3259. }
  3260. static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
  3261. {
  3262. switch (notify->evt_id) {
  3263. case GSI_CHAN_INVALID_TRE_ERR:
  3264. IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
  3265. break;
  3266. case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
  3267. IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
  3268. break;
  3269. case GSI_CHAN_OUT_OF_BUFFERS_ERR:
  3270. IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
  3271. break;
  3272. case GSI_CHAN_OUT_OF_RESOURCES_ERR:
  3273. IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
  3274. break;
  3275. case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
  3276. IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
  3277. break;
  3278. case GSI_CHAN_HWO_1_ERR:
  3279. IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
  3280. break;
  3281. default:
  3282. IPAERR("Unexpected err evt: %d\n", notify->evt_id);
  3283. }
  3284. }
  3285. static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
  3286. {
  3287. struct ipa3_tx_pkt_wrapper *tx_pkt;
  3288. IPADBG_LOW("event %d notified\n", notify->evt_id);
  3289. switch (notify->evt_id) {
  3290. case GSI_CHAN_EVT_EOT:
  3291. atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
  3292. tx_pkt = notify->xfer_user_data;
  3293. tx_pkt->xmit_done = true;
  3294. atomic_inc(&tx_pkt->sys->xmit_eot_cnt);
  3295. tasklet_schedule(&tx_pkt->sys->tasklet);
  3296. break;
  3297. default:
  3298. IPAERR("received unexpected event id %d\n", notify->evt_id);
  3299. }
  3300. }
  3301. void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
  3302. {
  3303. bool clk_off;
  3304. atomic_set(&sys->curr_polling_state, 1);
  3305. ipa3_inc_acquire_wakelock();
  3306. /*
  3307. * pm deactivate is done in wq context
  3308. * or after NAPI poll
  3309. */
  3310. clk_off = ipa_pm_activate(sys->pm_hdl);
  3311. if (!clk_off && sys->napi_obj) {
  3312. napi_schedule(sys->napi_obj);
  3313. return;
  3314. }
  3315. queue_work(sys->wq, &sys->work);
  3316. return;
  3317. }
  3318. static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
  3319. {
  3320. struct ipa3_sys_context *sys;
  3321. if (!notify) {
  3322. IPAERR("gsi notify is NULL.\n");
  3323. return;
  3324. }
  3325. IPADBG_LOW("event %d notified\n", notify->evt_id);
  3326. sys = (struct ipa3_sys_context *)notify->chan_user_data;
  3327. sys->ep->xfer_notify_valid = true;
  3328. sys->ep->xfer_notify = *notify;
  3329. switch (notify->evt_id) {
  3330. case GSI_CHAN_EVT_EOT:
  3331. case GSI_CHAN_EVT_EOB:
  3332. atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
  3333. if (!atomic_read(&sys->curr_polling_state)) {
  3334. /* put the gsi channel into polling mode */
  3335. gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
  3336. GSI_CHAN_MODE_POLL);
  3337. __ipa_gsi_irq_rx_scedule_poll(sys);
  3338. }
  3339. break;
  3340. default:
  3341. IPAERR("received unexpected event id %d\n", notify->evt_id);
  3342. }
  3343. }
  3344. static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
  3345. {
  3346. struct ipa3_sys_context *sys;
  3347. if (!notify) {
  3348. IPAERR("gsi notify is NULL.\n");
  3349. return;
  3350. }
  3351. IPADBG_LOW("event %d notified\n", notify->evt_id);
  3352. sys = (struct ipa3_sys_context *)notify->chan_user_data;
  3353. if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
  3354. IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n");
  3355. return;
  3356. }
  3357. sys->ep->xfer_notify_valid = true;
  3358. sys->ep->xfer_notify = *notify;
  3359. switch (notify->evt_id) {
  3360. case GSI_CHAN_EVT_EOT:
  3361. if (!atomic_read(&sys->curr_polling_state)) {
  3362. /* put the gsi channel into polling mode */
  3363. gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
  3364. GSI_CHAN_MODE_POLL);
  3365. ipa3_inc_acquire_wakelock();
  3366. atomic_set(&sys->curr_polling_state, 1);
  3367. queue_work(sys->wq, &sys->work);
  3368. }
  3369. break;
  3370. default:
  3371. IPAERR("received unexpected event id %d\n", notify->evt_id);
  3372. }
  3373. }
  3374. int ipa3_alloc_common_event_ring(void)
  3375. {
  3376. struct gsi_evt_ring_props gsi_evt_ring_props;
  3377. dma_addr_t evt_dma_addr;
  3378. int result;
  3379. memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
  3380. gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
  3381. gsi_evt_ring_props.intr = GSI_INTR_IRQ;
  3382. gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
  3383. gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE;
  3384. gsi_evt_ring_props.ring_base_vaddr =
  3385. dma_alloc_coherent(ipa3_ctx->pdev,
  3386. gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL);
  3387. if (!gsi_evt_ring_props.ring_base_vaddr) {
  3388. IPAERR("fail to dma alloc %u bytes\n",
  3389. gsi_evt_ring_props.ring_len);
  3390. return -ENOMEM;
  3391. }
  3392. gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
  3393. gsi_evt_ring_props.int_modt = 0;
  3394. gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel*/
  3395. gsi_evt_ring_props.rp_update_addr = 0;
  3396. gsi_evt_ring_props.exclusive = false;
  3397. gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
  3398. gsi_evt_ring_props.user_data = NULL;
  3399. result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
  3400. ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl);
  3401. if (result) {
  3402. IPAERR("gsi_alloc_evt_ring failed %d\n", result);
  3403. return result;
  3404. }
  3405. ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE;
  3406. return 0;
  3407. }
  3408. static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
  3409. struct ipa3_ep_context *ep)
  3410. {
  3411. u32 ring_size;
  3412. int result;
  3413. gfp_t mem_flag = GFP_KERNEL;
  3414. u32 coale_ep_idx;
  3415. if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
  3416. in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
  3417. in->client == IPA_CLIENT_APPS_WAN_PROD)
  3418. mem_flag = GFP_ATOMIC;
  3419. if (!ep) {
  3420. IPAERR("EP context is empty\n");
  3421. return -EINVAL;
  3422. }
  3423. coale_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
  3424. /*
  3425. * GSI ring length is calculated based on the desc_fifo_sz
  3426. * which was meant to define the BAM desc fifo. GSI descriptors
  3427. * are 16B as opposed to 8B for BAM.
  3428. */
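/*
 * Example (illustrative): a desc_fifo_sz of 0x400 bytes describes 128
 * 8-byte BAM descriptors; doubling it below gives a 0x800-byte GSI ring
 * that still holds the same 128 16-byte transfer ring elements.
 */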
  3429. ring_size = 2 * in->desc_fifo_sz;
  3430. ep->gsi_evt_ring_hdl = ~0;
  3431. if (ep->sys->use_comm_evt_ring) {
  3432. if (ipa3_ctx->gsi_evt_comm_ring_rem < ring_size) {
  3433. IPAERR("not enough space in common event ring\n");
  3434. IPAERR("available: %d needed: %d\n",
  3435. ipa3_ctx->gsi_evt_comm_ring_rem,
  3436. ring_size);
  3437. WARN_ON(1);
  3438. return -EFAULT;
  3439. }
  3440. ipa3_ctx->gsi_evt_comm_ring_rem -= (ring_size);
  3441. ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
  3442. } else if (in->client == IPA_CLIENT_APPS_WAN_CONS &&
  3443. coale_ep_idx != IPA_EP_NOT_ALLOCATED &&
  3444. ipa3_ctx->ep[coale_ep_idx].valid == 1) {
  3445. IPADBG("Wan consumer pipe configured\n");
  3446. result = ipa_gsi_setup_coal_def_channel(in, ep,
  3447. &ipa3_ctx->ep[coale_ep_idx]);
  3448. if (result) {
  3449. IPAERR("Failed to setup default coal GSI channel\n");
  3450. goto fail_setup_event_ring;
  3451. }
  3452. return result;
  3453. } else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
  3454. IPA_CLIENT_IS_CONS(ep->client)) {
  3455. result = ipa_gsi_setup_event_ring(ep, ring_size, mem_flag);
  3456. if (result)
  3457. goto fail_setup_event_ring;
  3458. }
  3459. result = ipa_gsi_setup_transfer_ring(ep, ring_size,
  3460. ep->sys, mem_flag);
  3461. if (result)
  3462. goto fail_setup_transfer_ring;
  3463. if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
  3464. gsi_config_channel_mode(ep->gsi_chan_hdl,
  3465. GSI_CHAN_MODE_POLL);
  3466. return 0;
  3467. fail_setup_transfer_ring:
  3468. if (ep->gsi_mem_info.evt_ring_base_vaddr)
  3469. dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.evt_ring_len,
  3470. ep->gsi_mem_info.evt_ring_base_vaddr,
  3471. ep->gsi_mem_info.evt_ring_base_addr);
  3472. fail_setup_event_ring:
  3473. IPAERR("Return with err: %d\n", result);
  3474. return result;
  3475. }
  3476. static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
  3477. u32 ring_size, gfp_t mem_flag)
  3478. {
  3479. struct gsi_evt_ring_props gsi_evt_ring_props;
  3480. dma_addr_t evt_dma_addr;
  3481. int result;
  3482. evt_dma_addr = 0;
  3483. memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
  3484. gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
  3485. gsi_evt_ring_props.intr = GSI_INTR_IRQ;
  3486. gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
  3487. gsi_evt_ring_props.ring_len = ring_size;
  3488. gsi_evt_ring_props.ring_base_vaddr =
  3489. dma_alloc_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
  3490. &evt_dma_addr, mem_flag);
  3491. if (!gsi_evt_ring_props.ring_base_vaddr) {
  3492. IPAERR("fail to dma alloc %u bytes\n",
  3493. gsi_evt_ring_props.ring_len);
  3494. return -ENOMEM;
  3495. }
  3496. gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
  3497. /* copy mem info */
  3498. ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
  3499. ep->gsi_mem_info.evt_ring_base_addr =
  3500. gsi_evt_ring_props.ring_base_addr;
  3501. ep->gsi_mem_info.evt_ring_base_vaddr =
  3502. gsi_evt_ring_props.ring_base_vaddr;
  3503. if (ep->sys->napi_obj) {
  3504. gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
  3505. gsi_evt_ring_props.int_modc = IPA_GSI_EVT_RING_INT_MODC;
  3506. } else {
  3507. gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
  3508. gsi_evt_ring_props.int_modc = 1;
  3509. }
  3510. IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
  3511. ep->client,
  3512. gsi_evt_ring_props.int_modt,
  3513. gsi_evt_ring_props.int_modc);
  3514. gsi_evt_ring_props.rp_update_addr = 0;
  3515. gsi_evt_ring_props.exclusive = true;
  3516. gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
  3517. gsi_evt_ring_props.user_data = NULL;
  3518. result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
  3519. ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl);
  3520. if (result != GSI_STATUS_SUCCESS)
  3521. goto fail_alloc_evt_ring;
  3522. return 0;
  3523. fail_alloc_evt_ring:
  3524. if (ep->gsi_mem_info.evt_ring_base_vaddr)
  3525. dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.evt_ring_len,
  3526. ep->gsi_mem_info.evt_ring_base_vaddr,
  3527. ep->gsi_mem_info.evt_ring_base_addr);
  3528. IPAERR("Return with err: %d\n", result);
  3529. return result;
  3530. }
  3531. static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
  3532. u32 ring_size, struct ipa3_sys_context *user_data, gfp_t mem_flag)
  3533. {
  3534. dma_addr_t dma_addr;
  3535. union __packed gsi_channel_scratch ch_scratch;
  3536. struct gsi_chan_props gsi_channel_props;
  3537. const struct ipa_gsi_ep_config *gsi_ep_info;
  3538. int result;
  3539. memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
  3540. if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
  3541. gsi_channel_props.prot = GSI_CHAN_PROT_GCI;
  3542. else
  3543. gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
  3544. if (IPA_CLIENT_IS_PROD(ep->client)) {
  3545. gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
  3546. } else {
  3547. gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
  3548. gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
  3549. }
  3550. gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
  3551. if (!gsi_ep_info) {
  3552. IPAERR("Failed getting GSI EP info for client=%d\n",
  3553. ep->client);
  3554. result = -EINVAL;
  3555. goto fail_get_gsi_ep_info;
  3556. } else {
  3557. gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
  3558. }
  3559. gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
  3560. gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
  3561. gsi_channel_props.ring_len = ring_size;
  3562. gsi_channel_props.ring_base_vaddr =
  3563. dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
  3564. &dma_addr, mem_flag);
  3565. if (!gsi_channel_props.ring_base_vaddr) {
  3566. IPAERR("fail to dma alloc %u bytes\n",
  3567. gsi_channel_props.ring_len);
  3568. result = -ENOMEM;
  3569. goto fail_alloc_channel_ring;
  3570. }
  3571. gsi_channel_props.ring_base_addr = dma_addr;
  3572. /* copy mem info */
  3573. ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
  3574. ep->gsi_mem_info.chan_ring_base_addr =
  3575. gsi_channel_props.ring_base_addr;
  3576. ep->gsi_mem_info.chan_ring_base_vaddr =
  3577. gsi_channel_props.ring_base_vaddr;
  3578. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
  3579. gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
  3580. else
  3581. gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
  3582. gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
  3583. if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
  3584. gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
  3585. else
  3586. gsi_channel_props.low_weight = 1;
  3587. gsi_channel_props.prefetch_mode = gsi_ep_info->prefetch_mode;
  3588. gsi_channel_props.empty_lvl_threshold = gsi_ep_info->prefetch_threshold;
  3589. gsi_channel_props.chan_user_data = user_data;
  3590. gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
  3591. if (IPA_CLIENT_IS_PROD(ep->client))
  3592. gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
  3593. else
  3594. gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb;
  3595. if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
  3596. gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb;
  3597. if (IPA_CLIENT_IS_CONS(ep->client))
  3598. gsi_channel_props.cleanup_cb = free_rx_pkt;
  3599. result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
  3600. &ep->gsi_chan_hdl);
  3601. if (result != GSI_STATUS_SUCCESS) {
  3602. IPAERR("Failed to alloc GSI chan.\n");
  3603. goto fail_alloc_channel;
  3604. }
  3605. memset(&ch_scratch, 0, sizeof(ch_scratch));
  3606. /*
  3607. * Update scratch for MCS smart prefetch:
  3608. * Starting IPA4.5, smart prefetch implemented by H/W.
  3609. * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch
  3610. * so keep the fields zero.
  3611. */
  3612. if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
  3613. ch_scratch.gpi.max_outstanding_tre =
  3614. gsi_ep_info->ipa_if_tlv * GSI_CHAN_RE_SIZE_16B;
  3615. ch_scratch.gpi.outstanding_threshold =
  3616. 2 * GSI_CHAN_RE_SIZE_16B;
  3617. }
  3618. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
  3619. ch_scratch.gpi.dl_nlo_channel = 0;
  3620. result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
  3621. if (result != GSI_STATUS_SUCCESS) {
  3622. IPAERR("failed to write scratch %d\n", result);
  3623. goto fail_write_channel_scratch;
  3624. }
  3625. return 0;
  3626. fail_write_channel_scratch:
  3627. if (gsi_dealloc_channel(ep->gsi_chan_hdl)
  3628. != GSI_STATUS_SUCCESS) {
  3629. IPAERR("Failed to dealloc GSI chan.\n");
  3630. WARN_ON(1);
  3631. }
  3632. fail_alloc_channel:
  3633. dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.chan_ring_len,
  3634. ep->gsi_mem_info.chan_ring_base_vaddr,
  3635. ep->gsi_mem_info.chan_ring_base_addr);
  3636. fail_alloc_channel_ring:
  3637. fail_get_gsi_ep_info:
  3638. if (ep->gsi_evt_ring_hdl != ~0) {
  3639. gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
  3640. ep->gsi_evt_ring_hdl = ~0;
  3641. }
  3642. return result;
  3643. }
  3644. static int ipa_gsi_setup_coal_def_channel(struct ipa_sys_connect_params *in,
  3645. struct ipa3_ep_context *ep, struct ipa3_ep_context *coal_ep)
  3646. {
  3647. u32 ring_size;
  3648. int result;
  3649. ring_size = 2 * in->desc_fifo_sz;
  3650. /* copy event ring handle */
  3651. ep->gsi_evt_ring_hdl = coal_ep->gsi_evt_ring_hdl;
  3652. result = ipa_gsi_setup_transfer_ring(ep, ring_size,
  3653. coal_ep->sys, GFP_ATOMIC);
  3654. if (result) {
  3655. if (ep->gsi_mem_info.evt_ring_base_vaddr)
  3656. dma_free_coherent(ipa3_ctx->pdev,
  3657. ep->gsi_mem_info.chan_ring_len,
  3658. ep->gsi_mem_info.chan_ring_base_vaddr,
  3659. ep->gsi_mem_info.chan_ring_base_addr);
  3660. IPAERR("Destroying WAN_COAL_CONS evt_ring");
  3661. if (ep->gsi_evt_ring_hdl != ~0) {
  3662. gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
  3663. ep->gsi_evt_ring_hdl = ~0;
  3664. }
  3665. IPAERR("Return with err: %d\n", result);
  3666. return result;
  3667. }
  3668. return 0;
  3669. }
  3670. static int ipa_populate_tag_field(struct ipa3_desc *desc,
  3671. struct ipa3_tx_pkt_wrapper *tx_pkt,
  3672. struct ipahal_imm_cmd_pyld **tag_pyld_ret)
  3673. {
  3674. struct ipahal_imm_cmd_pyld *tag_pyld;
  3675. struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0};
  3676. /* populate tag field only if it is NULL */
  3677. if (desc->pyld == NULL) {
  3678. tag_cmd.tag = pointer_to_tag_wa(tx_pkt);
  3679. tag_pyld = ipahal_construct_imm_cmd(
  3680. IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true);
  3681. if (unlikely(!tag_pyld)) {
  3682. IPAERR("Failed to construct ip_packet_tag_status\n");
  3683. return -EFAULT;
  3684. }
  3685. /*
  3686. * This is for 32-bit pointer, will need special
  3687. * handling if 64-bit pointer is used
  3688. */
  3689. IPADBG_LOW("tx_pkt sent in tag: 0x%pK\n", tx_pkt);
  3690. desc->pyld = tag_pyld->data;
  3691. desc->opcode = tag_pyld->opcode;
  3692. desc->len = tag_pyld->len;
  3693. desc->user1 = tag_pyld;
  3694. desc->type = IPA_IMM_CMD_DESC;
  3695. desc->callback = ipa3_tag_destroy_imm;
  3696. *tag_pyld_ret = tag_pyld;
  3697. }
  3698. return 0;
  3699. }
  3700. static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
  3701. struct gsi_chan_xfer_notify *notify)
  3702. {
  3703. int unused_var;
  3704. return ipa_poll_gsi_n_pkt(sys, notify, 1, &unused_var);
  3705. }
  3706. static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
  3707. struct gsi_chan_xfer_notify *notify,
  3708. int expected_num, int *actual_num)
  3709. {
  3710. int ret;
  3711. int idx = 0;
  3712. int poll_num = 0;
  3713. if (!actual_num || expected_num <= 0 ||
  3714. expected_num > IPA_WAN_NAPI_MAX_FRAMES) {
  3715. IPAERR("bad params actual_num=%pK expected_num=%d\n",
  3716. actual_num, expected_num);
  3717. return GSI_STATUS_INVALID_PARAMS;
  3718. }
  3719. if (sys->ep->xfer_notify_valid) {
  3720. *notify = sys->ep->xfer_notify;
  3721. sys->ep->xfer_notify_valid = false;
  3722. idx++;
  3723. }
  3724. if (expected_num == idx) {
  3725. *actual_num = idx;
  3726. return GSI_STATUS_SUCCESS;
  3727. }
  3728. ret = gsi_poll_n_channel(sys->ep->gsi_chan_hdl,
  3729. &notify[idx], expected_num - idx, &poll_num);
  3730. if (ret == GSI_STATUS_POLL_EMPTY) {
  3731. if (idx) {
  3732. *actual_num = idx;
  3733. return GSI_STATUS_SUCCESS;
  3734. }
  3735. *actual_num = 0;
  3736. return ret;
  3737. } else if (ret != GSI_STATUS_SUCCESS) {
  3738. if (idx) {
  3739. *actual_num = idx;
  3740. return GSI_STATUS_SUCCESS;
  3741. }
  3742. *actual_num = 0;
  3743. IPAERR("Poll channel err: %d\n", ret);
  3744. return ret;
  3745. }
  3746. *actual_num = idx + poll_num;
  3747. return ret;
  3748. }
  3749. /**
  3750. * ipa3_rx_poll() - Poll the rx packets from IPA HW. This
3751. * function is executed in the softirq context
  3752. *
  3753. * if input budget is zero, the driver switches back to
  3754. * interrupt mode.
  3755. *
3756. * Return: the number of polled packets, 0 (zero) on error
  3757. */
  3758. int ipa3_rx_poll(u32 clnt_hdl, int weight)
  3759. {
  3760. struct ipa3_ep_context *ep;
  3761. int ret;
  3762. int cnt = 0;
  3763. int num = 0;
  3764. int remain_aggr_weight;
  3765. struct ipa_active_client_logging_info log;
  3766. struct gsi_chan_xfer_notify notify[IPA_WAN_NAPI_MAX_FRAMES];
  3767. IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
  3768. if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
  3769. ipa3_ctx->ep[clnt_hdl].valid == 0) {
  3770. IPAERR("bad parm 0x%x\n", clnt_hdl);
  3771. return cnt;
  3772. }
  3773. remain_aggr_weight = weight / IPA_WAN_AGGR_PKT_CNT;
  3774. if (remain_aggr_weight > IPA_WAN_NAPI_MAX_FRAMES) {
  3775. IPAERR("NAPI weight is higher than expected\n");
  3776. IPAERR("expected %d got %d\n",
  3777. IPA_WAN_NAPI_MAX_FRAMES, remain_aggr_weight);
  3778. return -EINVAL;
  3779. }
  3780. ep = &ipa3_ctx->ep[clnt_hdl];
  3781. start_poll:
  3782. while (remain_aggr_weight > 0 &&
  3783. atomic_read(&ep->sys->curr_polling_state)) {
  3784. atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
  3785. if (ipa3_ctx->enable_napi_chain) {
  3786. ret = ipa_poll_gsi_n_pkt(ep->sys, notify,
  3787. remain_aggr_weight, &num);
  3788. } else {
  3789. ret = ipa_poll_gsi_n_pkt(ep->sys, notify,
  3790. 1, &num);
  3791. }
  3792. if (ret)
  3793. break;
  3794. trace_ipa3_rx_poll_num(num);
  3795. ipa3_wq_rx_napi_chain(ep->sys, notify, num);
  3796. remain_aggr_weight -= num;
  3797. trace_ipa3_rx_poll_cnt(ep->sys->len);
  3798. if (ep->sys->len == 0) {
  3799. if (remain_aggr_weight == 0)
  3800. cnt--;
  3801. break;
  3802. }
  3803. }
  3804. cnt += weight - remain_aggr_weight * IPA_WAN_AGGR_PKT_CNT;
  3805. /* call repl_hdlr before napi_reschedule / napi_complete */
  3806. if (cnt)
  3807. ep->sys->repl_hdlr(ep->sys);
  3808. if (cnt < weight) {
  3809. napi_complete(ep->sys->napi_obj);
  3810. ret = ipa3_rx_switch_to_intr_mode(ep->sys);
  3811. if (ret == -GSI_STATUS_PENDING_IRQ &&
  3812. napi_reschedule(ep->sys->napi_obj))
  3813. goto start_poll;
  3814. ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
  3815. }
  3816. return cnt;
  3817. }
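/*
 * Usage sketch (hypothetical, not part of the driver): ipa3_rx_poll()
 * is intended to be driven from a netdev NAPI poll callback in the
 * WWAN/rmnet glue. The names wwan_napi_poll, struct wwan_priv and
 * priv->ipa_clnt_hdl below are illustrative only.
 *
 *	static int wwan_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct wwan_priv *priv =
 *			container_of(napi, struct wwan_priv, napi);
 *
 *		// ipa3_rx_poll() calls napi_complete() on its own when it
 *		// polls fewer packets than the budget, so the caller only
 *		// reports the count back to the NAPI core.
 *		return ipa3_rx_poll(priv->ipa_clnt_hdl, budget);
 *	}
 */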
  3818. static unsigned long tag_to_pointer_wa(uint64_t tag)
  3819. {
  3820. return 0xFFFF000000000000 | (unsigned long) tag;
  3821. }
  3822. static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
  3823. {
  3824. u16 temp;
3825. /* Add the check, though it might have a throughput cost */
  3826. if (BITS_PER_LONG == 64) {
  3827. temp = (u16) (~((unsigned long) tx_pkt &
  3828. 0xFFFF000000000000) >> 48);
  3829. if (temp) {
  3830. IPAERR("The 16 prefix is not all 1s (%pK)\n",
  3831. tx_pkt);
  3832. /*
3833. * We need all addresses to start with 0xFFFF
3834. * in order to pass them to HW.
  3835. */
  3836. ipa_assert();
  3837. }
  3838. }
  3839. return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF;
  3840. }
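/*
 * Example (illustrative): on a 64-bit kernel a tx_pkt pointer such as
 * 0xFFFF8800AABBCCD0 has its all-ones top 16 bits stripped by
 * pointer_to_tag_wa(), yielding the 48-bit tag 0x8800AABBCCD0 that is
 * programmed to HW; tag_to_pointer_wa() ORs 0xFFFF000000000000 back in
 * to recover the original pointer when the tag status returns.
 */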
  3841. /**
  3842. * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20
  3843. *
3844. * A hardware limitation requires avoiding the use of GSI physical channel 20.
3845. * This function allocates GSI physical channel 20 and holds it to prevent
3846. * others from using it.
  3847. *
  3848. * Return codes: 0 on success, negative on failure
  3849. */
  3850. int ipa_gsi_ch20_wa(void)
  3851. {
  3852. struct gsi_chan_props gsi_channel_props;
  3853. dma_addr_t dma_addr;
  3854. int result;
  3855. int i;
  3856. unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC];
  3857. unsigned long chan_hdl_to_keep;
  3858. memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
  3859. gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
  3860. gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
  3861. gsi_channel_props.evt_ring_hdl = ~0;
  3862. gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
  3863. gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size;
  3864. gsi_channel_props.ring_base_vaddr =
  3865. dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
  3866. &dma_addr, 0);
  3867. gsi_channel_props.ring_base_addr = dma_addr;
  3868. if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
  3869. gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
  3870. else
  3871. gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
  3872. gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
  3873. gsi_channel_props.low_weight = 1;
  3874. gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
  3875. gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
  3876. /* first allocate channels up to channel 20 */
  3877. for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
  3878. gsi_channel_props.ch_id = i;
  3879. result = gsi_alloc_channel(&gsi_channel_props,
  3880. ipa3_ctx->gsi_dev_hdl,
  3881. &chan_hdl[i]);
  3882. if (result != GSI_STATUS_SUCCESS) {
  3883. IPAERR("failed to alloc channel %d err %d\n",
  3884. i, result);
  3885. return result;
  3886. }
  3887. }
  3888. /* allocate channel 20 */
  3889. gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN;
  3890. result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
  3891. &chan_hdl_to_keep);
  3892. if (result != GSI_STATUS_SUCCESS) {
  3893. IPAERR("failed to alloc channel %d err %d\n",
  3894. i, result);
  3895. return result;
  3896. }
  3897. /* release all other channels */
  3898. for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
  3899. result = gsi_dealloc_channel(chan_hdl[i]);
  3900. if (result != GSI_STATUS_SUCCESS) {
  3901. IPAERR("failed to dealloc channel %d err %d\n",
  3902. i, result);
  3903. return result;
  3904. }
  3905. }
  3906. /* DMA memory shall not be freed as it is used by channel 20 */
  3907. return 0;
  3908. }
  3909. /**
  3910. * ipa_adjust_ra_buff_base_sz()
  3911. *
3912. * Return value: the largest power of two smaller than the input
3913. * aggr_byte_limit after IPA_MTU and IPA_GENERIC_RX_BUFF_LIMIT are added
  3914. */
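/*
 * Example (illustrative): if the requested aggr_byte_limit plus IPA_MTU
 * and IPA_GENERIC_RX_BUFF_LIMIT comes to 18000, the bit-smearing below
 * rounds up to the next power of two (32768) and the final shift
 * returns 16384, the largest power of two below 18000.
 */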
  3915. static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
  3916. {
  3917. aggr_byte_limit += IPA_MTU;
  3918. aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
  3919. aggr_byte_limit--;
  3920. aggr_byte_limit |= aggr_byte_limit >> 1;
  3921. aggr_byte_limit |= aggr_byte_limit >> 2;
  3922. aggr_byte_limit |= aggr_byte_limit >> 4;
  3923. aggr_byte_limit |= aggr_byte_limit >> 8;
  3924. aggr_byte_limit |= aggr_byte_limit >> 16;
  3925. aggr_byte_limit++;
  3926. return aggr_byte_limit >> 1;
  3927. }