// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include <linux/etherdevice.h>

#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "ws.h"
#include "protos.h"

/**
 * irdma_get_qp_from_list - get next qp from a list
 * @head: Listhead of qp's
 * @qp: current qp
 */
struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
                                           struct irdma_sc_qp *qp)
{
        struct list_head *lastentry;
        struct list_head *entry = NULL;

        if (list_empty(head))
                return NULL;

        if (!qp) {
                entry = head->next;
        } else {
                lastentry = &qp->list;
                entry = lastentry->next;
                if (entry == head)
                        return NULL;
        }

        return container_of(entry, struct irdma_sc_qp, list);
}
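
/*
 * Illustrative sketch (not part of the driver): irdma_get_qp_from_list()
 * behaves like a cursor. Passing NULL returns the first qp on the list and
 * passing the previous qp returns the next one, so a typical walk under the
 * per-priority qos mutex looks like the loop below (mirroring
 * irdma_sc_suspend_resume_qps()); do_something() is a hypothetical
 * placeholder.
 *
 *      struct irdma_sc_qp *qp = NULL;
 *
 *      mutex_lock(&vsi->qos[i].qos_mutex);
 *      while ((qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp)))
 *              do_something(qp);
 *      mutex_unlock(&vsi->qos[i].qos_mutex);
 */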

/**
 * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI
 * @vsi: the VSI struct pointer
 * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
 */
void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
{
        struct irdma_sc_qp *qp = NULL;
        u8 i;

        for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
                mutex_lock(&vsi->qos[i].qos_mutex);
                qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
                while (qp) {
                        if (op == IRDMA_OP_RESUME) {
                                if (!qp->dev->ws_add(vsi, i)) {
                                        qp->qs_handle =
                                                vsi->qos[qp->user_pri].qs_handle;
                                        irdma_cqp_qp_suspend_resume(qp, op);
                                } else {
                                        irdma_cqp_qp_suspend_resume(qp, op);
                                        irdma_modify_qp_to_err(qp);
                                }
                        } else if (op == IRDMA_OP_SUSPEND) {
                                /* issue cqp suspend command */
                                if (!irdma_cqp_qp_suspend_resume(qp, op))
                                        atomic_inc(&vsi->qp_suspend_reqs);
                        }
                        qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
                }
                mutex_unlock(&vsi->qos[i].qos_mutex);
        }
}

/**
 * irdma_set_qos_info - set qos info on the VSI from l2params
 * @vsi: vsi struct pointer
 * @l2p: l2 params struct
 */
static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
                               struct irdma_l2params *l2p)
{
        u8 i;

        vsi->qos_rel_bw = l2p->vsi_rel_bw;
        vsi->qos_prio_type = l2p->vsi_prio_type;
        vsi->dscp_mode = l2p->dscp_mode;
        if (l2p->dscp_mode) {
                memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
                for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
                        l2p->up2tc[i] = i;
        }
        for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
                if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
                        vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
                vsi->qos[i].traffic_class = l2p->up2tc[i];
                vsi->qos[i].rel_bw =
                        l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
                vsi->qos[i].prio_type =
                        l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
                vsi->qos[i].valid = false;
        }
}

/**
 * irdma_change_l2params - apply new l2 parameters to the VSI and its qp's
 * @vsi: RDMA VSI pointer
 * @l2params: New parameters from l2
 */
void irdma_change_l2params(struct irdma_sc_vsi *vsi,
                           struct irdma_l2params *l2params)
{
        if (l2params->mtu_changed) {
                vsi->mtu = l2params->mtu;
                if (vsi->ieq)
                        irdma_reinitialize_ieq(vsi);
        }

        if (!l2params->tc_changed)
                return;

        vsi->tc_change_pending = false;
        irdma_set_qos_info(vsi, l2params);
        irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
}
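
/*
 * Illustrative sketch (hypothetical caller, assumed flow): on a
 * traffic-class change the driver would normally mark the change pending and
 * suspend qp's first, then hand the new l2 parameters back in, at which
 * point irdma_change_l2params() refreshes the qos info and resumes them.
 *
 *      vsi->tc_change_pending = true;
 *      irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_SUSPEND);
 *      // ... wait for the DCB/l2 event to settle ...
 *      irdma_change_l2params(vsi, &new_l2params);      // resumes the qp's
 */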

/**
 * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
 * @qp: qp to be removed from qos
 */
void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
{
        struct irdma_sc_vsi *vsi = qp->vsi;

        ibdev_dbg(to_ibdev(qp->dev),
                  "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
                  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
                  qp->on_qoslist);
        mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
        if (qp->on_qoslist) {
                qp->on_qoslist = false;
                list_del(&qp->list);
        }
        mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
}

/**
 * irdma_qp_add_qos - called during setctx for qp to be added to qos
 * @qp: qp to be added to qos
 */
void irdma_qp_add_qos(struct irdma_sc_qp *qp)
{
        struct irdma_sc_vsi *vsi = qp->vsi;

        ibdev_dbg(to_ibdev(qp->dev),
                  "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
                  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
                  qp->on_qoslist);
        mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
        if (!qp->on_qoslist) {
                list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
                qp->on_qoslist = true;
                qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
        }
        mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
}

/**
 * irdma_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 * @abi_ver: User/Kernel ABI version
 */
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
                      int abi_ver)
{
        pd->pd_id = pd_id;
        pd->abi_ver = abi_ver;
        pd->dev = dev;
}

/**
 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
                                        struct irdma_add_arp_cache_entry_info *info,
                                        u64 scratch, bool post_sq)
{
        __le64 *wqe;
        u64 hdr;

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        set_64bit_val(wqe, 8, info->reach_max);
        set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));

        hdr = info->arp_index |
              FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
              FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
              FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
              FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
                             16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}
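
/*
 * Illustrative sketch of the CQP WQE build pattern shared by the helpers in
 * this file: reserve a slot, fill the payload quadwords, then write the
 * header quadword (which carries IRDMA_CQPSQ_WQEVALID) last, behind a
 * dma_wmb(), so the hardware never sees a partially written WQE.
 *
 *      wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 *      if (!wqe)
 *              return -ENOMEM;
 *      set_64bit_val(wqe, <offset>, <payload>);   // data quadwords
 *      dma_wmb();                                 // order payload before the valid bit
 *      set_64bit_val(wqe, 24, hdr);               // header includes the valid bit
 *      if (post_sq)
 *              irdma_sc_cqp_post_sq(cqp);         // ring the CQP doorbell
 */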

/**
 * irdma_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
                                        u16 arp_index, bool post_sq)
{
        __le64 *wqe;
        u64 hdr;

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        hdr = arp_index |
              FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
              FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
                             DUMP_PREFIX_OFFSET, 16, 8, wqe,
                             IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}

/**
 * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
                                       struct irdma_apbvt_info *info,
                                       u64 scratch, bool post_sq)
{
        __le64 *wqe;
        u64 hdr;

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        set_64bit_val(wqe, 16, info->port);

        hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
              FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
              FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
                             8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}

/**
 * irdma_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started.
 * For passive connections, when listener is created, it will
 * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local
 * ip address and tcp port. When SYN is received (passive
 * connections) or sent (active connections), this routine is
 * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
 * and quad is passed in info.
 *
 * When iwarp connection is done and its state moves to RTS, the
 * quad hash entry in the hardware will point to iwarp's qp
 * number and requires no calls from the driver.
 */
static int
irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
                                  struct irdma_qhash_table_info *info,
                                  u64 scratch, bool post_sq)
{
        __le64 *wqe;
        u64 qw1 = 0;
        u64 qw2 = 0;
        u64 temp;
        struct irdma_sc_vsi *vsi = info->vsi;

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));

        qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
              FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
        if (info->ipv4_valid) {
                set_64bit_val(wqe, 48,
                              FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
        } else {
                set_64bit_val(wqe, 56,
                              FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
                              FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
                set_64bit_val(wqe, 48,
                              FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
                              FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
        }
        qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
                         vsi->qos[info->user_pri].qs_handle);
        if (info->vlan_valid)
                qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
        set_64bit_val(wqe, 16, qw2);
        if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
                qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
                if (!info->ipv4_valid) {
                        set_64bit_val(wqe, 40,
                                      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
                                      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
                        set_64bit_val(wqe, 32,
                                      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
                                      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
                } else {
                        set_64bit_val(wqe, 32,
                                      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
                }
        }
        set_64bit_val(wqe, 8, qw1);
        temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
               FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
                          IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
               FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
               FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
               FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
               FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, temp);

        print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16,
                             8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}
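
/*
 * Illustrative sketch (hypothetical caller, not part of this file): per the
 * comment above, a listener would add a SYN-type entry keyed only by the
 * local address and port. Field names follow how this routine consumes
 * struct irdma_qhash_table_info; the manage value and the surrounding setup
 * (vsi, cqp, scratch, local_ipv4_addr, local_tcp_port, netdev) are assumed.
 *
 *      struct irdma_qhash_table_info qinfo = {};
 *
 *      qinfo.vsi = vsi;
 *      qinfo.manage = IRDMA_QHASH_MANAGE_TYPE_ADD;     // assumed enum value
 *      qinfo.entry_type = IRDMA_QHASH_TYPE_TCP_SYN;
 *      qinfo.ipv4_valid = true;
 *      qinfo.dest_ip[0] = local_ipv4_addr;     // local address for a listener
 *      qinfo.dest_port = local_tcp_port;
 *      ether_addr_copy(qinfo.mac_addr, netdev->dev_addr);
 *      irdma_sc_manage_qhash_table_entry(cqp, &qinfo, scratch, true);
 */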

/**
 * irdma_sc_qp_init - initialize qp
 * @qp: sc qp
 * @info: initialization qp info
 */
int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
{
        int ret_code;
        u32 pble_obj_cnt;
        u16 wqe_size;

        if (info->qp_uk_init_info.max_sq_frag_cnt >
            info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
            info->qp_uk_init_info.max_rq_frag_cnt >
            info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
                return -EINVAL;

        qp->dev = info->pd->dev;
        qp->vsi = info->vsi;
        qp->ieq_qp = info->vsi->exception_lan_q;
        qp->sq_pa = info->sq_pa;
        qp->rq_pa = info->rq_pa;
        qp->hw_host_ctx_pa = info->host_ctx_pa;
        qp->q2_pa = info->q2_pa;
        qp->shadow_area_pa = info->shadow_area_pa;
        qp->q2_buf = info->q2;
        qp->pd = info->pd;
        qp->hw_host_ctx = info->host_ctx;
        info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
        ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
        if (ret_code)
                return ret_code;

        qp->virtual_map = info->virtual_map;
        pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;

        if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
            (info->virtual_map && info->rq_pa >= pble_obj_cnt))
                return -EINVAL;

        qp->llp_stream_handle = (void *)(-1);
        qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
                                                    IRDMA_QUEUE_TYPE_SQ_RQ);
        ibdev_dbg(to_ibdev(qp->dev),
                  "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",
                  qp->hw_sq_size, qp->qp_uk.sq_ring.size);
        if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
                wqe_size = IRDMA_WQE_SIZE_128;
        else
                ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
                                                       &wqe_size);
        if (ret_code)
                return ret_code;

        qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
                                (wqe_size / IRDMA_QP_WQE_MIN_SIZE),
                                IRDMA_QUEUE_TYPE_SQ_RQ);
        ibdev_dbg(to_ibdev(qp->dev),
                  "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
                  qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
        qp->sq_tph_val = info->sq_tph_val;
        qp->rq_tph_val = info->rq_tph_val;
        qp->sq_tph_en = info->sq_tph_en;
        qp->rq_tph_en = info->rq_tph_en;
        qp->rcv_tph_en = info->rcv_tph_en;
        qp->xmit_tph_en = info->xmit_tph_en;
        qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
        qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;

        return 0;
}

/**
 * irdma_sc_qp_create - create qp
 * @qp: sc qp
 * @info: qp create info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
                       u64 scratch, bool post_sq)
{
        struct irdma_sc_cqp *cqp;
        __le64 *wqe;
        u64 hdr;

        cqp = qp->dev->cqp;
        if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
            qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
                return -EINVAL;

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
        set_64bit_val(wqe, 40, qp->shadow_area_pa);

        hdr = qp->qp_uk.qp_id |
              FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
              FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
              FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
              FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
              FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
              FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
                         info->arp_cache_idx_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
              FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
                             wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}
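
/*
 * Illustrative sketch of the bring-up order these helpers imply (names other
 * than the irdma_sc_* calls are hypothetical, and real callers go through
 * the CQP request machinery rather than invoking these directly):
 *
 *      irdma_sc_pd_init(dev, &pd, pd_id, abi_ver);
 *      init_info.pd = &pd;                     // plus queue PAs, host ctx, etc.
 *      err = irdma_sc_qp_init(&qp, &init_info);
 *      if (err)
 *              goto out;
 *      irdma_sc_qp_setctx_roce(&qp, qp.hw_host_ctx, &ctx_info);
 *      err = irdma_sc_qp_create(&qp, &create_info, scratch, true);
 */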

/**
 * irdma_sc_qp_modify - modify qp cqp wqe
 * @qp: sc qp
 * @info: modify qp info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
                       u64 scratch, bool post_sq)
{
        __le64 *wqe;
        struct irdma_sc_cqp *cqp;
        u64 hdr;
        u8 term_actions = 0;
        u8 term_len = 0;

        cqp = qp->dev->cqp;
        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
                if (info->dont_send_fin)
                        term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
                if (info->dont_send_term)
                        term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
                if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
                    term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
                        term_len = info->termlen;
        }

        set_64bit_val(wqe, 8,
                      FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
                      FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
        set_64bit_val(wqe, 40, qp->shadow_area_pa);

        hdr = qp->qp_uk.qp_id |
              FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
              FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
                         info->cached_var_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
              FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
              FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
              FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
              FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
                         info->remove_hash_idx) |
              FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
              FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
              FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
                         info->arp_cache_idx_valid) |
              FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
              FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
                             wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}

/**
 * irdma_sc_qp_destroy - cqp destroy qp
 * @qp: sc qp
 * @scratch: u64 saved to be used during cqp completion
 * @remove_hash_idx: flag if to remove hash idx
 * @ignore_mw_bnd: memory window bind flag
 * @post_sq: flag for cqp db to ring
 */
int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
                        bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
{
        __le64 *wqe;
        struct irdma_sc_cqp *cqp;
        u64 hdr;

        cqp = qp->dev->cqp;
        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
        set_64bit_val(wqe, 40, qp->shadow_area_pa);

        hdr = qp->qp_uk.qp_id |
              FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
              FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
              FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
              FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
              FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
                             wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}

/**
 * irdma_sc_get_encoded_ird_size - get the hw encoding for an IRD size
 * @ird_size: IRD size
 * The ird from the connection is rounded to a supported HW setting and then encoded
 * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
 * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input
 */
static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
{
        switch (ird_size ?
                roundup_pow_of_two(2 * ird_size) : 4) {
        case 256:
                return IRDMA_IRD_HW_SIZE_256;
        case 128:
                return IRDMA_IRD_HW_SIZE_128;
        case 64:
        case 32:
                return IRDMA_IRD_HW_SIZE_64;
        case 16:
        case 8:
                return IRDMA_IRD_HW_SIZE_16;
        case 4:
        default:
                break;
        }

        return IRDMA_IRD_HW_SIZE_4;
}
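
/*
 * A few sample values, worked from the switch above (sketch only): the
 * incoming IRD is doubled, rounded up to a power of two, then bucketed.
 *
 *      ird_size = 0   -> treated as 4          -> IRDMA_IRD_HW_SIZE_4
 *      ird_size = 3   -> roundup(6)   = 8      -> IRDMA_IRD_HW_SIZE_16
 *      ird_size = 10  -> roundup(20)  = 32     -> IRDMA_IRD_HW_SIZE_64
 *      ird_size = 64  -> roundup(128) = 128    -> IRDMA_IRD_HW_SIZE_128
 *      ird_size = 128 -> roundup(256) = 256    -> IRDMA_IRD_HW_SIZE_256
 */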

/**
 * irdma_sc_qp_setctx_roce - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 */
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
                             struct irdma_qp_host_ctx_info *info)
{
        struct irdma_roce_offload_info *roce_info;
        struct irdma_udp_offload_info *udp;
        u8 push_mode_en;
        u32 push_idx;

        roce_info = info->roce_info;
        udp = info->udp_info;

        qp->user_pri = info->user_pri;
        if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
                push_mode_en = 0;
                push_idx = 0;
        } else {
                push_mode_en = 1;
                push_idx = qp->push_idx;
        }
        set_64bit_val(qp_ctx, 0,
                      FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
                      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
                      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
                      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
                      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
                      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
                      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
                      FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
                      FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
                      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
                      FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
                      FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
                      FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
                      FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
        set_64bit_val(qp_ctx, 8, qp->sq_pa);
        set_64bit_val(qp_ctx, 16, qp->rq_pa);
        if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
            !(udp->tos & 0x03))
                udp->tos |= ECN_CODE_PT_VAL;
        set_64bit_val(qp_ctx, 24,
                      FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
                      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
                      FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
                      FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
                      FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
        set_64bit_val(qp_ctx, 32,
                      FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
                      FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
        set_64bit_val(qp_ctx, 40,
                      FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
                      FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
        set_64bit_val(qp_ctx, 48,
                      FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
                      FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
                      FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
        set_64bit_val(qp_ctx, 56,
                      FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
                      FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
                      FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
                      FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
        set_64bit_val(qp_ctx, 64,
                      FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
                      FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
        set_64bit_val(qp_ctx, 80,
                      FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
                      FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
        set_64bit_val(qp_ctx, 88,
                      FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
        set_64bit_val(qp_ctx, 96,
                      FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
                      FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
        set_64bit_val(qp_ctx, 112,
                      FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
        set_64bit_val(qp_ctx, 128,
                      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
                      FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
                      FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
                      FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
        set_64bit_val(qp_ctx, 136,
                      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
                      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
        set_64bit_val(qp_ctx, 144,
                      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
        set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16);
        set_64bit_val(qp_ctx, 160,
                      FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
                      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
                      FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
                      FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
                      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
                      FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
                      FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
                      FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
                      FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
                      FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
                      FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
                      FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
                      FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
        set_64bit_val(qp_ctx, 168,
                      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
        set_64bit_val(qp_ctx, 176,
                      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
                      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
                      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
        set_64bit_val(qp_ctx, 184,
                      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
                      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
        set_64bit_val(qp_ctx, 192,
                      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
                      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
        set_64bit_val(qp_ctx, 200,
                      FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
                      FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
        set_64bit_val(qp_ctx, 208,
                      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));

        print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
                             8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
}

/**
 * irdma_sc_alloc_local_mac_entry - allocate a mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
                                          bool post_sq)
{
        __le64 *wqe;
        u64 hdr;

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
                         IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
              FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
                             DUMP_PREFIX_OFFSET, 16, 8, wqe,
                             IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}

/**
 * irdma_sc_add_local_mac_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
                                        struct irdma_local_mac_entry_info *info,
                                        u64 scratch, bool post_sq)
{
        __le64 *wqe;
        u64 header;

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));

        header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
                 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
                            IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
                 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, header);

        print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
                             8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}

/**
 * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac entry delete
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
                                        u16 entry_idx, u8 ignore_ref_count,
                                        bool post_sq)
{
        __le64 *wqe;
        u64 header;

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return -ENOMEM;

        header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
                 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
                            IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
                 FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
                 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
                 FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, header);

        print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
                             DUMP_PREFIX_OFFSET, 16, 8, wqe,
                             IRDMA_CQP_WQE_SIZE * 8, false);
        if (post_sq)
                irdma_sc_cqp_post_sq(cqp);

        return 0;
}
  760. /**
  761. * irdma_sc_qp_setctx - set qp's context
  762. * @qp: sc qp
  763. * @qp_ctx: context ptr
  764. * @info: ctx info
  765. */
  766. void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
  767. struct irdma_qp_host_ctx_info *info)
  768. {
  769. struct irdma_iwarp_offload_info *iw;
  770. struct irdma_tcp_offload_info *tcp;
  771. struct irdma_sc_dev *dev;
  772. u8 push_mode_en;
  773. u32 push_idx;
  774. u64 qw0, qw3, qw7 = 0, qw16 = 0;
  775. u64 mac = 0;
  776. iw = info->iwarp_info;
  777. tcp = info->tcp_info;
  778. dev = qp->dev;
  779. if (iw->rcv_mark_en) {
  780. qp->pfpdu.marker_len = 4;
  781. qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
  782. }
  783. qp->user_pri = info->user_pri;
  784. if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
  785. push_mode_en = 0;
  786. push_idx = 0;
  787. } else {
  788. push_mode_en = 1;
  789. push_idx = qp->push_idx;
  790. }
  791. qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
  792. FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
  793. FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
  794. FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
  795. FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
  796. FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
  797. FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
  798. set_64bit_val(qp_ctx, 8, qp->sq_pa);
  799. set_64bit_val(qp_ctx, 16, qp->rq_pa);
  800. qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
  801. FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
  802. if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
  803. qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
  804. qp->src_mac_addr_idx);
  805. set_64bit_val(qp_ctx, 136,
  806. FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
  807. FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
  808. set_64bit_val(qp_ctx, 168,
  809. FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
  810. set_64bit_val(qp_ctx, 176,
  811. FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
  812. FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
  813. FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
  814. FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
  815. if (info->iwarp_info_valid) {
  816. qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
  817. FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
  818. FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
  819. FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
  820. FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
  821. FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
  822. FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
  823. iw->err_rq_idx_valid);
  824. qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
  825. qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
  826. FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
  827. set_64bit_val(qp_ctx, 144,
  828. FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
  829. FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
  830. if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
  831. mac = ether_addr_to_u64(iw->mac_addr);
  832. set_64bit_val(qp_ctx, 152,
  833. mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
  834. set_64bit_val(qp_ctx, 160,
  835. FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
  836. FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
  837. FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
  838. FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
  839. FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
  840. FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
  841. FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
  842. FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
  843. FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
  844. FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
  845. FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
  846. FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
  847. FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
  848. FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
  849. FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
  850. FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
  851. }
  852. if (info->tcp_info_valid) {
  853. qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
  854. FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
  855. FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
  856. tcp->insert_vlan_tag) |
  857. FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
  858. FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
  859. FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
  860. FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
  861. if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
  862. tcp->tos |= ECN_CODE_PT_VAL;
  863. qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
  864. FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
  865. FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
  866. FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
  867. FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
  868. if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
  869. qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
  870. qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
  871. }
  872. set_64bit_val(qp_ctx, 32,
  873. FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
  874. FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
  875. set_64bit_val(qp_ctx, 40,
  876. FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
  877. FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
  878. set_64bit_val(qp_ctx, 48,
  879. FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
  880. FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
  881. FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
  882. FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
  883. qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
  884. FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
  885. FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
  886. tcp->ignore_tcp_opt) |
  887. FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
  888. tcp->ignore_tcp_uns_opt) |
  889. FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
  890. FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
  891. FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
  892. set_64bit_val(qp_ctx, 72,
  893. FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
  894. FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
  895. set_64bit_val(qp_ctx, 80,
  896. FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
  897. FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
  898. set_64bit_val(qp_ctx, 88,
  899. FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
  900. FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
  901. set_64bit_val(qp_ctx, 96,
  902. FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
  903. FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
  904. set_64bit_val(qp_ctx, 104,
  905. FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
  906. FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
  907. set_64bit_val(qp_ctx, 112,
  908. FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
  909. FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
  910. set_64bit_val(qp_ctx, 120,
  911. FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
  912. FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
  913. qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
  914. FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
  915. set_64bit_val(qp_ctx, 184,
  916. FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
  917. FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
  918. set_64bit_val(qp_ctx, 192,
  919. FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
  920. FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
  921. set_64bit_val(qp_ctx, 200,
  922. FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
  923. FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
  924. set_64bit_val(qp_ctx, 208,
  925. FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
  926. }
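/* qw0, qw3, qw7 and qw16 collect fields from both the iwarp and tcp blocks above, so they are committed to the context only here */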
  927. set_64bit_val(qp_ctx, 0, qw0);
  928. set_64bit_val(qp_ctx, 24, qw3);
  929. set_64bit_val(qp_ctx, 56, qw7);
  930. set_64bit_val(qp_ctx, 128, qw16);
  931. print_hex_dump_debug("WQE: QP_HOST CTX", DUMP_PREFIX_OFFSET, 16, 8,
  932. qp_ctx, IRDMA_QP_CTX_SIZE, false);
  933. }
  934. /**
  935. * irdma_sc_alloc_stag - mr stag alloc
  936. * @dev: sc device struct
  937. * @info: stag info
  938. * @scratch: u64 saved to be used during cqp completion
  939. * @post_sq: flag for cqp db to ring
  940. */
  941. static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
  942. struct irdma_allocate_stag_info *info,
  943. u64 scratch, bool post_sq)
  944. {
  945. __le64 *wqe;
  946. struct irdma_sc_cqp *cqp;
  947. u64 hdr;
  948. enum irdma_page_size page_size;
  949. if (!info->total_len && !info->all_memory)
  950. return -EINVAL;
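/* map the page size in bytes to the HW encoding: 1G and 2M are special-cased, anything else is programmed as 4K */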
  951. if (info->page_size == 0x40000000)
  952. page_size = IRDMA_PAGE_SIZE_1G;
  953. else if (info->page_size == 0x200000)
  954. page_size = IRDMA_PAGE_SIZE_2M;
  955. else
  956. page_size = IRDMA_PAGE_SIZE_4K;
  957. cqp = dev->cqp;
  958. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  959. if (!wqe)
  960. return -ENOMEM;
  961. set_64bit_val(wqe, 8,
  962. FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
  963. FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
  964. set_64bit_val(wqe, 16,
  965. FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
  966. set_64bit_val(wqe, 40,
  967. FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
  968. if (info->chunk_size)
  969. set_64bit_val(wqe, 48,
  970. FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
  971. hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
  972. FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
  973. FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
  974. FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
  975. FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
  976. FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
  977. FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
  978. FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
  979. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  980. dma_wmb(); /* make sure WQE is written before valid bit is set */
  981. set_64bit_val(wqe, 24, hdr);
  982. print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8,
  983. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  984. if (post_sq)
  985. irdma_sc_cqp_post_sq(cqp);
  986. return 0;
  987. }
  988. /**
  989. * irdma_sc_mr_reg_non_shared - non-shared mr registration
  990. * @dev: sc device struct
  991. * @info: mr info
  992. * @scratch: u64 saved to be used during cqp completion
  993. * @post_sq: flag for cqp db to ring
  994. */
  995. static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
  996. struct irdma_reg_ns_stag_info *info,
  997. u64 scratch, bool post_sq)
  998. {
  999. __le64 *wqe;
  1000. u64 fbo;
  1001. struct irdma_sc_cqp *cqp;
  1002. u64 hdr;
  1003. u32 pble_obj_cnt;
  1004. bool remote_access;
  1005. u8 addr_type;
  1006. enum irdma_page_size page_size;
  1007. if (!info->total_len && !info->all_memory)
  1008. return -EINVAL;
  1009. if (info->page_size == 0x40000000)
  1010. page_size = IRDMA_PAGE_SIZE_1G;
  1011. else if (info->page_size == 0x200000)
  1012. page_size = IRDMA_PAGE_SIZE_2M;
  1013. else if (info->page_size == 0x1000)
  1014. page_size = IRDMA_PAGE_SIZE_4K;
  1015. else
  1016. return -EINVAL;
  1017. if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
  1018. IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
  1019. remote_access = true;
  1020. else
  1021. remote_access = false;
  1022. pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
  1023. if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
  1024. return -EINVAL;
  1025. cqp = dev->cqp;
  1026. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  1027. if (!wqe)
  1028. return -ENOMEM;
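/* fbo (first byte offset) is the offset of the VA within its page; it is written as the WQE address when the stag is zero based rather than VA based */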
  1029. fbo = info->va & (info->page_size - 1);
  1030. set_64bit_val(wqe, 0,
  1031. (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
  1032. info->va : fbo));
  1033. set_64bit_val(wqe, 8,
  1034. FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
  1035. FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
  1036. set_64bit_val(wqe, 16,
  1037. FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
  1038. FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
  1039. if (!info->chunk_size) {
  1040. set_64bit_val(wqe, 32, info->reg_addr_pa);
  1041. set_64bit_val(wqe, 48, 0);
  1042. } else {
  1043. set_64bit_val(wqe, 32, 0);
  1044. set_64bit_val(wqe, 48,
  1045. FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
  1046. }
  1047. set_64bit_val(wqe, 40, info->hmc_fcn_index);
  1048. set_64bit_val(wqe, 56, 0);
  1049. addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
  1050. hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
  1051. FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
  1052. FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
  1053. FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
  1054. FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
  1055. FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
  1056. FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
  1057. FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
  1058. FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
  1059. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  1060. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1061. set_64bit_val(wqe, 24, hdr);
  1062. print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8,
  1063. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  1064. if (post_sq)
  1065. irdma_sc_cqp_post_sq(cqp);
  1066. return 0;
  1067. }
  1068. /**
  1069. * irdma_sc_dealloc_stag - deallocate stag
  1070. * @dev: sc device struct
  1071. * @info: dealloc stag info
  1072. * @scratch: u64 saved to be used during cqp completion
  1073. * @post_sq: flag for cqp db to ring
  1074. */
  1075. static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
  1076. struct irdma_dealloc_stag_info *info,
  1077. u64 scratch, bool post_sq)
  1078. {
  1079. u64 hdr;
  1080. __le64 *wqe;
  1081. struct irdma_sc_cqp *cqp;
  1082. cqp = dev->cqp;
  1083. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  1084. if (!wqe)
  1085. return -ENOMEM;
  1086. set_64bit_val(wqe, 8,
  1087. FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
  1088. set_64bit_val(wqe, 16,
  1089. FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
  1090. hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
  1091. FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
  1092. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  1093. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1094. set_64bit_val(wqe, 24, hdr);
  1095. print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16,
  1096. 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  1097. if (post_sq)
  1098. irdma_sc_cqp_post_sq(cqp);
  1099. return 0;
  1100. }
  1101. /**
  1102. * irdma_sc_mw_alloc - mw allocate
  1103. * @dev: sc device struct
  1104. * @info: memory window allocation information
  1105. * @scratch: u64 saved to be used during cqp completion
  1106. * @post_sq: flag for cqp db to ring
  1107. */
  1108. static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
  1109. struct irdma_mw_alloc_info *info, u64 scratch,
  1110. bool post_sq)
  1111. {
  1112. u64 hdr;
  1113. struct irdma_sc_cqp *cqp;
  1114. __le64 *wqe;
  1115. cqp = dev->cqp;
  1116. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  1117. if (!wqe)
  1118. return -ENOMEM;
  1119. set_64bit_val(wqe, 8,
  1120. FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
  1121. set_64bit_val(wqe, 16,
  1122. FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
  1123. hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
  1124. FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
  1125. FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
  1126. info->mw1_bind_dont_vldt_key) |
  1127. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  1128. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1129. set_64bit_val(wqe, 24, hdr);
  1130. print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8,
  1131. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  1132. if (post_sq)
  1133. irdma_sc_cqp_post_sq(cqp);
  1134. return 0;
  1135. }
  1136. /**
  1137. * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
  1138. * @qp: sc qp struct
  1139. * @info: fast mr info
1140. * @post_sq: flag for sq db to ring
  1141. */
  1142. int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
  1143. struct irdma_fast_reg_stag_info *info,
  1144. bool post_sq)
  1145. {
  1146. u64 temp, hdr;
  1147. __le64 *wqe;
  1148. u32 wqe_idx;
  1149. enum irdma_page_size page_size;
  1150. struct irdma_post_sq_info sq_info = {};
  1151. if (info->page_size == 0x40000000)
  1152. page_size = IRDMA_PAGE_SIZE_1G;
  1153. else if (info->page_size == 0x200000)
  1154. page_size = IRDMA_PAGE_SIZE_2M;
  1155. else
  1156. page_size = IRDMA_PAGE_SIZE_4K;
  1157. sq_info.wr_id = info->wr_id;
  1158. sq_info.signaled = info->signaled;
  1159. sq_info.push_wqe = info->push_wqe;
  1160. wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
  1161. IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
  1162. if (!wqe)
  1163. return -ENOMEM;
  1164. irdma_clr_wqes(&qp->qp_uk, wqe_idx);
  1165. ibdev_dbg(to_ibdev(qp->dev),
  1166. "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
  1167. info->wr_id, wqe_idx,
  1168. &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
  1169. temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
  1170. (uintptr_t)info->va : info->fbo;
  1171. set_64bit_val(wqe, 0, temp);
  1172. temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
  1173. info->first_pm_pbl_index >> 16);
  1174. set_64bit_val(wqe, 8,
  1175. FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
  1176. FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa));
  1177. set_64bit_val(wqe, 16,
  1178. info->total_len |
  1179. FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
  1180. hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
  1181. FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
  1182. FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
  1183. FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
  1184. FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
  1185. FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
  1186. FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
  1187. FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
  1188. FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
  1189. FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
  1190. FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
  1191. FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
  1192. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1193. set_64bit_val(wqe, 24, hdr);
  1194. print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
  1195. wqe, IRDMA_QP_WQE_MIN_SIZE, false);
  1196. if (sq_info.push_wqe) {
  1197. irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
  1198. wqe_idx, post_sq);
  1199. } else {
  1200. if (post_sq)
  1201. irdma_uk_qp_post_wr(&qp->qp_uk);
  1202. }
  1203. return 0;
  1204. }
  1205. /**
  1206. * irdma_sc_gen_rts_ae - request AE generated after RTS
  1207. * @qp: sc qp struct
  1208. */
  1209. static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
  1210. {
  1211. __le64 *wqe;
  1212. u64 hdr;
  1213. struct irdma_qp_uk *qp_uk;
  1214. qp_uk = &qp->qp_uk;
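/* write two WQEs straight into SQ slots 1 and 2: a local-fenced NOP followed by the GEN_RTS_AE operation; no doorbell is rung here */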
  1215. wqe = qp_uk->sq_base[1].elem;
  1216. hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
  1217. FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
  1218. FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
  1219. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1220. set_64bit_val(wqe, 24, hdr);
  1221. print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET,
  1222. 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
  1223. wqe = qp_uk->sq_base[2].elem;
  1224. hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
  1225. FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
  1226. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1227. set_64bit_val(wqe, 24, hdr);
  1228. print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8,
  1229. wqe, IRDMA_QP_WQE_MIN_SIZE, false);
  1230. }
  1231. /**
  1232. * irdma_sc_send_lsmm - send last streaming mode message
  1233. * @qp: sc qp struct
  1234. * @lsmm_buf: buffer with lsmm message
  1235. * @size: size of lsmm buffer
  1236. * @stag: stag of lsmm buffer
  1237. */
  1238. void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
  1239. irdma_stag stag)
  1240. {
  1241. __le64 *wqe;
  1242. u64 hdr;
  1243. struct irdma_qp_uk *qp_uk;
  1244. qp_uk = &qp->qp_uk;
  1245. wqe = qp_uk->sq_base->elem;
  1246. set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
  1247. if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
  1248. set_64bit_val(wqe, 8,
  1249. FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
  1250. FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
  1251. } else {
  1252. set_64bit_val(wqe, 8,
  1253. FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
  1254. FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
  1255. FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
  1256. }
  1257. set_64bit_val(wqe, 16, 0);
  1258. hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
  1259. FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
  1260. FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
  1261. FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
  1262. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1263. set_64bit_val(wqe, 24, hdr);
  1264. print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8,
  1265. wqe, IRDMA_QP_WQE_MIN_SIZE, false);
  1266. if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
  1267. irdma_sc_gen_rts_ae(qp);
  1268. }
  1269. /**
  1270. * irdma_sc_send_rtt - send last read0 or write0
  1271. * @qp: sc qp struct
  1272. * @read: Do read0 or write0
  1273. */
  1274. void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
  1275. {
  1276. __le64 *wqe;
  1277. u64 hdr;
  1278. struct irdma_qp_uk *qp_uk;
  1279. qp_uk = &qp->qp_uk;
  1280. wqe = qp_uk->sq_base->elem;
  1281. set_64bit_val(wqe, 0, 0);
  1282. set_64bit_val(wqe, 16, 0);
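/* a zero-length read0 is built with dummy STags (0xabcd local, 0x1234 remote); the write0 variant needs neither */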
  1283. if (read) {
  1284. if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
  1285. set_64bit_val(wqe, 8,
  1286. FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
  1287. } else {
  1288. set_64bit_val(wqe, 8,
  1289. (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
  1290. }
  1291. hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
  1292. FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
  1293. FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
  1294. } else {
  1295. if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
  1296. set_64bit_val(wqe, 8, 0);
  1297. } else {
  1298. set_64bit_val(wqe, 8,
  1299. FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
  1300. }
  1301. hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
  1302. FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
  1303. }
  1304. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1305. set_64bit_val(wqe, 24, hdr);
  1306. print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
  1307. IRDMA_QP_WQE_MIN_SIZE, false);
  1308. if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
  1309. irdma_sc_gen_rts_ae(qp);
  1310. }
  1311. /**
  1312. * irdma_iwarp_opcode - determine if incoming is rdma layer
  1313. * @info: aeq info for the packet
  1314. * @pkt: packet for error
  1315. */
  1316. static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
  1317. {
  1318. __be16 *mpa;
  1319. u32 opcode = 0xffffffff;
  1320. if (info->q2_data_written) {
  1321. mpa = (__be16 *)pkt;
  1322. opcode = ntohs(mpa[1]) & 0xf;
  1323. }
  1324. return opcode;
  1325. }
  1326. /**
  1327. * irdma_locate_mpa - return pointer to mpa in the pkt
  1328. * @pkt: packet with data
  1329. */
  1330. static u8 *irdma_locate_mpa(u8 *pkt)
  1331. {
  1332. /* skip over ethernet header */
  1333. pkt += IRDMA_MAC_HLEN;
  1334. /* Skip over IP and TCP headers */
1335. pkt += 4 * (pkt[0] & 0x0f); /* IPv4 IHL, in 32-bit words */
1336. pkt += 4 * ((pkt[12] >> 4) & 0x0f); /* TCP data offset, in 32-bit words */
  1337. return pkt;
  1338. }
  1339. /**
  1340. * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
  1341. * @qp: sc qp ptr for pkt
  1342. * @hdr: term hdr
  1343. * @opcode: flush opcode for termhdr
  1344. * @layer_etype: error layer + error type
1345. * @err: error code in the header
  1346. */
  1347. static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
  1348. struct irdma_terminate_hdr *hdr,
  1349. enum irdma_flush_opcode opcode,
  1350. u8 layer_etype, u8 err)
  1351. {
  1352. qp->flush_code = opcode;
  1353. hdr->layer_etype = layer_etype;
  1354. hdr->error_code = err;
  1355. }
  1356. /**
  1357. * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
  1358. * @pkt: ptr to mpa in offending pkt
  1359. * @hdr: term hdr
  1360. * @copy_len: offending pkt length to be copied to term hdr
  1361. * @is_tagged: DDP tagged or untagged
  1362. */
  1363. static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
  1364. int *copy_len, u8 *is_tagged)
  1365. {
  1366. u16 ddp_seg_len;
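/* work out how many bytes of the offending DDP (and possibly RDMA read request) headers to echo back after the terminate header, and set the hdrct flags to match */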
  1367. ddp_seg_len = ntohs(*(__be16 *)pkt);
  1368. if (ddp_seg_len) {
  1369. *copy_len = 2;
  1370. hdr->hdrct = DDP_LEN_FLAG;
  1371. if (pkt[2] & 0x80) {
  1372. *is_tagged = 1;
  1373. if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
  1374. *copy_len += TERM_DDP_LEN_TAGGED;
  1375. hdr->hdrct |= DDP_HDR_FLAG;
  1376. }
  1377. } else {
  1378. if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
  1379. *copy_len += TERM_DDP_LEN_UNTAGGED;
  1380. hdr->hdrct |= DDP_HDR_FLAG;
  1381. }
  1382. if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
  1383. ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
  1384. *copy_len += TERM_RDMA_LEN;
  1385. hdr->hdrct |= RDMA_HDR_FLAG;
  1386. }
  1387. }
  1388. }
  1389. }
  1390. /**
  1391. * irdma_bld_terminate_hdr - build terminate message header
  1392. * @qp: qp associated with received terminate AE
1393. * @info: the struct containing AE information
  1394. */
  1395. static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
  1396. struct irdma_aeqe_info *info)
  1397. {
  1398. u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
  1399. int copy_len = 0;
  1400. u8 is_tagged = 0;
  1401. u32 opcode;
  1402. struct irdma_terminate_hdr *termhdr;
  1403. termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
  1404. memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
  1405. if (info->q2_data_written) {
  1406. pkt = irdma_locate_mpa(pkt);
  1407. irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
  1408. }
  1409. opcode = irdma_iwarp_opcode(info, pkt);
  1410. qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
  1411. qp->sq_flush_code = info->sq;
  1412. qp->rq_flush_code = info->rq;
  1413. switch (info->ae_id) {
  1414. case IRDMA_AE_AMP_UNALLOCATED_STAG:
  1415. qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
  1416. if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
  1417. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
  1418. (LAYER_DDP << 4) | DDP_TAGGED_BUF,
  1419. DDP_TAGGED_INV_STAG);
  1420. else
  1421. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
  1422. (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
  1423. RDMAP_INV_STAG);
  1424. break;
  1425. case IRDMA_AE_AMP_BOUNDS_VIOLATION:
  1426. qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
  1427. if (info->q2_data_written)
  1428. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
  1429. (LAYER_DDP << 4) | DDP_TAGGED_BUF,
  1430. DDP_TAGGED_BOUNDS);
  1431. else
  1432. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
  1433. (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
  1434. RDMAP_INV_BOUNDS);
  1435. break;
  1436. case IRDMA_AE_AMP_BAD_PD:
  1437. switch (opcode) {
  1438. case IRDMA_OP_TYPE_RDMA_WRITE:
  1439. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
  1440. (LAYER_DDP << 4) | DDP_TAGGED_BUF,
  1441. DDP_TAGGED_UNASSOC_STAG);
  1442. break;
  1443. case IRDMA_OP_TYPE_SEND_INV:
  1444. case IRDMA_OP_TYPE_SEND_SOL_INV:
  1445. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
  1446. (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
  1447. RDMAP_CANT_INV_STAG);
  1448. break;
  1449. default:
  1450. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
  1451. (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
  1452. RDMAP_UNASSOC_STAG);
  1453. }
  1454. break;
  1455. case IRDMA_AE_AMP_INVALID_STAG:
  1456. qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
  1457. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
  1458. (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
  1459. RDMAP_INV_STAG);
  1460. break;
  1461. case IRDMA_AE_AMP_BAD_QP:
  1462. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
  1463. (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
  1464. DDP_UNTAGGED_INV_QN);
  1465. break;
  1466. case IRDMA_AE_AMP_BAD_STAG_KEY:
  1467. case IRDMA_AE_AMP_BAD_STAG_INDEX:
  1468. qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
  1469. switch (opcode) {
  1470. case IRDMA_OP_TYPE_SEND_INV:
  1471. case IRDMA_OP_TYPE_SEND_SOL_INV:
  1472. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
  1473. (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
  1474. RDMAP_CANT_INV_STAG);
  1475. break;
  1476. default:
  1477. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
  1478. (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
  1479. RDMAP_INV_STAG);
  1480. }
  1481. break;
  1482. case IRDMA_AE_AMP_RIGHTS_VIOLATION:
  1483. case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
  1484. case IRDMA_AE_PRIV_OPERATION_DENIED:
  1485. qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
  1486. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
  1487. (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
  1488. RDMAP_ACCESS);
  1489. break;
  1490. case IRDMA_AE_AMP_TO_WRAP:
  1491. qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
  1492. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
  1493. (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
  1494. RDMAP_TO_WRAP);
  1495. break;
  1496. case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
  1497. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
  1498. (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
  1499. break;
  1500. case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
  1501. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
  1502. (LAYER_DDP << 4) | DDP_CATASTROPHIC,
  1503. DDP_CATASTROPHIC_LOCAL);
  1504. break;
  1505. case IRDMA_AE_LCE_QP_CATASTROPHIC:
  1506. case IRDMA_AE_DDP_NO_L_BIT:
  1507. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
  1508. (LAYER_DDP << 4) | DDP_CATASTROPHIC,
  1509. DDP_CATASTROPHIC_LOCAL);
  1510. break;
  1511. case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
  1512. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
  1513. (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
  1514. DDP_UNTAGGED_INV_MSN_RANGE);
  1515. break;
  1516. case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
  1517. qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
  1518. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
  1519. (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
  1520. DDP_UNTAGGED_INV_TOO_LONG);
  1521. break;
  1522. case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
  1523. if (is_tagged)
  1524. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
  1525. (LAYER_DDP << 4) | DDP_TAGGED_BUF,
  1526. DDP_TAGGED_INV_DDP_VER);
  1527. else
  1528. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
  1529. (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
  1530. DDP_UNTAGGED_INV_DDP_VER);
  1531. break;
  1532. case IRDMA_AE_DDP_UBE_INVALID_MO:
  1533. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
  1534. (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
  1535. DDP_UNTAGGED_INV_MO);
  1536. break;
  1537. case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
  1538. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
  1539. (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
  1540. DDP_UNTAGGED_INV_MSN_NO_BUF);
  1541. break;
  1542. case IRDMA_AE_DDP_UBE_INVALID_QN:
  1543. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
  1544. (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
  1545. DDP_UNTAGGED_INV_QN);
  1546. break;
  1547. case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
  1548. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
  1549. (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
  1550. RDMAP_INV_RDMAP_VER);
  1551. break;
  1552. default:
  1553. irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
  1554. (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
  1555. RDMAP_UNSPECIFIED);
  1556. break;
  1557. }
  1558. if (copy_len)
  1559. memcpy(termhdr + 1, pkt, copy_len);
  1560. return sizeof(struct irdma_terminate_hdr) + copy_len;
  1561. }
  1562. /**
  1563. * irdma_terminate_send_fin() - Send fin for terminate message
  1564. * @qp: qp associated with received terminate AE
  1565. */
  1566. void irdma_terminate_send_fin(struct irdma_sc_qp *qp)
  1567. {
  1568. irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
  1569. IRDMAQP_TERM_SEND_FIN_ONLY, 0);
  1570. }
  1571. /**
  1572. * irdma_terminate_connection() - Bad AE and send terminate to remote QP
  1573. * @qp: qp associated with received terminate AE
1574. * @info: the struct containing AE information
  1575. */
  1576. void irdma_terminate_connection(struct irdma_sc_qp *qp,
  1577. struct irdma_aeqe_info *info)
  1578. {
  1579. u8 termlen = 0;
  1580. if (qp->term_flags & IRDMA_TERM_SENT)
  1581. return;
  1582. termlen = irdma_bld_terminate_hdr(qp, info);
  1583. irdma_terminate_start_timer(qp);
  1584. qp->term_flags |= IRDMA_TERM_SENT;
  1585. irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
  1586. IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
  1587. }
  1588. /**
  1589. * irdma_terminate_received - handle terminate received AE
  1590. * @qp: qp associated with received terminate AE
1591. * @info: the struct containing AE information
  1592. */
  1593. void irdma_terminate_received(struct irdma_sc_qp *qp,
  1594. struct irdma_aeqe_info *info)
  1595. {
  1596. u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
  1597. __be32 *mpa;
  1598. u8 ddp_ctl;
  1599. u8 rdma_ctl;
  1600. u16 aeq_id = 0;
  1601. struct irdma_terminate_hdr *termhdr;
  1602. mpa = (__be32 *)irdma_locate_mpa(pkt);
  1603. if (info->q2_data_written) {
  1604. /* did not validate the frame - do it now */
  1605. ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
  1606. rdma_ctl = ntohl(mpa[0]) & 0xff;
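/* map any malformed DDP/RDMAP control field, queue number, MSN or MO in the received terminate to an AE; a bad terminate is answered with a terminate of our own */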
  1607. if ((ddp_ctl & 0xc0) != 0x40)
  1608. aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
  1609. else if ((ddp_ctl & 0x03) != 1)
  1610. aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
  1611. else if (ntohl(mpa[2]) != 2)
  1612. aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
  1613. else if (ntohl(mpa[3]) != 1)
  1614. aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
  1615. else if (ntohl(mpa[4]) != 0)
  1616. aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
  1617. else if ((rdma_ctl & 0xc0) != 0x40)
  1618. aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
  1619. info->ae_id = aeq_id;
  1620. if (info->ae_id) {
  1621. /* Bad terminate recvd - send back a terminate */
  1622. irdma_terminate_connection(qp, info);
  1623. return;
  1624. }
  1625. }
  1626. qp->term_flags |= IRDMA_TERM_RCVD;
  1627. qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
  1628. termhdr = (struct irdma_terminate_hdr *)&mpa[5];
  1629. if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
  1630. termhdr->layer_etype == RDMAP_REMOTE_OP) {
  1631. irdma_terminate_done(qp, 0);
  1632. } else {
  1633. irdma_terminate_start_timer(qp);
  1634. irdma_terminate_send_fin(qp);
  1635. }
  1636. }
  1637. static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
  1638. {
  1639. return 0;
  1640. }
  1641. static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
  1642. {
  1643. /* do nothing */
  1644. }
  1645. static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
  1646. {
  1647. /* do nothing */
  1648. }
  1649. /**
  1650. * irdma_sc_vsi_init - Init the vsi structure
  1651. * @vsi: pointer to vsi structure to initialize
  1652. * @info: the info used to initialize the vsi struct
  1653. */
  1654. void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
  1655. struct irdma_vsi_init_info *info)
  1656. {
  1657. int i;
  1658. vsi->dev = info->dev;
  1659. vsi->back_vsi = info->back_vsi;
  1660. vsi->register_qset = info->register_qset;
  1661. vsi->unregister_qset = info->unregister_qset;
  1662. vsi->mtu = info->params->mtu;
  1663. vsi->exception_lan_q = info->exception_lan_q;
  1664. vsi->vsi_idx = info->pf_data_vsi_num;
  1665. if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
  1666. vsi->fcn_id = info->dev->hmc_fn_id;
  1667. irdma_set_qos_info(vsi, info->params);
  1668. for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
  1669. mutex_init(&vsi->qos[i].qos_mutex);
  1670. INIT_LIST_HEAD(&vsi->qos[i].qplist);
  1671. }
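/* install the real work-scheduler hooks only when the LAN driver supplies qset registration; otherwise fall back to the no-op stubs above */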
  1672. if (vsi->register_qset) {
  1673. vsi->dev->ws_add = irdma_ws_add;
  1674. vsi->dev->ws_remove = irdma_ws_remove;
  1675. vsi->dev->ws_reset = irdma_ws_reset;
  1676. } else {
  1677. vsi->dev->ws_add = irdma_null_ws_add;
  1678. vsi->dev->ws_remove = irdma_null_ws_remove;
  1679. vsi->dev->ws_reset = irdma_null_ws_reset;
  1680. }
  1681. }
  1682. /**
  1683. * irdma_get_fcn_id - Return the function id
  1684. * @vsi: pointer to the vsi
  1685. */
  1686. static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
  1687. {
  1688. struct irdma_stats_inst_info stats_info = {};
  1689. struct irdma_sc_dev *dev = vsi->dev;
  1690. u8 fcn_id = IRDMA_INVALID_FCN_ID;
  1691. u8 start_idx, max_stats, i;
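/* on GEN_2 and later try to allocate a stats instance through the CQP; on GEN_1, or if that fails, claim a free slot in fcn_id_array */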
  1692. if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
  1693. if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
  1694. &stats_info))
  1695. return stats_info.stats_idx;
  1696. }
  1697. start_idx = 1;
  1698. max_stats = 16;
  1699. for (i = start_idx; i < max_stats; i++)
  1700. if (!dev->fcn_id_array[i]) {
  1701. fcn_id = i;
  1702. dev->fcn_id_array[i] = true;
  1703. break;
  1704. }
  1705. return fcn_id;
  1706. }
  1707. /**
  1708. * irdma_vsi_stats_init - Initialize the vsi statistics
  1709. * @vsi: pointer to the vsi structure
  1710. * @info: The info structure used for initialization
  1711. */
  1712. int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
  1713. struct irdma_vsi_stats_info *info)
  1714. {
  1715. u8 fcn_id = info->fcn_id;
  1716. struct irdma_dma_mem *stats_buff_mem;
  1717. vsi->pestat = info->pestat;
  1718. vsi->pestat->hw = vsi->dev->hw;
  1719. vsi->pestat->vsi = vsi;
  1720. stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
  1721. stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1);
  1722. stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device,
  1723. stats_buff_mem->size,
  1724. &stats_buff_mem->pa,
  1725. GFP_KERNEL);
  1726. if (!stats_buff_mem->va)
  1727. return -ENOMEM;
  1728. vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
  1729. vsi->pestat->gather_info.last_gather_stats_va =
  1730. (void *)((uintptr_t)stats_buff_mem->va +
  1731. IRDMA_GATHER_STATS_BUF_SIZE);
  1732. irdma_hw_stats_start_timer(vsi);
  1733. if (info->alloc_fcn_id)
  1734. fcn_id = irdma_get_fcn_id(vsi);
  1735. if (fcn_id == IRDMA_INVALID_FCN_ID)
  1736. goto stats_error;
  1737. vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
  1738. vsi->fcn_id = fcn_id;
  1739. if (info->alloc_fcn_id) {
  1740. vsi->pestat->gather_info.use_stats_inst = true;
  1741. vsi->pestat->gather_info.stats_inst_index = fcn_id;
  1742. }
  1743. return 0;
  1744. stats_error:
  1745. dma_free_coherent(vsi->pestat->hw->device, stats_buff_mem->size,
  1746. stats_buff_mem->va, stats_buff_mem->pa);
  1747. stats_buff_mem->va = NULL;
  1748. return -EIO;
  1749. }
  1750. /**
  1751. * irdma_vsi_stats_free - Free the vsi stats
  1752. * @vsi: pointer to the vsi structure
  1753. */
  1754. void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
  1755. {
  1756. struct irdma_stats_inst_info stats_info = {};
  1757. u8 fcn_id = vsi->fcn_id;
  1758. struct irdma_sc_dev *dev = vsi->dev;
  1759. if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
  1760. if (vsi->stats_fcn_id_alloc) {
  1761. stats_info.stats_idx = vsi->fcn_id;
  1762. irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
  1763. &stats_info);
  1764. }
  1765. } else {
  1766. if (vsi->stats_fcn_id_alloc &&
  1767. fcn_id < vsi->dev->hw_attrs.max_stat_inst)
  1768. vsi->dev->fcn_id_array[fcn_id] = false;
  1769. }
  1770. if (!vsi->pestat)
  1771. return;
  1772. irdma_hw_stats_stop_timer(vsi);
  1773. dma_free_coherent(vsi->pestat->hw->device,
  1774. vsi->pestat->gather_info.stats_buff_mem.size,
  1775. vsi->pestat->gather_info.stats_buff_mem.va,
  1776. vsi->pestat->gather_info.stats_buff_mem.pa);
  1777. vsi->pestat->gather_info.stats_buff_mem.va = NULL;
  1778. }
  1779. /**
  1780. * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
1781. * @wqsize: size of the wq (sq, rq) to be encoded
  1782. * @queue_type: queue type selected for the calculation algorithm
  1783. */
  1784. u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
  1785. {
  1786. u8 encoded_size = 0;
  1787. /* cqp sq's hw coded value starts from 1 for size of 4
1788. * while it starts from 0 for qp's wqs.
  1789. */
  1790. if (queue_type == IRDMA_QUEUE_TYPE_CQP)
  1791. encoded_size = 1;
  1792. wqsize >>= 2;
  1793. while (wqsize >>= 1)
  1794. encoded_size++;
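/* e.g. wqsize 256: 256 >> 2 = 64 = 2^6, so a QP WQ encodes as 6 and a CQP SQ of the same size as 7 */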
  1795. return encoded_size;
  1796. }
  1797. /**
  1798. * irdma_sc_gather_stats - collect the statistics
  1799. * @cqp: struct for cqp hw
  1800. * @info: gather stats info structure
  1801. * @scratch: u64 saved to be used during cqp completion
  1802. */
  1803. static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
  1804. struct irdma_stats_gather_info *info,
  1805. u64 scratch)
  1806. {
  1807. __le64 *wqe;
  1808. u64 temp;
  1809. if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
  1810. return -ENOMEM;
  1811. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  1812. if (!wqe)
  1813. return -ENOMEM;
  1814. set_64bit_val(wqe, 40,
  1815. FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
  1816. set_64bit_val(wqe, 32, info->stats_buff_mem.pa);
  1817. temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
  1818. FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
  1819. FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
  1820. info->stats_inst_index) |
  1821. FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
  1822. info->use_hmc_fcn_index) |
  1823. FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
  1824. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1825. set_64bit_val(wqe, 24, temp);
  1826. print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET,
  1827. 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  1828. irdma_sc_cqp_post_sq(cqp);
  1829. ibdev_dbg(to_ibdev(cqp->dev),
  1830. "STATS: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
  1831. cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
  1832. return 0;
  1833. }
  1834. /**
  1835. * irdma_sc_manage_stats_inst - allocate or free stats instance
  1836. * @cqp: struct for cqp hw
  1837. * @info: stats info structure
  1838. * @alloc: alloc vs. delete flag
  1839. * @scratch: u64 saved to be used during cqp completion
  1840. */
  1841. static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
  1842. struct irdma_stats_inst_info *info,
  1843. bool alloc, u64 scratch)
  1844. {
  1845. __le64 *wqe;
  1846. u64 temp;
  1847. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  1848. if (!wqe)
  1849. return -ENOMEM;
  1850. set_64bit_val(wqe, 40,
  1851. FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
  1852. temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
  1853. FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
  1854. FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
  1855. info->use_hmc_fcn_index) |
  1856. FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
  1857. FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
  1858. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1859. set_64bit_val(wqe, 24, temp);
  1860. print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16,
  1861. 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  1862. irdma_sc_cqp_post_sq(cqp);
  1863. return 0;
  1864. }
  1865. /**
  1866. * irdma_sc_set_up_map - set the up map table
  1867. * @cqp: struct for cqp hw
  1868. * @info: User priority map info
  1869. * @scratch: u64 saved to be used during cqp completion
  1870. */
  1871. static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
  1872. struct irdma_up_info *info, u64 scratch)
  1873. {
  1874. __le64 *wqe;
  1875. u64 temp = 0;
  1876. int i;
  1877. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  1878. if (!wqe)
  1879. return -ENOMEM;
  1880. for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
  1881. temp |= (u64)info->map[i] << (i * 8);
  1882. set_64bit_val(wqe, 0, temp);
  1883. set_64bit_val(wqe, 40,
  1884. FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
  1885. FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
  1886. temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
  1887. FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
  1888. FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
  1889. info->use_cnp_up_override) |
  1890. FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
  1891. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1892. set_64bit_val(wqe, 24, temp);
  1893. print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
  1894. IRDMA_CQP_WQE_SIZE * 8, false);
  1895. irdma_sc_cqp_post_sq(cqp);
  1896. return 0;
  1897. }
  1898. /**
  1899. * irdma_sc_manage_ws_node - create/modify/destroy WS node
  1900. * @cqp: struct for cqp hw
  1901. * @info: node info structure
1902. * @node_op: 0 for add, 1 for modify, 2 for delete
  1903. * @scratch: u64 saved to be used during cqp completion
  1904. */
  1905. static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
  1906. struct irdma_ws_node_info *info,
  1907. enum irdma_ws_node_op node_op, u64 scratch)
  1908. {
  1909. __le64 *wqe;
  1910. u64 temp = 0;
  1911. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  1912. if (!wqe)
  1913. return -ENOMEM;
  1914. set_64bit_val(wqe, 32,
  1915. FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
  1916. FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
  1917. temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
  1918. FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
  1919. FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
  1920. FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
  1921. FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
  1922. FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
  1923. FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
  1924. FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
  1925. FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
  1926. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1927. set_64bit_val(wqe, 24, temp);
  1928. print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8,
  1929. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  1930. irdma_sc_cqp_post_sq(cqp);
  1931. return 0;
  1932. }
  1933. /**
  1934. * irdma_sc_qp_flush_wqes - flush qp's wqe
  1935. * @qp: sc qp
1936. * @info: flush information
  1937. * @scratch: u64 saved to be used during cqp completion
  1938. * @post_sq: flag for cqp db to ring
  1939. */
  1940. int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
  1941. struct irdma_qp_flush_info *info, u64 scratch,
  1942. bool post_sq)
  1943. {
  1944. u64 temp = 0;
  1945. __le64 *wqe;
  1946. struct irdma_sc_cqp *cqp;
  1947. u64 hdr;
  1948. bool flush_sq = false, flush_rq = false;
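/* only flush a queue that has not already been flushed; if neither queue needs it, the request is ignored and -EALREADY is returned */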
  1949. if (info->rq && !qp->flush_rq)
  1950. flush_rq = true;
  1951. if (info->sq && !qp->flush_sq)
  1952. flush_sq = true;
  1953. qp->flush_sq |= flush_sq;
  1954. qp->flush_rq |= flush_rq;
  1955. if (!flush_sq && !flush_rq) {
  1956. ibdev_dbg(to_ibdev(qp->dev),
  1957. "CQP: Additional flush request ignored for qp %x\n",
  1958. qp->qp_uk.qp_id);
  1959. return -EALREADY;
  1960. }
  1961. cqp = qp->pd->dev->cqp;
  1962. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  1963. if (!wqe)
  1964. return -ENOMEM;
  1965. if (info->userflushcode) {
  1966. if (flush_rq)
  1967. temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
  1968. info->rq_minor_code) |
  1969. FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
  1970. info->rq_major_code);
  1971. if (flush_sq)
  1972. temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
  1973. info->sq_minor_code) |
  1974. FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
  1975. info->sq_major_code);
  1976. }
  1977. set_64bit_val(wqe, 16, temp);
  1978. temp = (info->generate_ae) ?
  1979. info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
  1980. info->ae_src) : 0;
  1981. set_64bit_val(wqe, 8, temp);
  1982. hdr = qp->qp_uk.qp_id |
  1983. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
  1984. FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
  1985. FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
  1986. FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
  1987. FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
  1988. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  1989. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1990. set_64bit_val(wqe, 24, hdr);
  1991. print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8,
  1992. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  1993. if (post_sq)
  1994. irdma_sc_cqp_post_sq(cqp);
  1995. return 0;
  1996. }
  1997. /**
  1998. * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
  1999. * @qp: sc qp
  2000. * @info: gen ae information
  2001. * @scratch: u64 saved to be used during cqp completion
  2002. * @post_sq: flag for cqp db to ring
  2003. */
  2004. static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
  2005. struct irdma_gen_ae_info *info, u64 scratch,
  2006. bool post_sq)
  2007. {
  2008. u64 temp;
  2009. __le64 *wqe;
  2010. struct irdma_sc_cqp *cqp;
  2011. u64 hdr;
  2012. cqp = qp->pd->dev->cqp;
  2013. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  2014. if (!wqe)
  2015. return -ENOMEM;
  2016. temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
  2017. info->ae_src);
  2018. set_64bit_val(wqe, 8, temp);
  2019. hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
  2020. IRDMA_CQP_OP_GEN_AE) |
  2021. FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
  2022. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  2023. dma_wmb(); /* make sure WQE is written before valid bit is set */
  2024. set_64bit_val(wqe, 24, hdr);
  2025. print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8,
  2026. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  2027. if (post_sq)
  2028. irdma_sc_cqp_post_sq(cqp);
  2029. return 0;
  2030. }
2031. /**
 * irdma_sc_qp_upload_context - upload qp's context
  2032. * @dev: sc device struct
  2033. * @info: upload context info ptr for return
  2034. * @scratch: u64 saved to be used during cqp completion
  2035. * @post_sq: flag for cqp db to ring
  2036. */
  2037. static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
  2038. struct irdma_upload_context_info *info,
  2039. u64 scratch, bool post_sq)
  2040. {
  2041. __le64 *wqe;
  2042. struct irdma_sc_cqp *cqp;
  2043. u64 hdr;
  2044. cqp = dev->cqp;
  2045. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  2046. if (!wqe)
  2047. return -ENOMEM;
  2048. set_64bit_val(wqe, 16, info->buf_pa);
  2049. hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
  2050. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
  2051. FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
  2052. FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
  2053. FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
  2054. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  2055. dma_wmb(); /* make sure WQE is written before valid bit is set */
  2056. set_64bit_val(wqe, 24, hdr);
  2057. print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16,
  2058. 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  2059. if (post_sq)
  2060. irdma_sc_cqp_post_sq(cqp);
  2061. return 0;
  2062. }
  2063. /**
  2064. * irdma_sc_manage_push_page - Handle push page
  2065. * @cqp: struct for cqp hw
  2066. * @info: push page info
  2067. * @scratch: u64 saved to be used during cqp completion
  2068. * @post_sq: flag for cqp db to ring
  2069. */
  2070. static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
  2071. struct irdma_cqp_manage_push_page_info *info,
  2072. u64 scratch, bool post_sq)
  2073. {
  2074. __le64 *wqe;
  2075. u64 hdr;
  2076. if (info->free_page &&
  2077. info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
  2078. return -EINVAL;
  2079. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  2080. if (!wqe)
  2081. return -ENOMEM;
  2082. set_64bit_val(wqe, 16, info->qs_handle);
  2083. hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
  2084. FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
  2085. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
  2086. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
  2087. FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
  2088. dma_wmb(); /* make sure WQE is written before valid bit is set */
  2089. set_64bit_val(wqe, 24, hdr);
  2090. print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET,
  2091. 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  2092. if (post_sq)
  2093. irdma_sc_cqp_post_sq(cqp);
  2094. return 0;
  2095. }
  2096. /**
  2097. * irdma_sc_suspend_qp - suspend qp for param change
  2098. * @cqp: struct for cqp hw
  2099. * @qp: sc qp struct
  2100. * @scratch: u64 saved to be used during cqp completion
  2101. */
  2102. static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
  2103. u64 scratch)
  2104. {
  2105. u64 hdr;
  2106. __le64 *wqe;
  2107. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  2108. if (!wqe)
  2109. return -ENOMEM;
  2110. hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
  2111. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
  2112. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  2113. dma_wmb(); /* make sure WQE is written before valid bit is set */
  2114. set_64bit_val(wqe, 24, hdr);
  2115. print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
  2116. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  2117. irdma_sc_cqp_post_sq(cqp);
  2118. return 0;
  2119. }
  2120. /**
  2121. * irdma_sc_resume_qp - resume qp after suspend
  2122. * @cqp: struct for cqp hw
  2123. * @qp: sc qp struct
  2124. * @scratch: u64 saved to be used during cqp completion
  2125. */
  2126. static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
  2127. u64 scratch)
  2128. {
  2129. u64 hdr;
  2130. __le64 *wqe;
  2131. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  2132. if (!wqe)
  2133. return -ENOMEM;
  2134. set_64bit_val(wqe, 16,
  2135. FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
  2136. hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
  2137. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
  2138. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  2139. dma_wmb(); /* make sure WQE is written before valid bit is set */
  2140. set_64bit_val(wqe, 24, hdr);
  2141. print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
  2142. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  2143. irdma_sc_cqp_post_sq(cqp);
  2144. return 0;
  2145. }
  2146. /**
  2147. * irdma_sc_cq_ack - acknowledge completion q
  2148. * @cq: cq struct
  2149. */
  2150. static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
  2151. {
  2152. writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
  2153. }
  2154. /**
  2155. * irdma_sc_cq_init - initialize completion q
  2156. * @cq: cq struct
  2157. * @info: cq initialization info
  2158. */
  2159. int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
  2160. {
  2161. u32 pble_obj_cnt;
  2162. pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
  2163. if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
  2164. return -EINVAL;
  2165. cq->cq_pa = info->cq_base_pa;
  2166. cq->dev = info->dev;
  2167. cq->ceq_id = info->ceq_id;
  2168. info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
  2169. info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
  2170. irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
  2171. cq->virtual_map = info->virtual_map;
  2172. cq->pbl_chunk_size = info->pbl_chunk_size;
  2173. cq->ceqe_mask = info->ceqe_mask;
  2174. cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
  2175. cq->shadow_area_pa = info->shadow_area_pa;
  2176. cq->shadow_read_threshold = info->shadow_read_threshold;
  2177. cq->ceq_id_valid = info->ceq_id_valid;
  2178. cq->tph_en = info->tph_en;
  2179. cq->tph_val = info->tph_val;
  2180. cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
  2181. cq->vsi = info->vsi;
  2182. return 0;
  2183. }
  2184. /**
  2185. * irdma_sc_cq_create - create completion q
  2186. * @cq: cq struct
  2187. * @scratch: u64 saved to be used during cqp completion
  2188. * @check_overflow: flag for overflow check
  2189. * @post_sq: flag for cqp db to ring
  2190. */
  2191. static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
  2192. bool check_overflow, bool post_sq)
  2193. {
  2194. __le64 *wqe;
  2195. struct irdma_sc_cqp *cqp;
  2196. u64 hdr;
  2197. struct irdma_sc_ceq *ceq;
  2198. int ret_code = 0;
  2199. cqp = cq->dev->cqp;
  2200. if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
  2201. return -EINVAL;
  2202. if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
  2203. return -EINVAL;
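/* if the target CEQ tracks its registered CQs, add this CQ to it before posting the create; it is removed again if no CQP WQE is available */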
  2204. ceq = cq->dev->ceq[cq->ceq_id];
  2205. if (ceq && ceq->reg_cq)
  2206. ret_code = irdma_sc_add_cq_ctx(ceq, cq);
  2207. if (ret_code)
  2208. return ret_code;
  2209. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  2210. if (!wqe) {
  2211. if (ceq && ceq->reg_cq)
  2212. irdma_sc_remove_cq_ctx(ceq, cq);
  2213. return -ENOMEM;
  2214. }
  2215. set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
  2216. set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
  2217. set_64bit_val(wqe, 16,
  2218. FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
  2219. set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
  2220. set_64bit_val(wqe, 40, cq->shadow_area_pa);
  2221. set_64bit_val(wqe, 48,
  2222. FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
  2223. set_64bit_val(wqe, 56,
  2224. FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
  2225. FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
  2226. hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
  2227. FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
  2228. IRDMA_CQPSQ_CQ_CEQID) |
  2229. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
  2230. FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
  2231. FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
  2232. FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
  2233. FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
  2234. FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
  2235. FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
  2236. FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
  2237. cq->cq_uk.avoid_mem_cflct) |
  2238. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  2239. dma_wmb(); /* make sure WQE is written before valid bit is set */
  2240. set_64bit_val(wqe, 24, hdr);
  2241. print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
  2242. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  2243. if (post_sq)
  2244. irdma_sc_cqp_post_sq(cqp);
  2245. return 0;
  2246. }
  2247. /**
  2248. * irdma_sc_cq_destroy - destroy completion q
  2249. * @cq: cq struct
  2250. * @scratch: u64 saved to be used during cqp completion
  2251. * @post_sq: flag for cqp db to ring
  2252. */
  2253. int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
  2254. {
  2255. struct irdma_sc_cqp *cqp;
  2256. __le64 *wqe;
  2257. u64 hdr;
  2258. struct irdma_sc_ceq *ceq;
  2259. cqp = cq->dev->cqp;
  2260. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  2261. if (!wqe)
  2262. return -ENOMEM;
  2263. ceq = cq->dev->ceq[cq->ceq_id];
  2264. if (ceq && ceq->reg_cq)
  2265. irdma_sc_remove_cq_ctx(ceq, cq);
  2266. set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
  2267. set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
  2268. set_64bit_val(wqe, 40, cq->shadow_area_pa);
  2269. set_64bit_val(wqe, 48,
  2270. (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
  2271. hdr = cq->cq_uk.cq_id |
  2272. FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
  2273. IRDMA_CQPSQ_CQ_CEQID) |
  2274. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
  2275. FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
  2276. FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
  2277. FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
  2278. FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
  2279. FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
  2280. FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
  2281. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  2282. dma_wmb(); /* make sure WQE is written before valid bit is set */
  2283. set_64bit_val(wqe, 24, hdr);
  2284. print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
  2285. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  2286. if (post_sq)
  2287. irdma_sc_cqp_post_sq(cqp);
  2288. return 0;
  2289. }
  2290. /**
  2291. * irdma_sc_cq_resize - set resized cq buffer info
  2292. * @cq: resized cq
  2293. * @info: resized cq buffer info
  2294. */
  2295. void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
  2296. {
  2297. cq->virtual_map = info->virtual_map;
  2298. cq->cq_pa = info->cq_pa;
  2299. cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
  2300. cq->pbl_chunk_size = info->pbl_chunk_size;
  2301. irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
  2302. }
  2303. /**
  2304. * irdma_sc_cq_modify - modify a Completion Queue
  2305. * @cq: cq struct
  2306. * @info: modification info struct
  2307. * @scratch: u64 saved to be used during cqp completion
  2308. * @post_sq: flag to post to sq
  2309. */
  2310. static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
  2311. struct irdma_modify_cq_info *info, u64 scratch,
  2312. bool post_sq)
  2313. {
  2314. struct irdma_sc_cqp *cqp;
  2315. __le64 *wqe;
  2316. u64 hdr;
  2317. u32 pble_obj_cnt;
  2318. pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
  2319. if (info->cq_resize && info->virtual_map &&
  2320. info->first_pm_pbl_idx >= pble_obj_cnt)
  2321. return -EINVAL;
  2322. cqp = cq->dev->cqp;
  2323. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  2324. if (!wqe)
  2325. return -ENOMEM;
  2326. set_64bit_val(wqe, 0, info->cq_size);
  2327. set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
  2328. set_64bit_val(wqe, 16,
  2329. FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
  2330. set_64bit_val(wqe, 32, info->cq_pa);
  2331. set_64bit_val(wqe, 40, cq->shadow_area_pa);
  2332. set_64bit_val(wqe, 48, info->first_pm_pbl_idx);
  2333. set_64bit_val(wqe, 56,
  2334. FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
  2335. FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
  2336. hdr = cq->cq_uk.cq_id |
  2337. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
  2338. FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
  2339. FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
  2340. FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
  2341. FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
  2342. FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
  2343. FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
  2344. FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
  2345. cq->cq_uk.avoid_mem_cflct) |
  2346. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  2347. dma_wmb(); /* make sure WQE is written before valid bit is set */
  2348. set_64bit_val(wqe, 24, hdr);
  2349. print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
  2350. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  2351. if (post_sq)
  2352. irdma_sc_cqp_post_sq(cqp);
  2353. return 0;
  2354. }
  2355. /**
  2356. * irdma_check_cqp_progress - check cqp processing progress
  2357. * @timeout: timeout info struct
  2358. * @dev: sc device struct
  2359. */
  2360. void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
  2361. {
  2362. u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
  2363. if (timeout->compl_cqp_cmds != completed_ops) {
  2364. timeout->compl_cqp_cmds = completed_ops;
  2365. timeout->count = 0;
  2366. } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
  2367. timeout->count++;
  2368. }
  2369. }
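/*
 * Illustrative use of irdma_check_cqp_progress() (a minimal sketch, not
 * code from this file): a caller polls the helper and treats a growing
 * 'count' as a stalled CQP. The threshold name is an assumption for the
 * example.
 *
 *	struct irdma_cqp_timeout cqp_timeout = {};
 *
 *	do {
 *		irdma_check_cqp_progress(&cqp_timeout, dev);
 *		if (cqp_timeout.count > CQP_TIMEOUT_THRESHOLD)
 *			return -ETIMEDOUT;
 *		msleep(1);
 *	} while (!cqp_done);
 */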
  2370. /**
  2371. * irdma_get_cqp_reg_info - get head and tail for cqp using registers
  2372. * @cqp: struct for cqp hw
  2373. * @val: cqp tail register value
  2374. * @tail: wqtail register value
  2375. * @error: cqp processing err
  2376. */
  2377. static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
  2378. u32 *tail, u32 *error)
  2379. {
  2380. *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
  2381. *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
  2382. *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
  2383. }
  2384. /**
  2385. * irdma_cqp_poll_registers - poll cqp registers
  2386. * @cqp: struct for cqp hw
  2387. * @tail: wqtail register value
  2388. * @count: how many times to try for completion
  2389. */
  2390. static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
  2391. u32 count)
  2392. {
  2393. u32 i = 0;
  2394. u32 newtail, error, val;
  2395. while (i++ < count) {
  2396. irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
  2397. if (error) {
  2398. error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
  2399. ibdev_dbg(to_ibdev(cqp->dev),
  2400. "CQP: CQPERRCODES error_code[x%08X]\n",
  2401. error);
  2402. return -EIO;
  2403. }
  2404. if (newtail != tail) {
  2405. /* SUCCESS */
  2406. IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
  2407. atomic64_inc(&cqp->completed_ops);
  2408. return 0;
  2409. }
  2410. udelay(cqp->dev->hw_attrs.max_sleep_count);
  2411. }
  2412. return -ETIMEDOUT;
  2413. }
  2414. /**
  2415. * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
  2416. * @dev: sc device struct
  2417. * @buf: pointer to commit buffer
  2418. * @buf_idx: buffer index
  2419. * @obj_info: object info pointer
2420. * @rsrc_idx: index of the memory resource
  2421. */
  2422. static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
  2423. u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
  2424. u32 rsrc_idx)
  2425. {
  2426. u64 temp;
  2427. get_64bit_val(buf, buf_idx, &temp);
  2428. switch (rsrc_idx) {
  2429. case IRDMA_HMC_IW_QP:
  2430. obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
  2431. break;
  2432. case IRDMA_HMC_IW_CQ:
  2433. obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
  2434. break;
  2435. case IRDMA_HMC_IW_APBVT_ENTRY:
  2436. obj_info[rsrc_idx].cnt = 1;
  2437. break;
  2438. default:
  2439. obj_info[rsrc_idx].cnt = (u32)temp;
  2440. break;
  2441. }
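/*
 * The high bits of each commit entry report the object base in 512-byte
 * units above IRDMA_COMMIT_FPM_BASE_S; e.g. a base field of 4 places the
 * object 4 * 512 = 2048 bytes into this function's HMC space (example
 * values illustrative).
 */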
  2442. obj_info[rsrc_idx].base = (temp >> IRDMA_COMMIT_FPM_BASE_S) * 512;
  2443. return temp;
  2444. }
  2445. /**
  2446. * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
  2447. * @dev: pointer to dev struct
  2448. * @buf: ptr to fpm commit buffer
  2449. * @info: ptr to irdma_hmc_obj_info struct
  2450. * @sd: number of SDs for HMC objects
  2451. *
2452. * parses fpm commit info and copies the base values
2453. * of hmc objects into hmc_info
  2454. */
  2455. static void
  2456. irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
  2457. struct irdma_hmc_obj_info *info, u32 *sd)
  2458. {
  2459. u64 size;
  2460. u32 i;
  2461. u64 max_base = 0;
  2462. u32 last_hmc_obj = 0;
  2463. irdma_sc_decode_fpm_commit(dev, buf, 0, info,
  2464. IRDMA_HMC_IW_QP);
  2465. irdma_sc_decode_fpm_commit(dev, buf, 8, info,
  2466. IRDMA_HMC_IW_CQ);
2467. /* skipping RSRVD */
  2468. irdma_sc_decode_fpm_commit(dev, buf, 24, info,
  2469. IRDMA_HMC_IW_HTE);
  2470. irdma_sc_decode_fpm_commit(dev, buf, 32, info,
  2471. IRDMA_HMC_IW_ARP);
  2472. irdma_sc_decode_fpm_commit(dev, buf, 40, info,
  2473. IRDMA_HMC_IW_APBVT_ENTRY);
  2474. irdma_sc_decode_fpm_commit(dev, buf, 48, info,
  2475. IRDMA_HMC_IW_MR);
  2476. irdma_sc_decode_fpm_commit(dev, buf, 56, info,
  2477. IRDMA_HMC_IW_XF);
  2478. irdma_sc_decode_fpm_commit(dev, buf, 64, info,
  2479. IRDMA_HMC_IW_XFFL);
  2480. irdma_sc_decode_fpm_commit(dev, buf, 72, info,
  2481. IRDMA_HMC_IW_Q1);
  2482. irdma_sc_decode_fpm_commit(dev, buf, 80, info,
  2483. IRDMA_HMC_IW_Q1FL);
  2484. irdma_sc_decode_fpm_commit(dev, buf, 88, info,
  2485. IRDMA_HMC_IW_TIMER);
  2486. irdma_sc_decode_fpm_commit(dev, buf, 112, info,
  2487. IRDMA_HMC_IW_PBLE);
  2488. /* skipping RSVD. */
  2489. if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
  2490. irdma_sc_decode_fpm_commit(dev, buf, 96, info,
  2491. IRDMA_HMC_IW_FSIMC);
  2492. irdma_sc_decode_fpm_commit(dev, buf, 104, info,
  2493. IRDMA_HMC_IW_FSIAV);
  2494. irdma_sc_decode_fpm_commit(dev, buf, 128, info,
  2495. IRDMA_HMC_IW_RRF);
  2496. irdma_sc_decode_fpm_commit(dev, buf, 136, info,
  2497. IRDMA_HMC_IW_RRFFL);
  2498. irdma_sc_decode_fpm_commit(dev, buf, 144, info,
  2499. IRDMA_HMC_IW_HDR);
  2500. irdma_sc_decode_fpm_commit(dev, buf, 152, info,
  2501. IRDMA_HMC_IW_MD);
  2502. irdma_sc_decode_fpm_commit(dev, buf, 160, info,
  2503. IRDMA_HMC_IW_OOISC);
  2504. irdma_sc_decode_fpm_commit(dev, buf, 168, info,
  2505. IRDMA_HMC_IW_OOISCFFL);
  2506. }
  2507. /* searching for the last object in HMC to find the size of the HMC area. */
  2508. for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
  2509. if (info[i].base > max_base) {
  2510. max_base = info[i].base;
  2511. last_hmc_obj = i;
  2512. }
  2513. }
  2514. size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
  2515. info[last_hmc_obj].base;
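/*
 * Each SD maps 2 MB (1 << 21) of HMC backing memory, so report the total
 * footprint in 2 MB units, rounded up: e.g. a 5 MB footprint gives 3 SDs.
 */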
  2516. if (size & 0x1FFFFF)
  2517. *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
  2518. else
  2519. *sd = (u32)(size >> 21);
  2520. }
  2521. /**
  2522. * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
  2523. * @buf: ptr to fpm query buffer
  2524. * @buf_idx: index into buf
  2525. * @obj_info: ptr to irdma_hmc_obj_info struct
  2526. * @rsrc_idx: resource index into info
  2527. *
  2528. * Decode a 64 bit value from fpm query buffer into max count and size
  2529. */
  2530. static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
  2531. struct irdma_hmc_obj_info *obj_info,
  2532. u32 rsrc_idx)
  2533. {
  2534. u64 temp;
  2535. u32 size;
  2536. get_64bit_val(buf, buf_idx, &temp);
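/*
 * Each query entry packs max_cnt in the low 32 bits and log2(object size)
 * in the high 32 bits; e.g. temp = 0x0000000c00010000 gives max_cnt =
 * 0x10000 and size = BIT_ULL(12) = 4096 bytes (values illustrative).
 */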
  2537. obj_info[rsrc_idx].max_cnt = (u32)temp;
  2538. size = (u32)(temp >> 32);
  2539. obj_info[rsrc_idx].size = BIT_ULL(size);
  2540. return temp;
  2541. }
  2542. /**
  2543. * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
  2544. * @dev: ptr to shared code device
  2545. * @buf: ptr to fpm query buffer
  2546. * @hmc_info: ptr to irdma_hmc_obj_info struct
  2547. * @hmc_fpm_misc: ptr to fpm data
  2548. *
2549. * parses fpm query buffer and copies the max_cnt and
2550. * size values of hmc objects into hmc_info
  2551. */
  2552. static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
  2553. struct irdma_hmc_info *hmc_info,
  2554. struct irdma_hmc_fpm_misc *hmc_fpm_misc)
  2555. {
  2556. struct irdma_hmc_obj_info *obj_info;
  2557. u64 temp;
  2558. u32 size;
  2559. u16 max_pe_sds;
  2560. obj_info = hmc_info->hmc_obj;
  2561. get_64bit_val(buf, 0, &temp);
  2562. hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
  2563. max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
  2564. hmc_fpm_misc->max_sds = max_pe_sds;
  2565. hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
  2566. get_64bit_val(buf, 8, &temp);
  2567. obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
  2568. size = (u32)(temp >> 32);
  2569. obj_info[IRDMA_HMC_IW_QP].size = BIT_ULL(size);
  2570. get_64bit_val(buf, 16, &temp);
  2571. obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
  2572. size = (u32)(temp >> 32);
  2573. obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
  2574. irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
  2575. irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
  2576. obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
  2577. obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
  2578. irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
  2579. irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
  2580. get_64bit_val(buf, 64, &temp);
  2581. obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
  2582. obj_info[IRDMA_HMC_IW_XFFL].size = 4;
  2583. hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
  2584. if (!hmc_fpm_misc->xf_block_size)
  2585. return -EINVAL;
  2586. irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
  2587. get_64bit_val(buf, 80, &temp);
  2588. obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
  2589. obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
  2590. hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
  2591. if (!hmc_fpm_misc->q1_block_size)
  2592. return -EINVAL;
  2593. irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
  2594. get_64bit_val(buf, 112, &temp);
  2595. obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
  2596. obj_info[IRDMA_HMC_IW_PBLE].size = 8;
  2597. get_64bit_val(buf, 120, &temp);
  2598. hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
  2599. hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
  2600. hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
  2601. if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
  2602. return 0;
  2603. irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
  2604. irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
  2605. irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
  2606. get_64bit_val(buf, 136, &temp);
  2607. obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
  2608. obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
  2609. hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
  2610. if (!hmc_fpm_misc->rrf_block_size &&
  2611. obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
  2612. return -EINVAL;
  2613. irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
  2614. irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
  2615. irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
  2616. get_64bit_val(buf, 168, &temp);
  2617. obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
  2618. obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
  2619. hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
  2620. if (!hmc_fpm_misc->ooiscf_block_size &&
  2621. obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
  2622. return -EINVAL;
  2623. return 0;
  2624. }
  2625. /**
  2626. * irdma_sc_find_reg_cq - find cq ctx index
  2627. * @ceq: ceq sc structure
  2628. * @cq: cq sc structure
  2629. */
  2630. static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
  2631. struct irdma_sc_cq *cq)
  2632. {
  2633. u32 i;
  2634. for (i = 0; i < ceq->reg_cq_size; i++) {
  2635. if (cq == ceq->reg_cq[i])
  2636. return i;
  2637. }
  2638. return IRDMA_INVALID_CQ_IDX;
  2639. }
  2640. /**
  2641. * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
  2642. * @ceq: ceq sc structure
  2643. * @cq: cq sc structure
  2644. */
  2645. int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
  2646. {
  2647. unsigned long flags;
  2648. spin_lock_irqsave(&ceq->req_cq_lock, flags);
  2649. if (ceq->reg_cq_size == ceq->elem_cnt) {
  2650. spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
  2651. return -ENOMEM;
  2652. }
  2653. ceq->reg_cq[ceq->reg_cq_size++] = cq;
  2654. spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
  2655. return 0;
  2656. }
  2657. /**
  2658. * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
  2659. * @ceq: ceq sc structure
  2660. * @cq: cq sc structure
  2661. */
  2662. void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
  2663. {
  2664. unsigned long flags;
  2665. u32 cq_ctx_idx;
  2666. spin_lock_irqsave(&ceq->req_cq_lock, flags);
  2667. cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
  2668. if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
  2669. goto exit;
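/*
 * Remove in O(1): shrink the array and move the last registered cq into
 * the vacated slot; the order of reg_cq[] does not matter.
 */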
  2670. ceq->reg_cq_size--;
  2671. if (cq_ctx_idx != ceq->reg_cq_size)
  2672. ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
  2673. ceq->reg_cq[ceq->reg_cq_size] = NULL;
  2674. exit:
  2675. spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
  2676. }
  2677. /**
  2678. * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
  2679. * @cqp: IWARP control queue pair pointer
  2680. * @info: IWARP control queue pair init info pointer
  2681. *
  2682. * Initializes the object and context buffers for a control Queue Pair.
  2683. */
  2684. int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
  2685. struct irdma_cqp_init_info *info)
  2686. {
  2687. u8 hw_sq_size;
  2688. if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
  2689. info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
  2690. ((info->sq_size & (info->sq_size - 1))))
  2691. return -EINVAL;
  2692. hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
  2693. IRDMA_QUEUE_TYPE_CQP);
  2694. cqp->size = sizeof(*cqp);
  2695. cqp->sq_size = info->sq_size;
  2696. cqp->hw_sq_size = hw_sq_size;
  2697. cqp->sq_base = info->sq;
  2698. cqp->host_ctx = info->host_ctx;
  2699. cqp->sq_pa = info->sq_pa;
  2700. cqp->host_ctx_pa = info->host_ctx_pa;
  2701. cqp->dev = info->dev;
  2702. cqp->struct_ver = info->struct_ver;
  2703. cqp->hw_maj_ver = info->hw_maj_ver;
  2704. cqp->hw_min_ver = info->hw_min_ver;
  2705. cqp->scratch_array = info->scratch_array;
  2706. cqp->polarity = 0;
  2707. cqp->en_datacenter_tcp = info->en_datacenter_tcp;
  2708. cqp->ena_vf_count = info->ena_vf_count;
  2709. cqp->hmc_profile = info->hmc_profile;
  2710. cqp->ceqs_per_vf = info->ceqs_per_vf;
  2711. cqp->disable_packed = info->disable_packed;
  2712. cqp->rocev2_rto_policy = info->rocev2_rto_policy;
  2713. cqp->protocol_used = info->protocol_used;
  2714. memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
  2715. info->dev->cqp = cqp;
  2716. IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
  2717. cqp->requested_ops = 0;
  2718. atomic64_set(&cqp->completed_ops, 0);
  2719. /* for the cqp commands backlog. */
  2720. INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
  2721. writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
  2722. writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
  2723. writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
  2724. ibdev_dbg(to_ibdev(cqp->dev),
  2725. "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
  2726. cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
  2727. (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
  2728. return 0;
  2729. }
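/*
 * Minimal call sketch for irdma_sc_cqp_init() (illustrative only; DMA
 * buffer allocation and most optional fields are omitted, and the local
 * names below are assumptions):
 *
 *	struct irdma_cqp_init_info init = {};
 *	int ret;
 *
 *	init.dev = dev;
 *	init.sq_size = IRDMA_CQP_SW_SQSIZE_2048;  (power of two within range)
 *	init.sq = cqp_sq_va;                      (caller-allocated SQ memory)
 *	init.sq_pa = cqp_sq_pa;
 *	init.host_ctx = host_ctx_va;
 *	init.host_ctx_pa = host_ctx_pa;
 *	init.scratch_array = scratch_array;
 *	ret = irdma_sc_cqp_init(cqp, &init);
 */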
  2730. /**
  2731. * irdma_sc_cqp_create - create cqp during bringup
  2732. * @cqp: struct for cqp hw
  2733. * @maj_err: If error, major err number
  2734. * @min_err: If error, minor err number
  2735. */
  2736. int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
  2737. {
  2738. u64 temp;
  2739. u8 hw_rev;
  2740. u32 cnt = 0, p1, p2, val = 0, err_code;
  2741. int ret_code;
  2742. hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
  2743. cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
  2744. IRDMA_SD_BUF_ALIGNMENT);
  2745. cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device,
  2746. cqp->sdbuf.size, &cqp->sdbuf.pa,
  2747. GFP_KERNEL);
  2748. if (!cqp->sdbuf.va)
  2749. return -ENOMEM;
  2750. spin_lock_init(&cqp->dev->cqp_lock);
  2751. temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
  2752. FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
  2753. FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
  2754. FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
  2755. if (hw_rev >= IRDMA_GEN_2) {
  2756. temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
  2757. cqp->rocev2_rto_policy) |
  2758. FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
  2759. cqp->protocol_used);
  2760. }
  2761. set_64bit_val(cqp->host_ctx, 0, temp);
  2762. set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
  2763. temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
  2764. FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
  2765. set_64bit_val(cqp->host_ctx, 16, temp);
  2766. set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
  2767. temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
  2768. FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
  2769. if (hw_rev >= IRDMA_GEN_2) {
  2770. temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
  2771. FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
  2772. }
  2773. set_64bit_val(cqp->host_ctx, 32, temp);
  2774. set_64bit_val(cqp->host_ctx, 40, 0);
  2775. temp = 0;
  2776. if (hw_rev >= IRDMA_GEN_2) {
  2777. temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
  2778. FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
  2779. FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
  2780. }
  2781. set_64bit_val(cqp->host_ctx, 48, temp);
  2782. temp = 0;
  2783. if (hw_rev >= IRDMA_GEN_2) {
  2784. temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
  2785. FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
  2786. FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
  2787. FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
  2788. }
  2789. set_64bit_val(cqp->host_ctx, 56, temp);
  2790. print_hex_dump_debug("WQE: CQP_HOST_CTX WQE", DUMP_PREFIX_OFFSET, 16,
  2791. 8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false);
  2792. p1 = cqp->host_ctx_pa >> 32;
  2793. p2 = (u32)cqp->host_ctx_pa;
  2794. writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
  2795. writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
  2796. do {
  2797. if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
  2798. ret_code = -ETIMEDOUT;
  2799. goto err;
  2800. }
  2801. udelay(cqp->dev->hw_attrs.max_sleep_count);
  2802. val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
  2803. } while (!val);
  2804. if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
  2805. ret_code = -EOPNOTSUPP;
  2806. goto err;
  2807. }
  2808. cqp->process_cqp_sds = irdma_update_sds_noccq;
  2809. return 0;
  2810. err:
  2811. dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
  2812. cqp->sdbuf.va, cqp->sdbuf.pa);
  2813. cqp->sdbuf.va = NULL;
  2814. err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
  2815. *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
  2816. *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
  2817. return ret_code;
  2818. }
  2819. /**
  2820. * irdma_sc_cqp_post_sq - post of cqp's sq
  2821. * @cqp: struct for cqp hw
  2822. */
  2823. void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
  2824. {
  2825. writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
  2826. ibdev_dbg(to_ibdev(cqp->dev),
  2827. "WQE: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
  2828. cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
  2829. }
  2830. /**
  2831. * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
  2832. * and pass back index
  2833. * @cqp: CQP HW structure
  2834. * @scratch: private data for CQP WQE
  2835. * @wqe_idx: WQE index of CQP SQ
  2836. */
  2837. __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
  2838. u32 *wqe_idx)
  2839. {
  2840. __le64 *wqe = NULL;
  2841. int ret_code;
  2842. if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
  2843. ibdev_dbg(to_ibdev(cqp->dev),
  2844. "WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
  2845. cqp->sq_ring.head, cqp->sq_ring.tail,
  2846. cqp->sq_ring.size);
  2847. return NULL;
  2848. }
  2849. IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
  2850. if (ret_code)
  2851. return NULL;
  2852. cqp->requested_ops++;
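/*
 * The polarity (valid) bit flips each time the SQ wraps back to index 0,
 * letting hardware distinguish newly posted WQEs from stale ones.
 */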
  2853. if (!*wqe_idx)
  2854. cqp->polarity = !cqp->polarity;
  2855. wqe = cqp->sq_base[*wqe_idx].elem;
  2856. cqp->scratch_array[*wqe_idx] = scratch;
  2857. IRDMA_CQP_INIT_WQE(wqe);
  2858. return wqe;
  2859. }
  2860. /**
  2861. * irdma_sc_cqp_destroy - destroy cqp during close
  2862. * @cqp: struct for cqp hw
  2863. */
  2864. int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
  2865. {
  2866. u32 cnt = 0, val;
  2867. int ret_code = 0;
  2868. writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
  2869. writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
  2870. do {
  2871. if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
  2872. ret_code = -ETIMEDOUT;
  2873. break;
  2874. }
  2875. udelay(cqp->dev->hw_attrs.max_sleep_count);
  2876. val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
  2877. } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
  2878. dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
  2879. cqp->sdbuf.va, cqp->sdbuf.pa);
  2880. cqp->sdbuf.va = NULL;
  2881. return ret_code;
  2882. }
  2883. /**
  2884. * irdma_sc_ccq_arm - enable intr for control cq
  2885. * @ccq: ccq sc struct
  2886. */
  2887. void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
  2888. {
  2889. u64 temp_val;
  2890. u16 sw_cq_sel;
  2891. u8 arm_next_se;
  2892. u8 arm_seq_num;
  2893. get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
  2894. sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
  2895. arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
  2896. arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
  2897. arm_seq_num++;
  2898. temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
  2899. FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
  2900. FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
  2901. FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
  2902. set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
  2903. dma_wmb(); /* make sure shadow area is updated before arming */
  2904. writel(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
  2905. }
  2906. /**
  2907. * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
  2908. * @ccq: ccq sc struct
  2909. * @info: completion q entry to return
  2910. */
  2911. int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
  2912. struct irdma_ccq_cqe_info *info)
  2913. {
  2914. u64 qp_ctx, temp, temp1;
  2915. __le64 *cqe;
  2916. struct irdma_sc_cqp *cqp;
  2917. u32 wqe_idx;
  2918. u32 error;
  2919. u8 polarity;
  2920. int ret_code = 0;
  2921. if (ccq->cq_uk.avoid_mem_cflct)
  2922. cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
  2923. else
  2924. cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
  2925. get_64bit_val(cqe, 24, &temp);
  2926. polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
  2927. if (polarity != ccq->cq_uk.polarity)
  2928. return -ENOENT;
  2929. /* Ensure CEQE contents are read after valid bit is checked */
  2930. dma_rmb();
  2931. get_64bit_val(cqe, 8, &qp_ctx);
  2932. cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
  2933. info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
  2934. info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
  2935. info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
  2936. if (info->error) {
  2937. info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
  2938. error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
  2939. ibdev_dbg(to_ibdev(cqp->dev),
  2940. "CQP: CQPERRCODES error_code[x%08X]\n", error);
  2941. }
  2942. wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
  2943. info->scratch = cqp->scratch_array[wqe_idx];
  2944. get_64bit_val(cqe, 16, &temp1);
  2945. info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
  2946. get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
  2947. info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
  2948. info->cqp = cqp;
  2949. /* move the head for cq */
  2950. IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
  2951. if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
  2952. ccq->cq_uk.polarity ^= 1;
  2953. /* update cq tail in cq shadow memory also */
  2954. IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
  2955. set_64bit_val(ccq->cq_uk.shadow_area, 0,
  2956. IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
  2957. dma_wmb(); /* make sure shadow area is updated before moving tail */
  2958. IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
  2959. atomic64_inc(&cqp->completed_ops);
  2960. return ret_code;
  2961. }
  2962. /**
  2963. * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
  2964. * @cqp: struct for cqp hw
  2965. * @op_code: cqp opcode for completion
  2966. * @compl_info: completion q entry to return
  2967. */
  2968. int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
  2969. struct irdma_ccq_cqe_info *compl_info)
  2970. {
  2971. struct irdma_ccq_cqe_info info = {};
  2972. struct irdma_sc_cq *ccq;
  2973. int ret_code = 0;
  2974. u32 cnt = 0;
  2975. ccq = cqp->dev->ccq;
  2976. while (1) {
  2977. if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
  2978. return -ETIMEDOUT;
  2979. if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
  2980. udelay(cqp->dev->hw_attrs.max_sleep_count);
  2981. continue;
  2982. }
  2983. if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
  2984. ret_code = -EIO;
  2985. break;
  2986. }
2987. /* make sure op code matches */
  2988. if (op_code == info.op_code)
  2989. break;
  2990. ibdev_dbg(to_ibdev(cqp->dev),
  2991. "WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n",
  2992. op_code, info.op_code);
  2993. }
  2994. if (compl_info)
  2995. memcpy(compl_info, &info, sizeof(*compl_info));
  2996. return ret_code;
  2997. }
  2998. /**
2999. * irdma_sc_manage_hmc_pm_func_table - manage hmc pm function table
3000. * @cqp: struct for cqp hw
3001. * @info: info for the manage function table operation
3002. * @scratch: u64 saved to be used during cqp completion
3003. * @post_sq: flag for cqp db to ring
  3004. */
  3005. static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
  3006. struct irdma_hmc_fcn_info *info,
  3007. u64 scratch, bool post_sq)
  3008. {
  3009. __le64 *wqe;
  3010. u64 hdr;
  3011. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  3012. if (!wqe)
  3013. return -ENOMEM;
  3014. set_64bit_val(wqe, 0, 0);
  3015. set_64bit_val(wqe, 8, 0);
  3016. set_64bit_val(wqe, 16, 0);
  3017. set_64bit_val(wqe, 32, 0);
  3018. set_64bit_val(wqe, 40, 0);
  3019. set_64bit_val(wqe, 48, 0);
  3020. set_64bit_val(wqe, 56, 0);
  3021. hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
  3022. FIELD_PREP(IRDMA_CQPSQ_OPCODE,
  3023. IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
  3024. FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
  3025. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  3026. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3027. set_64bit_val(wqe, 24, hdr);
  3028. print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE",
  3029. DUMP_PREFIX_OFFSET, 16, 8, wqe,
  3030. IRDMA_CQP_WQE_SIZE * 8, false);
  3031. if (post_sq)
  3032. irdma_sc_cqp_post_sq(cqp);
  3033. return 0;
  3034. }
  3035. /**
3036. * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
3037. * for fpm commit
  3038. * @cqp: struct for cqp hw
  3039. */
  3040. static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
  3041. {
  3042. return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
  3043. NULL);
  3044. }
  3045. /**
  3046. * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
  3047. * @cqp: struct for cqp hw
  3048. * @scratch: u64 saved to be used during cqp completion
  3049. * @hmc_fn_id: hmc function id
  3050. * @commit_fpm_mem: Memory for fpm values
  3051. * @post_sq: flag for cqp db to ring
  3052. * @wait_type: poll ccq or cqp registers for cqp completion
  3053. */
  3054. static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
  3055. u8 hmc_fn_id,
  3056. struct irdma_dma_mem *commit_fpm_mem,
  3057. bool post_sq, u8 wait_type)
  3058. {
  3059. __le64 *wqe;
  3060. u64 hdr;
  3061. u32 tail, val, error;
  3062. int ret_code = 0;
  3063. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  3064. if (!wqe)
  3065. return -ENOMEM;
  3066. set_64bit_val(wqe, 16, hmc_fn_id);
  3067. set_64bit_val(wqe, 32, commit_fpm_mem->pa);
  3068. hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
  3069. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
  3070. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  3071. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3072. set_64bit_val(wqe, 24, hdr);
  3073. print_hex_dump_debug("WQE: COMMIT_FPM_VAL WQE", DUMP_PREFIX_OFFSET,
  3074. 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  3075. irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
  3076. if (post_sq) {
  3077. irdma_sc_cqp_post_sq(cqp);
  3078. if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
  3079. ret_code = irdma_cqp_poll_registers(cqp, tail,
  3080. cqp->dev->hw_attrs.max_done_count);
  3081. else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
  3082. ret_code = irdma_sc_commit_fpm_val_done(cqp);
  3083. }
  3084. return ret_code;
  3085. }
  3086. /**
  3087. * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
  3088. * query fpm
  3089. * @cqp: struct for cqp hw
  3090. */
  3091. static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
  3092. {
  3093. return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
  3094. NULL);
  3095. }
  3096. /**
  3097. * irdma_sc_query_fpm_val - cqp wqe query fpm values
  3098. * @cqp: struct for cqp hw
  3099. * @scratch: u64 saved to be used during cqp completion
  3100. * @hmc_fn_id: hmc function id
  3101. * @query_fpm_mem: memory for return fpm values
  3102. * @post_sq: flag for cqp db to ring
  3103. * @wait_type: poll ccq or cqp registers for cqp completion
  3104. */
  3105. static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
  3106. u8 hmc_fn_id,
  3107. struct irdma_dma_mem *query_fpm_mem,
  3108. bool post_sq, u8 wait_type)
  3109. {
  3110. __le64 *wqe;
  3111. u64 hdr;
  3112. u32 tail, val, error;
  3113. int ret_code = 0;
  3114. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  3115. if (!wqe)
  3116. return -ENOMEM;
  3117. set_64bit_val(wqe, 16, hmc_fn_id);
  3118. set_64bit_val(wqe, 32, query_fpm_mem->pa);
  3119. hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
  3120. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  3121. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3122. set_64bit_val(wqe, 24, hdr);
  3123. print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8,
  3124. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  3125. irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
  3126. if (post_sq) {
  3127. irdma_sc_cqp_post_sq(cqp);
  3128. if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
  3129. ret_code = irdma_cqp_poll_registers(cqp, tail,
  3130. cqp->dev->hw_attrs.max_done_count);
  3131. else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
  3132. ret_code = irdma_sc_query_fpm_val_done(cqp);
  3133. }
  3134. return ret_code;
  3135. }
  3136. /**
  3137. * irdma_sc_ceq_init - initialize ceq
  3138. * @ceq: ceq sc structure
  3139. * @info: ceq initialization info
  3140. */
  3141. int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
  3142. struct irdma_ceq_init_info *info)
  3143. {
  3144. u32 pble_obj_cnt;
  3145. if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
  3146. info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
  3147. return -EINVAL;
  3148. if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
  3149. return -EINVAL;
  3150. pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
  3151. if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
  3152. return -EINVAL;
  3153. ceq->size = sizeof(*ceq);
  3154. ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
  3155. ceq->ceq_id = info->ceq_id;
  3156. ceq->dev = info->dev;
  3157. ceq->elem_cnt = info->elem_cnt;
  3158. ceq->ceq_elem_pa = info->ceqe_pa;
  3159. ceq->virtual_map = info->virtual_map;
  3160. ceq->itr_no_expire = info->itr_no_expire;
  3161. ceq->reg_cq = info->reg_cq;
  3162. ceq->reg_cq_size = 0;
  3163. spin_lock_init(&ceq->req_cq_lock);
  3164. ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
  3165. ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
  3166. ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
  3167. ceq->tph_en = info->tph_en;
  3168. ceq->tph_val = info->tph_val;
  3169. ceq->vsi = info->vsi;
  3170. ceq->polarity = 1;
  3171. IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
  3172. ceq->dev->ceq[info->ceq_id] = ceq;
  3173. return 0;
  3174. }
  3175. /**
  3176. * irdma_sc_ceq_create - create ceq wqe
  3177. * @ceq: ceq sc structure
  3178. * @scratch: u64 saved to be used during cqp completion
  3179. * @post_sq: flag for cqp db to ring
  3180. */
  3181. static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
  3182. bool post_sq)
  3183. {
  3184. struct irdma_sc_cqp *cqp;
  3185. __le64 *wqe;
  3186. u64 hdr;
  3187. cqp = ceq->dev->cqp;
  3188. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  3189. if (!wqe)
  3190. return -ENOMEM;
  3191. set_64bit_val(wqe, 16, ceq->elem_cnt);
  3192. set_64bit_val(wqe, 32,
  3193. (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
  3194. set_64bit_val(wqe, 48,
  3195. (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
  3196. set_64bit_val(wqe, 56,
  3197. FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
  3198. FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
  3199. hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
  3200. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
  3201. FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
  3202. FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
  3203. FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
  3204. FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
  3205. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  3206. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3207. set_64bit_val(wqe, 24, hdr);
  3208. print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
  3209. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  3210. if (post_sq)
  3211. irdma_sc_cqp_post_sq(cqp);
  3212. return 0;
  3213. }
  3214. /**
  3215. * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
  3216. * @ceq: ceq sc structure
  3217. */
  3218. static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
  3219. {
  3220. struct irdma_sc_cqp *cqp;
  3221. cqp = ceq->dev->cqp;
  3222. return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
  3223. NULL);
  3224. }
  3225. /**
  3226. * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
  3227. * @ceq: ceq sc structure
  3228. */
  3229. int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
  3230. {
  3231. struct irdma_sc_cqp *cqp;
  3232. if (ceq->reg_cq)
  3233. irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
  3234. cqp = ceq->dev->cqp;
  3235. cqp->process_cqp_sds = irdma_update_sds_noccq;
  3236. return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
  3237. NULL);
  3238. }
  3239. /**
  3240. * irdma_sc_cceq_create - create cceq
  3241. * @ceq: ceq sc structure
  3242. * @scratch: u64 saved to be used during cqp completion
  3243. */
  3244. int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
  3245. {
  3246. int ret_code;
  3247. struct irdma_sc_dev *dev = ceq->dev;
  3248. dev->ccq->vsi = ceq->vsi;
  3249. if (ceq->reg_cq) {
  3250. ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
  3251. if (ret_code)
  3252. return ret_code;
  3253. }
  3254. ret_code = irdma_sc_ceq_create(ceq, scratch, true);
  3255. if (!ret_code)
  3256. return irdma_sc_cceq_create_done(ceq);
  3257. return ret_code;
  3258. }
  3259. /**
  3260. * irdma_sc_ceq_destroy - destroy ceq
  3261. * @ceq: ceq sc structure
  3262. * @scratch: u64 saved to be used during cqp completion
  3263. * @post_sq: flag for cqp db to ring
  3264. */
  3265. int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
  3266. {
  3267. struct irdma_sc_cqp *cqp;
  3268. __le64 *wqe;
  3269. u64 hdr;
  3270. cqp = ceq->dev->cqp;
  3271. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  3272. if (!wqe)
  3273. return -ENOMEM;
  3274. set_64bit_val(wqe, 16, ceq->elem_cnt);
  3275. set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
  3276. hdr = ceq->ceq_id |
  3277. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
  3278. FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
  3279. FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
  3280. FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
  3281. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  3282. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3283. set_64bit_val(wqe, 24, hdr);
  3284. print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
  3285. 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  3286. if (post_sq)
  3287. irdma_sc_cqp_post_sq(cqp);
  3288. return 0;
  3289. }
  3290. /**
  3291. * irdma_sc_process_ceq - process ceq
  3292. * @dev: sc device struct
  3293. * @ceq: ceq sc structure
  3294. *
3295. * The caller is expected to serialize this function with irdma_sc_cleanup_ceqes()
3296. * because these functions manipulate the same ceq
  3297. */
  3298. void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
  3299. {
  3300. u64 temp;
  3301. __le64 *ceqe;
  3302. struct irdma_sc_cq *cq = NULL;
  3303. struct irdma_sc_cq *temp_cq;
  3304. u8 polarity;
  3305. u32 cq_idx;
  3306. unsigned long flags;
  3307. do {
  3308. cq_idx = 0;
  3309. ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
  3310. get_64bit_val(ceqe, 0, &temp);
  3311. polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
  3312. if (polarity != ceq->polarity)
  3313. return NULL;
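/*
 * The ceqe reports the CQ context programmed at CQ create (the cq
 * pointer shifted right by one), so shift left to recover the pointer.
 */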
  3314. temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
  3315. if (!temp_cq) {
  3316. cq_idx = IRDMA_INVALID_CQ_IDX;
  3317. IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
  3318. if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
  3319. ceq->polarity ^= 1;
  3320. continue;
  3321. }
  3322. cq = temp_cq;
  3323. if (ceq->reg_cq) {
  3324. spin_lock_irqsave(&ceq->req_cq_lock, flags);
  3325. cq_idx = irdma_sc_find_reg_cq(ceq, cq);
  3326. spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
  3327. }
  3328. IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
  3329. if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
  3330. ceq->polarity ^= 1;
  3331. } while (cq_idx == IRDMA_INVALID_CQ_IDX);
  3332. if (cq)
  3333. irdma_sc_cq_ack(cq);
  3334. return cq;
  3335. }
  3336. /**
  3337. * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
  3338. * @cq: cq for which the ceqes need to be cleaned up
  3339. * @ceq: ceq ptr
  3340. *
3341. * The function is called after the cq is destroyed to clean up
3342. * its pending ceqe entries. The caller is expected to serialize this
3343. * function with irdma_sc_process_ceq() in interrupt context.
  3344. */
  3345. void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
  3346. {
  3347. struct irdma_sc_cq *next_cq;
  3348. u8 ceq_polarity = ceq->polarity;
  3349. __le64 *ceqe;
  3350. u8 polarity;
  3351. u64 temp;
  3352. int next;
  3353. u32 i;
  3354. next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
  3355. for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
  3356. ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
  3357. get_64bit_val(ceqe, 0, &temp);
  3358. polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
  3359. if (polarity != ceq_polarity)
  3360. return;
  3361. next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
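/*
 * For ceqes that point at the destroyed cq, clear the cq context but
 * keep the valid bit so process_ceq() skips the entry as empty.
 */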
  3362. if (cq == next_cq)
  3363. set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID);
  3364. next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
  3365. if (!next)
  3366. ceq_polarity ^= 1;
  3367. }
  3368. }
  3369. /**
  3370. * irdma_sc_aeq_init - initialize aeq
  3371. * @aeq: aeq structure ptr
  3372. * @info: aeq initialization info
  3373. */
  3374. int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
  3375. struct irdma_aeq_init_info *info)
  3376. {
  3377. u32 pble_obj_cnt;
  3378. if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
  3379. info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
  3380. return -EINVAL;
  3381. pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
  3382. if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
  3383. return -EINVAL;
  3384. aeq->size = sizeof(*aeq);
  3385. aeq->polarity = 1;
  3386. aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
  3387. aeq->dev = info->dev;
  3388. aeq->elem_cnt = info->elem_cnt;
  3389. aeq->aeq_elem_pa = info->aeq_elem_pa;
  3390. IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
  3391. aeq->virtual_map = info->virtual_map;
  3392. aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
  3393. aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
  3394. aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
  3395. aeq->msix_idx = info->msix_idx;
  3396. info->dev->aeq = aeq;
  3397. return 0;
  3398. }
  3399. /**
  3400. * irdma_sc_aeq_create - create aeq
  3401. * @aeq: aeq structure ptr
  3402. * @scratch: u64 saved to be used during cqp completion
  3403. * @post_sq: flag for cqp db to ring
  3404. */
  3405. static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
  3406. bool post_sq)
  3407. {
  3408. __le64 *wqe;
  3409. struct irdma_sc_cqp *cqp;
  3410. u64 hdr;
  3411. cqp = aeq->dev->cqp;
  3412. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  3413. if (!wqe)
  3414. return -ENOMEM;
  3415. set_64bit_val(wqe, 16, aeq->elem_cnt);
  3416. set_64bit_val(wqe, 32,
  3417. (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
  3418. set_64bit_val(wqe, 48,
  3419. (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
  3420. hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
  3421. FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
  3422. FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
  3423. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  3424. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3425. set_64bit_val(wqe, 24, hdr);
  3426. print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
  3427. wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  3428. if (post_sq)
  3429. irdma_sc_cqp_post_sq(cqp);
  3430. return 0;
  3431. }
  3432. /**
  3433. * irdma_sc_aeq_destroy - destroy aeq during close
  3434. * @aeq: aeq structure ptr
  3435. * @scratch: u64 saved to be used during cqp completion
  3436. * @post_sq: flag for cqp db to ring
  3437. */
  3438. static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
  3439. bool post_sq)
  3440. {
  3441. __le64 *wqe;
  3442. struct irdma_sc_cqp *cqp;
  3443. struct irdma_sc_dev *dev;
  3444. u64 hdr;
  3445. dev = aeq->dev;
  3446. writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
  3447. cqp = dev->cqp;
  3448. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  3449. if (!wqe)
  3450. return -ENOMEM;
  3451. set_64bit_val(wqe, 16, aeq->elem_cnt);
  3452. set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
  3453. hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
  3454. FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
  3455. FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
  3456. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  3457. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3458. set_64bit_val(wqe, 24, hdr);
  3459. print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
  3460. 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  3461. if (post_sq)
  3462. irdma_sc_cqp_post_sq(cqp);
  3463. return 0;
  3464. }
  3465. /**
  3466. * irdma_sc_get_next_aeqe - get next aeq entry
  3467. * @aeq: aeq structure ptr
  3468. * @info: aeqe info to be returned
  3469. */
  3470. int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
  3471. struct irdma_aeqe_info *info)
  3472. {
  3473. u64 temp, compl_ctx;
  3474. __le64 *aeqe;
  3475. u16 wqe_idx;
  3476. u8 ae_src;
  3477. u8 polarity;
  3478. aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
  3479. get_64bit_val(aeqe, 8, &temp);
  3480. polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
  3481. if (aeq->polarity != polarity)
  3482. return -ENOENT;
  3483. /* Ensure AEQE contents are read after valid bit is checked */
  3484. dma_rmb();
  3485. get_64bit_val(aeqe, 0, &compl_ctx);
  3486. print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
  3487. aeqe, 16, false);
  3488. ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
  3489. wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
  3490. info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
  3491. ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
  3492. info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
  3493. info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
  3494. info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
  3495. info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
  3496. info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
  3497. info->ae_src = ae_src;
  3498. switch (info->ae_id) {
  3499. case IRDMA_AE_PRIV_OPERATION_DENIED:
  3500. case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
  3501. case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
  3502. case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
  3503. case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
  3504. case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
  3505. case IRDMA_AE_UDA_XMIT_BAD_PD:
  3506. case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
  3507. case IRDMA_AE_BAD_CLOSE:
  3508. case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
  3509. case IRDMA_AE_STAG_ZERO_INVALID:
  3510. case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
  3511. case IRDMA_AE_IB_INVALID_REQUEST:
  3512. case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
  3513. case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
  3514. case IRDMA_AE_IB_REMOTE_OP_ERROR:
  3515. case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
  3516. case IRDMA_AE_DDP_UBE_INVALID_MO:
  3517. case IRDMA_AE_DDP_UBE_INVALID_QN:
  3518. case IRDMA_AE_DDP_NO_L_BIT:
  3519. case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
  3520. case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
  3521. case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
  3522. case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
  3523. case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
  3524. case IRDMA_AE_INVALID_ARP_ENTRY:
  3525. case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
  3526. case IRDMA_AE_STALE_ARP_ENTRY:
  3527. case IRDMA_AE_INVALID_AH_ENTRY:
  3528. case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
  3529. case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
  3530. case IRDMA_AE_LLP_TOO_MANY_RETRIES:
  3531. case IRDMA_AE_LLP_DOUBT_REACHABILITY:
  3532. case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
  3533. case IRDMA_AE_RESET_SENT:
  3534. case IRDMA_AE_TERMINATE_SENT:
  3535. case IRDMA_AE_RESET_NOT_SENT:
  3536. case IRDMA_AE_LCE_QP_CATASTROPHIC:
  3537. case IRDMA_AE_QP_SUSPEND_COMPLETE:
  3538. case IRDMA_AE_UDA_L4LEN_INVALID:
  3539. info->qp = true;
  3540. info->compl_ctx = compl_ctx;
  3541. break;
  3542. case IRDMA_AE_LCE_CQ_CATASTROPHIC:
  3543. info->cq = true;
  3544. info->compl_ctx = compl_ctx << 1;
  3545. ae_src = IRDMA_AE_SOURCE_RSVD;
  3546. break;
  3547. case IRDMA_AE_ROCE_EMPTY_MCG:
  3548. case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
  3549. case IRDMA_AE_ROCE_BAD_MC_QPID:
  3550. case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
  3551. fallthrough;
  3552. case IRDMA_AE_LLP_CONNECTION_RESET:
  3553. case IRDMA_AE_LLP_SYN_RECEIVED:
  3554. case IRDMA_AE_LLP_FIN_RECEIVED:
  3555. case IRDMA_AE_LLP_CLOSE_COMPLETE:
  3556. case IRDMA_AE_LLP_TERMINATE_RECEIVED:
  3557. case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
  3558. ae_src = IRDMA_AE_SOURCE_RSVD;
  3559. info->qp = true;
  3560. info->compl_ctx = compl_ctx;
  3561. break;
  3562. default:
  3563. break;
  3564. }
  3565. switch (ae_src) {
  3566. case IRDMA_AE_SOURCE_RQ:
  3567. case IRDMA_AE_SOURCE_RQ_0011:
  3568. info->qp = true;
  3569. info->rq = true;
  3570. info->wqe_idx = wqe_idx;
  3571. info->compl_ctx = compl_ctx;
  3572. break;
  3573. case IRDMA_AE_SOURCE_CQ:
  3574. case IRDMA_AE_SOURCE_CQ_0110:
  3575. case IRDMA_AE_SOURCE_CQ_1010:
  3576. case IRDMA_AE_SOURCE_CQ_1110:
  3577. info->cq = true;
  3578. info->compl_ctx = compl_ctx << 1;
  3579. break;
  3580. case IRDMA_AE_SOURCE_SQ:
  3581. case IRDMA_AE_SOURCE_SQ_0111:
  3582. info->qp = true;
  3583. info->sq = true;
  3584. info->wqe_idx = wqe_idx;
  3585. info->compl_ctx = compl_ctx;
  3586. break;
  3587. case IRDMA_AE_SOURCE_IN_RR_WR:
  3588. case IRDMA_AE_SOURCE_IN_RR_WR_1011:
  3589. info->qp = true;
  3590. info->compl_ctx = compl_ctx;
  3591. info->in_rdrsp_wr = true;
  3592. break;
  3593. case IRDMA_AE_SOURCE_OUT_RR:
  3594. case IRDMA_AE_SOURCE_OUT_RR_1111:
  3595. info->qp = true;
  3596. info->compl_ctx = compl_ctx;
  3597. info->out_rdrsp = true;
  3598. break;
  3599. case IRDMA_AE_SOURCE_RSVD:
  3600. default:
  3601. break;
  3602. }
  3603. IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
  3604. if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
  3605. aeq->polarity ^= 1;
  3606. return 0;
  3607. }
  3608. /**
  3609. * irdma_sc_repost_aeq_entries - repost completed aeq entries
  3610. * @dev: sc device struct
  3611. * @count: allocate count
  3612. */
  3613. void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
  3614. {
  3615. writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
  3616. }
  3617. /**
  3618. * irdma_sc_ccq_init - initialize control cq
3619. * @cq: sc's cq struct
  3620. * @info: info for control cq initialization
  3621. */
  3622. int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
  3623. {
  3624. u32 pble_obj_cnt;
  3625. if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
  3626. info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
  3627. return -EINVAL;
  3628. if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
  3629. return -EINVAL;
  3630. pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
  3631. if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
  3632. return -EINVAL;
  3633. cq->cq_pa = info->cq_pa;
  3634. cq->cq_uk.cq_base = info->cq_base;
  3635. cq->shadow_area_pa = info->shadow_area_pa;
  3636. cq->cq_uk.shadow_area = info->shadow_area;
  3637. cq->shadow_read_threshold = info->shadow_read_threshold;
  3638. cq->dev = info->dev;
  3639. cq->ceq_id = info->ceq_id;
  3640. cq->cq_uk.cq_size = info->num_elem;
  3641. cq->cq_type = IRDMA_CQ_TYPE_CQP;
  3642. cq->ceqe_mask = info->ceqe_mask;
  3643. IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
  3644. cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
  3645. cq->ceq_id_valid = info->ceq_id_valid;
  3646. cq->tph_en = info->tph_en;
  3647. cq->tph_val = info->tph_val;
  3648. cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
  3649. cq->pbl_list = info->pbl_list;
  3650. cq->virtual_map = info->virtual_map;
  3651. cq->pbl_chunk_size = info->pbl_chunk_size;
  3652. cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
  3653. cq->cq_uk.polarity = true;
  3654. cq->vsi = info->vsi;
  3655. cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
  3656. /* Only applicable to CQs other than CCQ so initialize to zero */
  3657. cq->cq_uk.cqe_alloc_db = NULL;
  3658. info->dev->ccq = cq;
  3659. return 0;
  3660. }
  3661. /**
  3662. * irdma_sc_ccq_create_done - poll cqp for ccq create
  3663. * @ccq: ccq sc struct
  3664. */
  3665. static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
  3666. {
  3667. struct irdma_sc_cqp *cqp;
  3668. cqp = ccq->dev->cqp;
  3669. return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
  3670. }
  3671. /**
  3672. * irdma_sc_ccq_create - create control cq
  3673. * @ccq: ccq sc struct
  3674. * @scratch: u64 saved to be used during cqp completion
3675. * @check_overflow: overflow flag for ccq
  3676. * @post_sq: flag for cqp db to ring
  3677. */
  3678. int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
  3679. bool check_overflow, bool post_sq)
  3680. {
  3681. int ret_code;
  3682. ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
  3683. if (ret_code)
  3684. return ret_code;
  3685. if (post_sq) {
  3686. ret_code = irdma_sc_ccq_create_done(ccq);
  3687. if (ret_code)
  3688. return ret_code;
  3689. }
  3690. ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
  3691. return 0;
  3692. }
  3693. /**
  3694. * irdma_sc_ccq_destroy - destroy ccq during close
  3695. * @ccq: ccq sc struct
  3696. * @scratch: u64 saved to be used during cqp completion
  3697. * @post_sq: flag for cqp db to ring
  3698. */
  3699. int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
  3700. {
  3701. struct irdma_sc_cqp *cqp;
  3702. __le64 *wqe;
  3703. u64 hdr;
  3704. int ret_code = 0;
  3705. u32 tail, val, error;
  3706. cqp = ccq->dev->cqp;
  3707. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  3708. if (!wqe)
  3709. return -ENOMEM;
  3710. set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
  3711. set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
  3712. set_64bit_val(wqe, 40, ccq->shadow_area_pa);
  3713. hdr = ccq->cq_uk.cq_id |
  3714. FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
  3715. IRDMA_CQPSQ_CQ_CEQID) |
  3716. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
  3717. FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
  3718. FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
  3719. FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
  3720. FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
  3721. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  3722. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3723. set_64bit_val(wqe, 24, hdr);
  3724. print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
  3725. 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  3726. irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
  3727. if (post_sq) {
  3728. irdma_sc_cqp_post_sq(cqp);
  3729. ret_code = irdma_cqp_poll_registers(cqp, tail,
  3730. cqp->dev->hw_attrs.max_done_count);
  3731. }
  3732. cqp->process_cqp_sds = irdma_update_sds_noccq;
  3733. return ret_code;
  3734. }
  3735. /**
  3736. * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
3737. * @dev: ptr to irdma_dev struct
  3738. * @hmc_fn_id: hmc function id
  3739. */
  3740. int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
  3741. {
  3742. struct irdma_hmc_info *hmc_info;
  3743. struct irdma_hmc_fpm_misc *hmc_fpm_misc;
  3744. struct irdma_dma_mem query_fpm_mem;
  3745. int ret_code = 0;
  3746. u8 wait_type;
  3747. hmc_info = dev->hmc_info;
  3748. hmc_fpm_misc = &dev->hmc_fpm_misc;
  3749. query_fpm_mem.pa = dev->fpm_query_buf_pa;
  3750. query_fpm_mem.va = dev->fpm_query_buf;
  3751. hmc_info->hmc_fn_id = hmc_fn_id;
  3752. wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
  3753. ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
  3754. &query_fpm_mem, true, wait_type);
  3755. if (ret_code)
  3756. return ret_code;
  3757. /* parse the fpm_query_buf and fill hmc obj info */
  3758. ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
  3759. hmc_fpm_misc);
  3760. print_hex_dump_debug("HMC: QUERY FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
  3761. 8, query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE,
  3762. false);
  3763. return ret_code;
  3764. }
  3765. /**
  3766. * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
  3767. * command and populates fpm base address in hmc_info
3768. * @dev: ptr to irdma_dev struct
  3769. * @hmc_fn_id: hmc function id
  3770. */
  3771. static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
  3772. {
  3773. struct irdma_hmc_info *hmc_info;
  3774. struct irdma_hmc_obj_info *obj_info;
  3775. __le64 *buf;
  3776. struct irdma_dma_mem commit_fpm_mem;
  3777. int ret_code = 0;
  3778. u8 wait_type;
  3779. hmc_info = dev->hmc_info;
  3780. obj_info = hmc_info->hmc_obj;
  3781. buf = dev->fpm_commit_buf;
  3782. set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
  3783. set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
  3784. set_64bit_val(buf, 16, (u64)0); /* RSRVD */
  3785. set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
  3786. set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
  3787. set_64bit_val(buf, 40, (u64)0); /* RSVD */
  3788. set_64bit_val(buf, 48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
  3789. set_64bit_val(buf, 56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
  3790. set_64bit_val(buf, 64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
  3791. set_64bit_val(buf, 72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
  3792. set_64bit_val(buf, 80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
  3793. set_64bit_val(buf, 88,
  3794. (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
  3795. set_64bit_val(buf, 96,
  3796. (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
  3797. set_64bit_val(buf, 104,
  3798. (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
  3799. set_64bit_val(buf, 112,
  3800. (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
  3801. set_64bit_val(buf, 120, (u64)0); /* RSVD */
  3802. set_64bit_val(buf, 128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
  3803. set_64bit_val(buf, 136,
  3804. (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
  3805. set_64bit_val(buf, 144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
  3806. set_64bit_val(buf, 152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
  3807. set_64bit_val(buf, 160,
  3808. (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
  3809. set_64bit_val(buf, 168,
  3810. (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
  3811. commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
  3812. commit_fpm_mem.va = dev->fpm_commit_buf;
  3813. wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
  3814. print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
  3815. 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
  3816. false);
  3817. ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
  3818. &commit_fpm_mem, true, wait_type);
  3819. if (!ret_code)
  3820. irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
  3821. hmc_info->hmc_obj,
  3822. &hmc_info->sd_table.sd_cnt);
  3823. print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
  3824. 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
  3825. false);
  3826. return ret_code;
  3827. }
  3828. /**
3829. * cqp_sds_wqe_fill - fill cqp wqe for sd
  3830. * @cqp: struct for cqp hw
  3831. * @info: sd info for wqe
  3832. * @scratch: u64 saved to be used during cqp completion
  3833. */
  3834. static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
  3835. struct irdma_update_sds_info *info, u64 scratch)
  3836. {
  3837. u64 data;
  3838. u64 hdr;
  3839. __le64 *wqe;
  3840. int mem_entries, wqe_entries;
  3841. struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
  3842. u64 offset = 0;
  3843. u32 wqe_idx;
  3844. wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
  3845. if (!wqe)
  3846. return -ENOMEM;
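/* up to three SD entries travel inline in the WQE; any additional
 * entries are copied into the sdbuf DMA area and referenced by address
 */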
  3847. wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
  3848. mem_entries = info->cnt - wqe_entries;
  3849. if (mem_entries) {
  3850. offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
  3851. memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
  3852. data = (u64)sdbuf->pa + offset;
  3853. } else {
  3854. data = 0;
  3855. }
  3856. data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id);
  3857. set_64bit_val(wqe, 16, data);
  3858. switch (wqe_entries) {
  3859. case 3:
  3860. set_64bit_val(wqe, 48,
  3861. (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
  3862. FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
  3863. set_64bit_val(wqe, 56, info->entry[2].data);
  3864. fallthrough;
  3865. case 2:
  3866. set_64bit_val(wqe, 32,
  3867. (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
  3868. FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
  3869. set_64bit_val(wqe, 40, info->entry[1].data);
  3870. fallthrough;
  3871. case 1:
  3872. set_64bit_val(wqe, 0,
  3873. FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
  3874. set_64bit_val(wqe, 8, info->entry[0].data);
  3875. break;
  3876. default:
  3877. break;
  3878. }
  3879. hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
  3880. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
  3881. FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
  3882. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3883. set_64bit_val(wqe, 24, hdr);
  3884. if (mem_entries)
  3885. print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE Buffer",
  3886. DUMP_PREFIX_OFFSET, 16, 8,
  3887. (char *)sdbuf->va + offset,
  3888. mem_entries << 4, false);
  3889. print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE", DUMP_PREFIX_OFFSET, 16,
  3890. 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  3891. return 0;
  3892. }
  3893. /**
  3894. * irdma_update_pe_sds - cqp wqe for sd
  3895. * @dev: ptr to irdma_dev struct
3896. * @info: sd info for SDs
  3897. * @scratch: u64 saved to be used during cqp completion
  3898. */
  3899. static int irdma_update_pe_sds(struct irdma_sc_dev *dev,
  3900. struct irdma_update_sds_info *info, u64 scratch)
  3901. {
  3902. struct irdma_sc_cqp *cqp = dev->cqp;
  3903. int ret_code;
  3904. ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
  3905. if (!ret_code)
  3906. irdma_sc_cqp_post_sq(cqp);
  3907. return ret_code;
  3908. }
  3909. /**
  3910. * irdma_update_sds_noccq - update sd before ccq created
  3911. * @dev: sc device struct
3912. * @info: sd info for SDs
  3913. */
  3914. int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
  3915. struct irdma_update_sds_info *info)
  3916. {
  3917. u32 error, val, tail;
  3918. struct irdma_sc_cqp *cqp = dev->cqp;
  3919. int ret_code;
  3920. ret_code = cqp_sds_wqe_fill(cqp, info, 0);
  3921. if (ret_code)
  3922. return ret_code;
  3923. irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
  3924. irdma_sc_cqp_post_sq(cqp);
  3925. return irdma_cqp_poll_registers(cqp, tail,
  3926. cqp->dev->hw_attrs.max_done_count);
  3927. }
  3928. /**
  3929. * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
  3930. * @cqp: struct for cqp hw
  3931. * @scratch: u64 saved to be used during cqp completion
  3932. * @hmc_fn_id: hmc function id
  3933. * @post_sq: flag for cqp db to ring
  3934. * @poll_registers: flag to poll register for cqp completion
  3935. */
  3936. int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
  3937. u8 hmc_fn_id, bool post_sq,
  3938. bool poll_registers)
  3939. {
  3940. u64 hdr;
  3941. __le64 *wqe;
  3942. u32 tail, val, error;
  3943. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  3944. if (!wqe)
  3945. return -ENOMEM;
  3946. set_64bit_val(wqe, 16,
  3947. FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
  3948. hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
  3949. IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
  3950. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  3951. dma_wmb(); /* make sure WQE is written before valid bit is set */
  3952. set_64bit_val(wqe, 24, hdr);
  3953. print_hex_dump_debug("WQE: SHMC_PAGES_ALLOCATED WQE",
  3954. DUMP_PREFIX_OFFSET, 16, 8, wqe,
  3955. IRDMA_CQP_WQE_SIZE * 8, false);
  3956. irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
  3957. if (post_sq) {
  3958. irdma_sc_cqp_post_sq(cqp);
  3959. if (poll_registers)
  3960. /* check for cqp sq tail update */
  3961. return irdma_cqp_poll_registers(cqp, tail,
  3962. cqp->dev->hw_attrs.max_done_count);
  3963. else
  3964. return irdma_sc_poll_for_cqp_op_done(cqp,
  3965. IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
  3966. NULL);
  3967. }
  3968. return 0;
  3969. }
  3970. /**
  3971. * irdma_cqp_ring_full - check if cqp ring is full
  3972. * @cqp: struct for cqp hw
  3973. */
  3974. static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
  3975. {
  3976. return IRDMA_RING_FULL_ERR(cqp->sq_ring);
  3977. }
  3978. /**
  3979. * irdma_est_sd - returns approximate number of SDs for HMC
  3980. * @dev: sc device struct
  3981. * @hmc_info: hmc structure, size and count for HMC objects
  3982. */
  3983. static u32 irdma_est_sd(struct irdma_sc_dev *dev,
  3984. struct irdma_hmc_info *hmc_info)
  3985. {
  3986. int i;
  3987. u64 size = 0;
  3988. u64 sd;
  3989. for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
  3990. if (i != IRDMA_HMC_IW_PBLE)
  3991. size += round_up(hmc_info->hmc_obj[i].cnt *
  3992. hmc_info->hmc_obj[i].size, 512);
  3993. size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
  3994. hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
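/* each segment descriptor (SD) maps 2MB (1 << 21); a partial SD counts as one */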
  3995. if (size & 0x1FFFFF)
  3996. sd = (size >> 21) + 1; /* add 1 for remainder */
  3997. else
  3998. sd = size >> 21;
  3999. if (sd > 0xFFFFFFFF) {
  4000. ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
  4001. sd = 0xFFFFFFFF - 1;
  4002. }
  4003. return (u32)sd;
  4004. }
  4005. /**
  4006. * irdma_sc_query_rdma_features_done - poll cqp for query features done
  4007. * @cqp: struct for cqp hw
  4008. */
  4009. static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
  4010. {
  4011. return irdma_sc_poll_for_cqp_op_done(cqp,
  4012. IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
  4013. NULL);
  4014. }
  4015. /**
  4016. * irdma_sc_query_rdma_features - query RDMA features and FW ver
  4017. * @cqp: struct for cqp hw
  4018. * @buf: buffer to hold query info
  4019. * @scratch: u64 saved to be used during cqp completion
  4020. */
  4021. static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
  4022. struct irdma_dma_mem *buf, u64 scratch)
  4023. {
  4024. __le64 *wqe;
  4025. u64 temp;
  4026. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  4027. if (!wqe)
  4028. return -ENOMEM;
  4029. temp = buf->pa;
  4030. set_64bit_val(wqe, 32, temp);
  4031. temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
  4032. cqp->polarity) |
  4033. FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
  4034. FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
  4035. dma_wmb(); /* make sure WQE is written before valid bit is set */
  4036. set_64bit_val(wqe, 24, temp);
  4037. print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
  4038. 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  4039. irdma_sc_cqp_post_sq(cqp);
  4040. return 0;
  4041. }
  4042. /**
  4043. * irdma_get_rdma_features - get RDMA features
  4044. * @dev: sc device struct
  4045. */
  4046. int irdma_get_rdma_features(struct irdma_sc_dev *dev)
  4047. {
  4048. int ret_code;
  4049. struct irdma_dma_mem feat_buf;
  4050. u64 temp;
  4051. u16 byte_idx, feat_type, feat_cnt, feat_idx;
  4052. feat_buf.size = ALIGN(IRDMA_FEATURE_BUF_SIZE,
  4053. IRDMA_FEATURE_BUF_ALIGNMENT);
  4054. feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
  4055. &feat_buf.pa, GFP_KERNEL);
  4056. if (!feat_buf.va)
  4057. return -ENOMEM;
  4058. ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
  4059. if (!ret_code)
  4060. ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
  4061. if (ret_code)
  4062. goto exit;
  4063. get_64bit_val(feat_buf.va, 0, &temp);
  4064. feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
  4065. if (feat_cnt < 2) {
  4066. ret_code = -EINVAL;
  4067. goto exit;
  4068. } else if (feat_cnt > IRDMA_MAX_FEATURES) {
  4069. ibdev_dbg(to_ibdev(dev),
  4070. "DEV: feature buf size insufficient, retrying with larger buffer\n");
  4071. dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
  4072. feat_buf.pa);
  4073. feat_buf.va = NULL;
  4074. feat_buf.size = ALIGN(8 * feat_cnt,
  4075. IRDMA_FEATURE_BUF_ALIGNMENT);
  4076. feat_buf.va = dma_alloc_coherent(dev->hw->device,
  4077. feat_buf.size, &feat_buf.pa,
  4078. GFP_KERNEL);
  4079. if (!feat_buf.va)
  4080. return -ENOMEM;
  4081. ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
  4082. if (!ret_code)
  4083. ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
  4084. if (ret_code)
  4085. goto exit;
  4086. get_64bit_val(feat_buf.va, 0, &temp);
  4087. feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
  4088. if (feat_cnt < 2) {
  4089. ret_code = -EINVAL;
  4090. goto exit;
  4091. }
  4092. }
  4093. print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
  4094. 16, 8, feat_buf.va, feat_cnt * 8, false);
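/* each 8-byte entry carries a feature type and its capability data;
 * entries with unrecognized types are skipped
 */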
  4095. for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
  4096. feat_idx++, byte_idx += 8) {
  4097. get_64bit_val(feat_buf.va, byte_idx, &temp);
  4098. feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
  4099. if (feat_type >= IRDMA_MAX_FEATURES) {
  4100. ibdev_dbg(to_ibdev(dev),
  4101. "DEV: found unrecognized feature type %d\n",
  4102. feat_type);
  4103. continue;
  4104. }
  4105. dev->feature_info[feat_type] = temp;
  4106. }
  4107. exit:
  4108. dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
  4109. feat_buf.pa);
  4110. feat_buf.va = NULL;
  4111. return ret_code;
  4112. }
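/**
 * irdma_q1_cnt - estimate Q1 object count for the requested QP count
 * @dev: sc device struct
 * @hmc_info: hmc structure, size and count for HMC objects
 * @qpwanted: number of QPs desired
 */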
  4113. static u32 irdma_q1_cnt(struct irdma_sc_dev *dev,
  4114. struct irdma_hmc_info *hmc_info, u32 qpwanted)
  4115. {
  4116. u32 q1_cnt;
  4117. if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
  4118. q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
  4119. } else {
  4120. if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
  4121. q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
  4122. else
  4123. q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
  4124. }
  4125. return q1_cnt;
  4126. }
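/**
 * cfg_fpm_value_gen_1 - size the XF object count for gen 1 hardware
 * @dev: sc device struct
 * @hmc_info: hmc structure, size and count for HMC objects
 * @qpwanted: number of QPs desired
 */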
  4127. static void cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
  4128. struct irdma_hmc_info *hmc_info, u32 qpwanted)
  4129. {
  4130. hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
  4131. }
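/**
 * cfg_fpm_value_gen_2 - size gen 2 specific HMC object counts
 * @dev: sc device struct
 * @hmc_info: hmc structure, size and count for HMC objects
 * @qpwanted: number of QPs desired
 */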
  4132. static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
  4133. struct irdma_hmc_info *hmc_info, u32 qpwanted)
  4134. {
  4135. struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
  4136. hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
  4137. 4 * hmc_fpm_misc->xf_block_size * qpwanted;
  4138. hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
  4139. if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
  4140. hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
  4141. if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
  4142. hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
  4143. hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
  4144. hmc_fpm_misc->rrf_block_size;
  4145. if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
  4146. hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
  4147. if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
  4148. hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
  4149. hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
  4150. hmc_fpm_misc->ooiscf_block_size;
  4151. }
  4152. /**
  4153. * irdma_cfg_fpm_val - configure HMC objects
  4154. * @dev: sc device struct
  4155. * @qp_count: desired qp count
  4156. */
  4157. int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
  4158. {
  4159. struct irdma_virt_mem virt_mem;
  4160. u32 i, mem_size;
  4161. u32 qpwanted, mrwanted, pblewanted;
  4162. u32 powerof2, hte;
  4163. u32 sd_needed;
  4164. u32 sd_diff;
  4165. u32 loop_count = 0;
  4166. struct irdma_hmc_info *hmc_info;
  4167. struct irdma_hmc_fpm_misc *hmc_fpm_misc;
  4168. int ret_code = 0;
  4169. hmc_info = dev->hmc_info;
  4170. hmc_fpm_misc = &dev->hmc_fpm_misc;
  4171. ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
  4172. if (ret_code) {
  4173. ibdev_dbg(to_ibdev(dev),
  4174. "HMC: irdma_sc_init_iw_hmc returned error_code = %d\n",
  4175. ret_code);
  4176. return ret_code;
  4177. }
  4178. for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
  4179. hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
  4180. sd_needed = irdma_est_sd(dev, hmc_info);
  4181. ibdev_dbg(to_ibdev(dev),
  4182. "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
  4183. sd_needed, hmc_info->first_sd_index);
  4184. ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
  4185. hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
  4186. qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
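/* round the requested QP count down to a power of two */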
  4187. powerof2 = 1;
  4188. while (powerof2 <= qpwanted)
  4189. powerof2 *= 2;
  4190. powerof2 /= 2;
  4191. qpwanted = powerof2;
  4192. mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
  4193. pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
  4194. ibdev_dbg(to_ibdev(dev),
  4195. "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
  4196. qp_count, hmc_fpm_misc->max_sds,
  4197. hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
  4198. hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
  4199. hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
  4200. hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
  4201. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
  4202. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
  4203. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
  4204. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
  4205. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
  4206. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
  4207. hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
  4208. hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
  4209. hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
  4210. while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
  4211. qpwanted /= 2;
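/* iteratively trim the requested object counts until the estimated
 * SD usage fits within the SDs available to this function
 */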
  4212. do {
  4213. ++loop_count;
  4214. hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
  4215. hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
  4216. min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
  4217. hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
  4218. hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
  4219. hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
  4220. powerof2 = 1;
  4221. while (powerof2 < hte)
  4222. powerof2 *= 2;
  4223. hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
  4224. powerof2 * hmc_fpm_misc->ht_multiplier;
  4225. if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
  4226. cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
  4227. else
  4228. cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
  4229. hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
  4230. hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
  4231. hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
  4232. hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
  4233. hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
  4234. hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
  4235. (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
  4236. hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
  4237. sd_needed = irdma_est_sd(dev, hmc_info);
  4238. ibdev_dbg(to_ibdev(dev),
  4239. "HMC: sd_needed = %d, hmc_fpm_misc->max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
  4240. sd_needed, hmc_fpm_misc->max_sds, mrwanted,
  4241. pblewanted, qpwanted);
  4242. /* Do not reduce resources further. All objects fit with max SDs */
  4243. if (sd_needed <= hmc_fpm_misc->max_sds)
  4244. break;
  4245. sd_diff = sd_needed - hmc_fpm_misc->max_sds;
  4246. if (sd_diff > 128) {
  4247. if (!(loop_count % 2) && qpwanted > 128) {
  4248. qpwanted /= 2;
  4249. } else {
  4250. mrwanted /= 2;
  4251. pblewanted /= 2;
  4252. }
  4253. continue;
  4254. }
  4255. if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
  4256. pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
  4257. pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
  4258. continue;
  4259. } else if (pblewanted > (100 * FPM_MULTIPLIER)) {
  4260. pblewanted -= 10 * FPM_MULTIPLIER;
  4261. } else if (pblewanted > FPM_MULTIPLIER) {
  4262. pblewanted -= FPM_MULTIPLIER;
  4263. } else if (qpwanted <= 128) {
  4264. if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
  4265. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
  4266. if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
  4267. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
  4268. }
  4269. if (mrwanted > FPM_MULTIPLIER)
  4270. mrwanted -= FPM_MULTIPLIER;
  4271. if (!(loop_count % 10) && qpwanted > 128) {
  4272. qpwanted /= 2;
  4273. if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
  4274. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
  4275. }
  4276. } while (loop_count < 2000);
  4277. if (sd_needed > hmc_fpm_misc->max_sds) {
  4278. ibdev_dbg(to_ibdev(dev),
  4279. "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
  4280. loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
  4281. return -EINVAL;
  4282. }
  4283. if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
  4284. pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
  4285. FPM_MULTIPLIER;
  4286. hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
  4287. sd_needed = irdma_est_sd(dev, hmc_info);
  4288. }
  4289. ibdev_dbg(to_ibdev(dev),
  4290. "HMC: loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
  4291. loop_count, sd_needed,
  4292. hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
  4293. hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
  4294. hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
  4295. hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
  4296. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
  4297. hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
  4298. hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
  4299. ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
  4300. if (ret_code) {
  4301. ibdev_dbg(to_ibdev(dev),
  4302. "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
  4303. readl(dev->hw_regs[IRDMA_CQPERRCODES]));
  4304. return ret_code;
  4305. }
  4306. mem_size = sizeof(struct irdma_hmc_sd_entry) *
  4307. (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
  4308. virt_mem.size = mem_size;
  4309. virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
  4310. if (!virt_mem.va) {
  4311. ibdev_dbg(to_ibdev(dev),
  4312. "HMC: failed to allocate memory for sd_entry buffer\n");
  4313. return -ENOMEM;
  4314. }
  4315. hmc_info->sd_table.sd_entry = virt_mem.va;
  4316. return ret_code;
  4317. }
  4318. /**
4319. * irdma_exec_cqp_cmd - execute cqp cmd when wqes are available
  4320. * @dev: rdma device
  4321. * @pcmdinfo: cqp command info
  4322. */
  4323. static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
  4324. struct cqp_cmds_info *pcmdinfo)
  4325. {
  4326. int status;
  4327. struct irdma_dma_mem val_mem;
  4328. bool alloc = false;
  4329. dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
  4330. switch (pcmdinfo->cqp_cmd) {
  4331. case IRDMA_OP_CEQ_DESTROY:
  4332. status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
  4333. pcmdinfo->in.u.ceq_destroy.scratch,
  4334. pcmdinfo->post_sq);
  4335. break;
  4336. case IRDMA_OP_AEQ_DESTROY:
  4337. status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
  4338. pcmdinfo->in.u.aeq_destroy.scratch,
  4339. pcmdinfo->post_sq);
  4340. break;
  4341. case IRDMA_OP_CEQ_CREATE:
  4342. status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
  4343. pcmdinfo->in.u.ceq_create.scratch,
  4344. pcmdinfo->post_sq);
  4345. break;
  4346. case IRDMA_OP_AEQ_CREATE:
  4347. status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
  4348. pcmdinfo->in.u.aeq_create.scratch,
  4349. pcmdinfo->post_sq);
  4350. break;
  4351. case IRDMA_OP_QP_UPLOAD_CONTEXT:
  4352. status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
  4353. &pcmdinfo->in.u.qp_upload_context.info,
  4354. pcmdinfo->in.u.qp_upload_context.scratch,
  4355. pcmdinfo->post_sq);
  4356. break;
  4357. case IRDMA_OP_CQ_CREATE:
  4358. status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
  4359. pcmdinfo->in.u.cq_create.scratch,
  4360. pcmdinfo->in.u.cq_create.check_overflow,
  4361. pcmdinfo->post_sq);
  4362. break;
  4363. case IRDMA_OP_CQ_MODIFY:
  4364. status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
  4365. &pcmdinfo->in.u.cq_modify.info,
  4366. pcmdinfo->in.u.cq_modify.scratch,
  4367. pcmdinfo->post_sq);
  4368. break;
  4369. case IRDMA_OP_CQ_DESTROY:
  4370. status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
  4371. pcmdinfo->in.u.cq_destroy.scratch,
  4372. pcmdinfo->post_sq);
  4373. break;
  4374. case IRDMA_OP_QP_FLUSH_WQES:
  4375. status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
  4376. &pcmdinfo->in.u.qp_flush_wqes.info,
  4377. pcmdinfo->in.u.qp_flush_wqes.scratch,
  4378. pcmdinfo->post_sq);
  4379. break;
  4380. case IRDMA_OP_GEN_AE:
  4381. status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
  4382. &pcmdinfo->in.u.gen_ae.info,
  4383. pcmdinfo->in.u.gen_ae.scratch,
  4384. pcmdinfo->post_sq);
  4385. break;
  4386. case IRDMA_OP_MANAGE_PUSH_PAGE:
  4387. status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
  4388. &pcmdinfo->in.u.manage_push_page.info,
  4389. pcmdinfo->in.u.manage_push_page.scratch,
  4390. pcmdinfo->post_sq);
  4391. break;
  4392. case IRDMA_OP_UPDATE_PE_SDS:
  4393. status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
  4394. &pcmdinfo->in.u.update_pe_sds.info,
  4395. pcmdinfo->in.u.update_pe_sds.scratch);
  4396. break;
  4397. case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
  4398. /* switch to calling through the call table */
  4399. status =
  4400. irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
  4401. &pcmdinfo->in.u.manage_hmc_pm.info,
  4402. pcmdinfo->in.u.manage_hmc_pm.scratch,
  4403. true);
  4404. break;
  4405. case IRDMA_OP_SUSPEND:
  4406. status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
  4407. pcmdinfo->in.u.suspend_resume.qp,
  4408. pcmdinfo->in.u.suspend_resume.scratch);
  4409. break;
  4410. case IRDMA_OP_RESUME:
  4411. status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
  4412. pcmdinfo->in.u.suspend_resume.qp,
  4413. pcmdinfo->in.u.suspend_resume.scratch);
  4414. break;
  4415. case IRDMA_OP_QUERY_FPM_VAL:
  4416. val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
  4417. val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
  4418. status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
  4419. pcmdinfo->in.u.query_fpm_val.scratch,
  4420. pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
  4421. &val_mem, true, IRDMA_CQP_WAIT_EVENT);
  4422. break;
  4423. case IRDMA_OP_COMMIT_FPM_VAL:
  4424. val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
  4425. val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
  4426. status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
  4427. pcmdinfo->in.u.commit_fpm_val.scratch,
  4428. pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
  4429. &val_mem,
  4430. true,
  4431. IRDMA_CQP_WAIT_EVENT);
  4432. break;
  4433. case IRDMA_OP_STATS_ALLOCATE:
  4434. alloc = true;
  4435. fallthrough;
  4436. case IRDMA_OP_STATS_FREE:
  4437. status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
  4438. &pcmdinfo->in.u.stats_manage.info,
  4439. alloc,
  4440. pcmdinfo->in.u.stats_manage.scratch);
  4441. break;
  4442. case IRDMA_OP_STATS_GATHER:
  4443. status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
  4444. &pcmdinfo->in.u.stats_gather.info,
  4445. pcmdinfo->in.u.stats_gather.scratch);
  4446. break;
  4447. case IRDMA_OP_WS_MODIFY_NODE:
  4448. status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
  4449. &pcmdinfo->in.u.ws_node.info,
  4450. IRDMA_MODIFY_NODE,
  4451. pcmdinfo->in.u.ws_node.scratch);
  4452. break;
  4453. case IRDMA_OP_WS_DELETE_NODE:
  4454. status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
  4455. &pcmdinfo->in.u.ws_node.info,
  4456. IRDMA_DEL_NODE,
  4457. pcmdinfo->in.u.ws_node.scratch);
  4458. break;
  4459. case IRDMA_OP_WS_ADD_NODE:
  4460. status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
  4461. &pcmdinfo->in.u.ws_node.info,
  4462. IRDMA_ADD_NODE,
  4463. pcmdinfo->in.u.ws_node.scratch);
  4464. break;
  4465. case IRDMA_OP_SET_UP_MAP:
  4466. status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
  4467. &pcmdinfo->in.u.up_map.info,
  4468. pcmdinfo->in.u.up_map.scratch);
  4469. break;
  4470. case IRDMA_OP_QUERY_RDMA_FEATURES:
  4471. status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
  4472. &pcmdinfo->in.u.query_rdma.query_buff_mem,
  4473. pcmdinfo->in.u.query_rdma.scratch);
  4474. break;
  4475. case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
  4476. status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
  4477. pcmdinfo->in.u.del_arp_cache_entry.scratch,
  4478. pcmdinfo->in.u.del_arp_cache_entry.arp_index,
  4479. pcmdinfo->post_sq);
  4480. break;
  4481. case IRDMA_OP_MANAGE_APBVT_ENTRY:
  4482. status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
  4483. &pcmdinfo->in.u.manage_apbvt_entry.info,
  4484. pcmdinfo->in.u.manage_apbvt_entry.scratch,
  4485. pcmdinfo->post_sq);
  4486. break;
  4487. case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
  4488. status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
  4489. &pcmdinfo->in.u.manage_qhash_table_entry.info,
  4490. pcmdinfo->in.u.manage_qhash_table_entry.scratch,
  4491. pcmdinfo->post_sq);
  4492. break;
  4493. case IRDMA_OP_QP_MODIFY:
  4494. status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
  4495. &pcmdinfo->in.u.qp_modify.info,
  4496. pcmdinfo->in.u.qp_modify.scratch,
  4497. pcmdinfo->post_sq);
  4498. break;
  4499. case IRDMA_OP_QP_CREATE:
  4500. status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
  4501. &pcmdinfo->in.u.qp_create.info,
  4502. pcmdinfo->in.u.qp_create.scratch,
  4503. pcmdinfo->post_sq);
  4504. break;
  4505. case IRDMA_OP_QP_DESTROY:
  4506. status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
  4507. pcmdinfo->in.u.qp_destroy.scratch,
  4508. pcmdinfo->in.u.qp_destroy.remove_hash_idx,
  4509. pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
  4510. pcmdinfo->post_sq);
  4511. break;
  4512. case IRDMA_OP_ALLOC_STAG:
  4513. status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
  4514. &pcmdinfo->in.u.alloc_stag.info,
  4515. pcmdinfo->in.u.alloc_stag.scratch,
  4516. pcmdinfo->post_sq);
  4517. break;
  4518. case IRDMA_OP_MR_REG_NON_SHARED:
  4519. status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
  4520. &pcmdinfo->in.u.mr_reg_non_shared.info,
  4521. pcmdinfo->in.u.mr_reg_non_shared.scratch,
  4522. pcmdinfo->post_sq);
  4523. break;
  4524. case IRDMA_OP_DEALLOC_STAG:
  4525. status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
  4526. &pcmdinfo->in.u.dealloc_stag.info,
  4527. pcmdinfo->in.u.dealloc_stag.scratch,
  4528. pcmdinfo->post_sq);
  4529. break;
  4530. case IRDMA_OP_MW_ALLOC:
  4531. status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
  4532. &pcmdinfo->in.u.mw_alloc.info,
  4533. pcmdinfo->in.u.mw_alloc.scratch,
  4534. pcmdinfo->post_sq);
  4535. break;
  4536. case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
  4537. status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
  4538. &pcmdinfo->in.u.add_arp_cache_entry.info,
  4539. pcmdinfo->in.u.add_arp_cache_entry.scratch,
  4540. pcmdinfo->post_sq);
  4541. break;
  4542. case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
  4543. status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
  4544. pcmdinfo->in.u.alloc_local_mac_entry.scratch,
  4545. pcmdinfo->post_sq);
  4546. break;
  4547. case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
  4548. status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
  4549. &pcmdinfo->in.u.add_local_mac_entry.info,
  4550. pcmdinfo->in.u.add_local_mac_entry.scratch,
  4551. pcmdinfo->post_sq);
  4552. break;
  4553. case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
  4554. status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
  4555. pcmdinfo->in.u.del_local_mac_entry.scratch,
  4556. pcmdinfo->in.u.del_local_mac_entry.entry_idx,
  4557. pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
  4558. pcmdinfo->post_sq);
  4559. break;
  4560. case IRDMA_OP_AH_CREATE:
  4561. status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
  4562. &pcmdinfo->in.u.ah_create.info,
  4563. pcmdinfo->in.u.ah_create.scratch);
  4564. break;
  4565. case IRDMA_OP_AH_DESTROY:
  4566. status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
  4567. &pcmdinfo->in.u.ah_destroy.info,
  4568. pcmdinfo->in.u.ah_destroy.scratch);
  4569. break;
  4570. case IRDMA_OP_MC_CREATE:
  4571. status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
  4572. &pcmdinfo->in.u.mc_create.info,
  4573. pcmdinfo->in.u.mc_create.scratch);
  4574. break;
  4575. case IRDMA_OP_MC_DESTROY:
  4576. status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
  4577. &pcmdinfo->in.u.mc_destroy.info,
  4578. pcmdinfo->in.u.mc_destroy.scratch);
  4579. break;
  4580. case IRDMA_OP_MC_MODIFY:
  4581. status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
  4582. &pcmdinfo->in.u.mc_modify.info,
  4583. pcmdinfo->in.u.mc_modify.scratch);
  4584. break;
  4585. default:
  4586. status = -EOPNOTSUPP;
  4587. break;
  4588. }
  4589. return status;
  4590. }
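/*
 * Illustrative sketch only (not a code path in this file): a caller fills a
 * struct cqp_cmds_info and submits it through irdma_process_cqp_cmd(), which
 * either executes it immediately or queues it for irdma_process_bh(). The
 * structure is normally embedded in a longer-lived request object, since it
 * may sit on the backlog list. Field names follow the IRDMA_OP_CQ_CREATE
 * case above.
 *
 *	cmd->cqp_cmd = IRDMA_OP_CQ_CREATE;
 *	cmd->post_sq = 1;
 *	cmd->in.u.cq_create.cq = cq;
 *	cmd->in.u.cq_create.check_overflow = true;
 *	cmd->in.u.cq_create.scratch = scratch;
 *	status = irdma_process_cqp_cmd(dev, cmd);
 */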
  4591. /**
  4592. * irdma_process_cqp_cmd - process all cqp commands
  4593. * @dev: sc device struct
  4594. * @pcmdinfo: cqp command info
  4595. */
  4596. int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
  4597. struct cqp_cmds_info *pcmdinfo)
  4598. {
  4599. int status = 0;
  4600. unsigned long flags;
  4601. spin_lock_irqsave(&dev->cqp_lock, flags);
  4602. if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
  4603. status = irdma_exec_cqp_cmd(dev, pcmdinfo);
  4604. else
  4605. list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
  4606. spin_unlock_irqrestore(&dev->cqp_lock, flags);
  4607. return status;
  4608. }
  4609. /**
  4610. * irdma_process_bh - called from tasklet for cqp list
  4611. * @dev: sc device struct
  4612. */
  4613. int irdma_process_bh(struct irdma_sc_dev *dev)
  4614. {
  4615. int status = 0;
  4616. struct cqp_cmds_info *pcmdinfo;
  4617. unsigned long flags;
  4618. spin_lock_irqsave(&dev->cqp_lock, flags);
  4619. while (!list_empty(&dev->cqp_cmd_head) &&
  4620. !irdma_cqp_ring_full(dev->cqp)) {
  4621. pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
  4622. status = irdma_exec_cqp_cmd(dev, pcmdinfo);
  4623. if (status)
  4624. break;
  4625. }
  4626. spin_unlock_irqrestore(&dev->cqp_lock, flags);
  4627. return status;
  4628. }
  4629. /**
4630. * irdma_cfg_aeq - Configure AEQ interrupt
  4631. * @dev: pointer to the device structure
  4632. * @idx: vector index
4633. * @enable: true to enable, false to disable
  4634. */
  4635. void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
  4636. {
  4637. u32 reg_val;
  4638. reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
  4639. FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
  4640. FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, 3);
  4641. writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
  4642. }
  4643. /**
  4644. * sc_vsi_update_stats - Update statistics
  4645. * @vsi: sc_vsi instance to update
  4646. */
  4647. void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
  4648. {
  4649. struct irdma_gather_stats *gather_stats;
  4650. struct irdma_gather_stats *last_gather_stats;
  4651. gather_stats = vsi->pestat->gather_info.gather_stats_va;
  4652. last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
  4653. irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
  4654. last_gather_stats);
  4655. }
  4656. /**
  4657. * irdma_wait_pe_ready - Check if firmware is ready
  4658. * @dev: provides access to registers
  4659. */
  4660. static int irdma_wait_pe_ready(struct irdma_sc_dev *dev)
  4661. {
  4662. u32 statuscpu0;
  4663. u32 statuscpu1;
  4664. u32 statuscpu2;
  4665. u32 retrycount = 0;
  4666. do {
  4667. statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
  4668. statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
  4669. statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
  4670. if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
  4671. statuscpu2 == 0x80)
  4672. return 0;
  4673. mdelay(1000);
  4674. } while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
  4675. return -1;
  4676. }
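/**
 * irdma_sc_init_hw - perform generation-specific hardware init
 * @dev: sc device struct
 */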
  4677. static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
  4678. {
  4679. switch (dev->hw_attrs.uk_attrs.hw_rev) {
  4680. case IRDMA_GEN_1:
  4681. i40iw_init_hw(dev);
  4682. break;
  4683. case IRDMA_GEN_2:
  4684. icrdma_init_hw(dev);
  4685. break;
  4686. }
  4687. }
  4688. /**
  4689. * irdma_sc_dev_init - Initialize control part of device
  4690. * @ver: version
  4691. * @dev: Device pointer
  4692. * @info: Device init info
  4693. */
  4694. int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
  4695. struct irdma_device_init_info *info)
  4696. {
  4697. u32 val;
  4698. int ret_code = 0;
  4699. u8 db_size;
  4700. INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
  4701. mutex_init(&dev->ws_mutex);
  4702. dev->hmc_fn_id = info->hmc_fn_id;
  4703. dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
  4704. dev->fpm_query_buf = info->fpm_query_buf;
  4705. dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
  4706. dev->fpm_commit_buf = info->fpm_commit_buf;
  4707. dev->hw = info->hw;
  4708. dev->hw->hw_addr = info->bar0;
  4709. /* Setup the hardware limits, hmc may limit further */
  4710. dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
  4711. dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
  4712. dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
  4713. dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
  4714. dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
  4715. dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
  4716. dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
  4717. dev->hw_attrs.uk_attrs.max_hw_wq_frags = IRDMA_MAX_WQ_FRAGMENT_COUNT;
  4718. dev->hw_attrs.uk_attrs.max_hw_read_sges = IRDMA_MAX_SGE_RD;
  4719. dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
  4720. dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
  4721. dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
  4722. dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
  4723. dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
  4724. dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
  4725. dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
  4726. dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
  4727. dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
  4728. dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
  4729. dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
  4730. dev->hw_attrs.max_pe_ready_count = 14;
  4731. dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
  4732. dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
  4733. dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
  4734. dev->hw_attrs.uk_attrs.hw_rev = ver;
  4735. irdma_sc_init_hw(dev);
  4736. if (irdma_wait_pe_ready(dev))
  4737. return -ETIMEDOUT;
  4738. val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
  4739. db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
  4740. if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
  4741. ibdev_dbg(to_ibdev(dev),
  4742. "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
  4743. val, db_size);
  4744. return -ENODEV;
  4745. }
  4746. dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
  4747. return ret_code;
  4748. }
  4749. /**
  4750. * irdma_update_stats - Update statistics
  4751. * @hw_stats: hw_stats instance to update
  4752. * @gather_stats: updated stat counters
  4753. * @last_gather_stats: last stat counters
  4754. */
  4755. void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
  4756. struct irdma_gather_stats *gather_stats,
  4757. struct irdma_gather_stats *last_gather_stats)
  4758. {
  4759. u64 *stats_val = hw_stats->stats_val_32;
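/* accumulate deltas for the 32-bit counters first; the 48-bit counters
 * follow once stats_val is repointed at stats_val_64 below
 */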
  4760. stats_val[IRDMA_HW_STAT_INDEX_RXVLANERR] +=
  4761. IRDMA_STATS_DELTA(gather_stats->rxvlanerr,
  4762. last_gather_stats->rxvlanerr,
  4763. IRDMA_MAX_STATS_32);
  4764. stats_val[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] +=
  4765. IRDMA_STATS_DELTA(gather_stats->ip4rxdiscard,
  4766. last_gather_stats->ip4rxdiscard,
  4767. IRDMA_MAX_STATS_32);
  4768. stats_val[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] +=
  4769. IRDMA_STATS_DELTA(gather_stats->ip4rxtrunc,
  4770. last_gather_stats->ip4rxtrunc,
  4771. IRDMA_MAX_STATS_32);
  4772. stats_val[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] +=
  4773. IRDMA_STATS_DELTA(gather_stats->ip4txnoroute,
  4774. last_gather_stats->ip4txnoroute,
  4775. IRDMA_MAX_STATS_32);
  4776. stats_val[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] +=
  4777. IRDMA_STATS_DELTA(gather_stats->ip6rxdiscard,
  4778. last_gather_stats->ip6rxdiscard,
  4779. IRDMA_MAX_STATS_32);
  4780. stats_val[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] +=
  4781. IRDMA_STATS_DELTA(gather_stats->ip6rxtrunc,
  4782. last_gather_stats->ip6rxtrunc,
  4783. IRDMA_MAX_STATS_32);
  4784. stats_val[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] +=
  4785. IRDMA_STATS_DELTA(gather_stats->ip6txnoroute,
  4786. last_gather_stats->ip6txnoroute,
  4787. IRDMA_MAX_STATS_32);
  4788. stats_val[IRDMA_HW_STAT_INDEX_TCPRTXSEG] +=
  4789. IRDMA_STATS_DELTA(gather_stats->tcprtxseg,
  4790. last_gather_stats->tcprtxseg,
  4791. IRDMA_MAX_STATS_32);
  4792. stats_val[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] +=
  4793. IRDMA_STATS_DELTA(gather_stats->tcprxopterr,
  4794. last_gather_stats->tcprxopterr,
  4795. IRDMA_MAX_STATS_32);
  4796. stats_val[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] +=
  4797. IRDMA_STATS_DELTA(gather_stats->tcprxprotoerr,
  4798. last_gather_stats->tcprxprotoerr,
  4799. IRDMA_MAX_STATS_32);
  4800. stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] +=
  4801. IRDMA_STATS_DELTA(gather_stats->rxrpcnphandled,
  4802. last_gather_stats->rxrpcnphandled,
  4803. IRDMA_MAX_STATS_32);
  4804. stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] +=
  4805. IRDMA_STATS_DELTA(gather_stats->rxrpcnpignored,
  4806. last_gather_stats->rxrpcnpignored,
  4807. IRDMA_MAX_STATS_32);
  4808. stats_val[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] +=
  4809. IRDMA_STATS_DELTA(gather_stats->txnpcnpsent,
  4810. last_gather_stats->txnpcnpsent,
  4811. IRDMA_MAX_STATS_32);
  4812. stats_val = hw_stats->stats_val_64;
  4813. stats_val[IRDMA_HW_STAT_INDEX_IP4RXOCTS] +=
  4814. IRDMA_STATS_DELTA(gather_stats->ip4rxocts,
  4815. last_gather_stats->ip4rxocts,
  4816. IRDMA_MAX_STATS_48);
  4817. stats_val[IRDMA_HW_STAT_INDEX_IP4RXPKTS] +=
  4818. IRDMA_STATS_DELTA(gather_stats->ip4rxpkts,
  4819. last_gather_stats->ip4rxpkts,
  4820. IRDMA_MAX_STATS_48);
  4821. stats_val[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] +=
  4822. IRDMA_STATS_DELTA(gather_stats->ip4txfrag,
  4823. last_gather_stats->ip4txfrag,
  4824. IRDMA_MAX_STATS_48);
  4825. stats_val[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] +=
  4826. IRDMA_STATS_DELTA(gather_stats->ip4rxmcpkts,
  4827. last_gather_stats->ip4rxmcpkts,
  4828. IRDMA_MAX_STATS_48);
  4829. stats_val[IRDMA_HW_STAT_INDEX_IP4TXOCTS] +=
  4830. IRDMA_STATS_DELTA(gather_stats->ip4txocts,
  4831. last_gather_stats->ip4txocts,
  4832. IRDMA_MAX_STATS_48);
  4833. stats_val[IRDMA_HW_STAT_INDEX_IP4TXPKTS] +=
  4834. IRDMA_STATS_DELTA(gather_stats->ip4txpkts,
  4835. last_gather_stats->ip4txpkts,
  4836. IRDMA_MAX_STATS_48);
  4837. stats_val[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] +=
  4838. IRDMA_STATS_DELTA(gather_stats->ip4txfrag,
  4839. last_gather_stats->ip4txfrag,
  4840. IRDMA_MAX_STATS_48);
  4841. stats_val[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] +=
  4842. IRDMA_STATS_DELTA(gather_stats->ip4txmcpkts,
  4843. last_gather_stats->ip4txmcpkts,
  4844. IRDMA_MAX_STATS_48);
  4845. stats_val[IRDMA_HW_STAT_INDEX_IP6RXOCTS] +=
  4846. IRDMA_STATS_DELTA(gather_stats->ip6rxocts,
  4847. last_gather_stats->ip6rxocts,
  4848. IRDMA_MAX_STATS_48);
  4849. stats_val[IRDMA_HW_STAT_INDEX_IP6RXPKTS] +=
  4850. IRDMA_STATS_DELTA(gather_stats->ip6rxpkts,
  4851. last_gather_stats->ip6rxpkts,
  4852. IRDMA_MAX_STATS_48);
  4853. stats_val[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] +=
  4854. IRDMA_STATS_DELTA(gather_stats->ip6txfrags,
  4855. last_gather_stats->ip6txfrags,
  4856. IRDMA_MAX_STATS_48);
  4857. stats_val[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] +=
  4858. IRDMA_STATS_DELTA(gather_stats->ip6rxmcpkts,
  4859. last_gather_stats->ip6rxmcpkts,
  4860. IRDMA_MAX_STATS_48);
  4861. stats_val[IRDMA_HW_STAT_INDEX_IP6TXOCTS] +=
  4862. IRDMA_STATS_DELTA(gather_stats->ip6txocts,
  4863. last_gather_stats->ip6txocts,
  4864. IRDMA_MAX_STATS_48);
  4865. stats_val[IRDMA_HW_STAT_INDEX_IP6TXPKTS] +=
  4866. IRDMA_STATS_DELTA(gather_stats->ip6txpkts,
  4867. last_gather_stats->ip6txpkts,
  4868. IRDMA_MAX_STATS_48);
  4869. stats_val[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] +=
  4870. IRDMA_STATS_DELTA(gather_stats->ip6txfrags,
  4871. last_gather_stats->ip6txfrags,
  4872. IRDMA_MAX_STATS_48);
  4873. stats_val[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] +=
  4874. IRDMA_STATS_DELTA(gather_stats->ip6txmcpkts,
  4875. last_gather_stats->ip6txmcpkts,
  4876. IRDMA_MAX_STATS_48);
  4877. stats_val[IRDMA_HW_STAT_INDEX_TCPRXSEGS] +=
  4878. IRDMA_STATS_DELTA(gather_stats->tcprxsegs,
  4879. last_gather_stats->tcprxsegs,
  4880. IRDMA_MAX_STATS_48);
  4881. stats_val[IRDMA_HW_STAT_INDEX_TCPTXSEG] +=
  4882. IRDMA_STATS_DELTA(gather_stats->tcptxsegs,
  4883. last_gather_stats->tcptxsegs,
  4884. IRDMA_MAX_STATS_48);
  4885. stats_val[IRDMA_HW_STAT_INDEX_RDMARXRDS] +=
  4886. IRDMA_STATS_DELTA(gather_stats->rdmarxrds,
  4887. last_gather_stats->rdmarxrds,
  4888. IRDMA_MAX_STATS_48);
  4889. stats_val[IRDMA_HW_STAT_INDEX_RDMARXSNDS] +=
  4890. IRDMA_STATS_DELTA(gather_stats->rdmarxsnds,
  4891. last_gather_stats->rdmarxsnds,
  4892. IRDMA_MAX_STATS_48);
  4893. stats_val[IRDMA_HW_STAT_INDEX_RDMARXWRS] +=
  4894. IRDMA_STATS_DELTA(gather_stats->rdmarxwrs,
  4895. last_gather_stats->rdmarxwrs,
  4896. IRDMA_MAX_STATS_48);
  4897. stats_val[IRDMA_HW_STAT_INDEX_RDMATXRDS] +=
  4898. IRDMA_STATS_DELTA(gather_stats->rdmatxrds,
  4899. last_gather_stats->rdmatxrds,
  4900. IRDMA_MAX_STATS_48);
  4901. stats_val[IRDMA_HW_STAT_INDEX_RDMATXSNDS] +=
  4902. IRDMA_STATS_DELTA(gather_stats->rdmatxsnds,
  4903. last_gather_stats->rdmatxsnds,
  4904. IRDMA_MAX_STATS_48);
  4905. stats_val[IRDMA_HW_STAT_INDEX_RDMATXWRS] +=
  4906. IRDMA_STATS_DELTA(gather_stats->rdmatxwrs,
  4907. last_gather_stats->rdmatxwrs,
  4908. IRDMA_MAX_STATS_48);
  4909. stats_val[IRDMA_HW_STAT_INDEX_RDMAVBND] +=
  4910. IRDMA_STATS_DELTA(gather_stats->rdmavbn,
  4911. last_gather_stats->rdmavbn,
  4912. IRDMA_MAX_STATS_48);
  4913. stats_val[IRDMA_HW_STAT_INDEX_RDMAVINV] +=
  4914. IRDMA_STATS_DELTA(gather_stats->rdmavinv,
  4915. last_gather_stats->rdmavinv,
  4916. IRDMA_MAX_STATS_48);
  4917. stats_val[IRDMA_HW_STAT_INDEX_UDPRXPKTS] +=
  4918. IRDMA_STATS_DELTA(gather_stats->udprxpkts,
  4919. last_gather_stats->udprxpkts,
  4920. IRDMA_MAX_STATS_48);
  4921. stats_val[IRDMA_HW_STAT_INDEX_UDPTXPKTS] +=
  4922. IRDMA_STATS_DELTA(gather_stats->udptxpkts,
  4923. last_gather_stats->udptxpkts,
  4924. IRDMA_MAX_STATS_48);
  4925. stats_val[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] +=
  4926. IRDMA_STATS_DELTA(gather_stats->rxnpecnmrkpkts,
  4927. last_gather_stats->rxnpecnmrkpkts,
  4928. IRDMA_MAX_STATS_48);
  4929. memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats));
  4930. }