// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

#define bfa_fcpim_additn(__itnim) \
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

#define bfa_fcpim_delitn(__itnim) do { \
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
	bfa_itnim_update_del_itn_stats(__itnim); \
	list_del(&(__itnim)->qe); \
	WARN_ON(!list_empty(&(__itnim)->io_q)); \
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \
	WARN_ON(!list_empty(&(__itnim)->pending_q)); \
} while (0)

#define bfa_itnim_online_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_online((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_online, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_offline((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_offline, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_sler((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_sler, (__itnim)); \
	} \
} while (0)

enum bfa_ioim_lm_ua_status {
	BFA_IOIM_LM_UA_RESET = 0,
	BFA_IOIM_LM_UA_SET = 1,
};

/*
 * itnim state machine event
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};

/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do { \
	list_del(&(__ioim)->qe); \
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
} while (0)

#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_comp) \
		(__fcpim)->profile_comp(__ioim); \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_start) \
		(__fcpim)->profile_start(__ioim); \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};

/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \
	bfa_tskim_notify_comp(__tskim); \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do { \
	if ((__tskim)->notify) \
		bfa_itnim_tskdone((__tskim)->itnim); \
} while (0)

enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
	BFA_TSKIM_SM_UTAG = 10,		/* TM completion unknown tag */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov(void *itnim_arg);
static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);

/*
 * forward declaration for BFA TSKIM functions
 */
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
		struct scsi_lun lun);
static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);

/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	bfa_itnim_meminfo(cfg, km_len);

	/*
	 * IO memory
	 */
	*km_len += cfg->fwcfg.num_ioim_reqs *
		(sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

	/*
	 * task management command memory
	 */
	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
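
/*
 * Attach-time initialization of the FCP initiator-mode module: record the
 * configured limits and path TOV, then attach the itnim, tskim and ioim
 * sub-modules.
 */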
static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
		struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_s *bfa = fcp->bfa;

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->fcp = fcp;
	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim);
	bfa_tskim_attach(fcpim);
	bfa_ioim_attach(fcpim);
}
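
/*
 * IOC is being disabled: return unused tskim resources to the free list
 * and notify each itnim so its state machine can clean up outstanding
 * requests.
 */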
void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	/* Enqueue unused tskim resources to free_q */
	list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->path_tov / 1000;
}
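
/*
 * Accumulate the IO statistics of one itnim into another, field by field.
 */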
#define bfa_fcpim_add_iostats(__l, __r, __stats) \
	(__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
		struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	/* accumulate IO stats from itnim */
	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != lp_tag)
			continue;
		bfa_fcpim_add_stats(stats, &(itnim->stats));
	}
	return BFA_STATUS_OK;
}
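
/*
 * IO profiling completion hook: bucket the IO by transfer size and update
 * the per-itnim latency counters (count/min/max/avg).
 */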
static void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_latency_s *io_lat =
			&(ioim->itnim->ioprofile.io_latency);
	u32 val, idx;

	val = (u32)(jiffies - ioim->start_time);
	idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
	bfa_itnim_ioprofile_update(ioim->itnim, idx);

	io_lat->count[idx]++;
	io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
	io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
	io_lat->avg[idx] += val;
}

static void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = jiffies;
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
	struct bfa_itnim_s *itnim;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;

	/* accumulate IO stats from itnim */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	fcpim->io_profile = BFA_TRUE;
	fcpim->io_profile_start_time = time;
	fcpim->profile_comp = bfa_ioim_profile_comp;
	fcpim->profile_start = bfa_ioim_profile_start;
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	fcpim->io_profile = BFA_FALSE;
	fcpim->io_profile_start_time = 0;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;
	return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->q_depth;
}

/*
 * BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;
	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
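
/*
 * Waiting for request queue space to send the itnim create request to
 * firmware.
 */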
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;
	case BFA_ITNIM_SM_SLER:
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;
	case BFA_ITNIM_SM_SLER:
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
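
/*
 * Waiting for request queue space to send the itnim delete request to
 * firmware.
 */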
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
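
/*
 * IOC h/w failure state.
 */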
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;
	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
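
/*
 * Itnim is being deleted; waiting for request queue space to send the
 * firmware delete request.
 */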
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO requests in the pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from the active queue so that
		 * a later TM will not pick up this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 * bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
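
/*
 * Carve out and initialize all itnim instances from the module KVA block;
 * one itnim is pre-allocated per configured rport.
 */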
void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
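
/*
 * Send an itnim create request to firmware. Returns BFA_FALSE and queues
 * a wait element if there is no request queue space.
 */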
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
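
/*
 * Send an itnim delete request to firmware. Returns BFA_FALSE and queues
 * a wait element if there is no request queue space.
 */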
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Cleanup all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {
		itnim->iotov_active = BFA_TRUE;
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}

/*
 * Stop the IO TOV timer and fail back any pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}
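
/*
 * Fold the statistics of an itnim being deleted into the module-wide
 * deleted-itnim counters.
 */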
  1139. static void
  1140. bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
  1141. {
  1142. struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
  1143. fcpim->del_itn_stats.del_itn_iocomp_aborted +=
  1144. itnim->stats.iocomp_aborted;
  1145. fcpim->del_itn_stats.del_itn_iocomp_timedout +=
  1146. itnim->stats.iocomp_timedout;
  1147. fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
  1148. itnim->stats.iocom_sqer_needed;
  1149. fcpim->del_itn_stats.del_itn_iocom_res_free +=
  1150. itnim->stats.iocom_res_free;
  1151. fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
  1152. itnim->stats.iocom_hostabrts;
  1153. fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
  1154. fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
  1155. fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
  1156. }
  1157. /*
  1158. * bfa_itnim_public
  1159. */
  1160. /*
  1161. * Itnim interrupt processing.
  1162. */
  1163. void
  1164. bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  1165. {
  1166. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  1167. union bfi_itn_i2h_msg_u msg;
  1168. struct bfa_itnim_s *itnim;
  1169. bfa_trc(bfa, m->mhdr.msg_id);
  1170. msg.msg = m;
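	/*
	 * Dispatch the firmware response to the owning itnim state machine:
	 * create/delete responses and SLER events are looked up by the
	 * bfa_handle carried in the message.
	 */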
  1171. switch (m->mhdr.msg_id) {
  1172. case BFI_ITN_I2H_CREATE_RSP:
  1173. itnim = BFA_ITNIM_FROM_TAG(fcpim,
  1174. msg.create_rsp->bfa_handle);
  1175. WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
  1176. bfa_stats(itnim, create_comps);
  1177. bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
  1178. break;
  1179. case BFI_ITN_I2H_DELETE_RSP:
  1180. itnim = BFA_ITNIM_FROM_TAG(fcpim,
  1181. msg.delete_rsp->bfa_handle);
  1182. WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
  1183. bfa_stats(itnim, delete_comps);
  1184. bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
  1185. break;
  1186. case BFI_ITN_I2H_SLER_EVENT:
  1187. itnim = BFA_ITNIM_FROM_TAG(fcpim,
  1188. msg.sler_event->bfa_handle);
  1189. bfa_stats(itnim, sler_events);
  1190. bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
  1191. break;
  1192. default:
  1193. bfa_trc(bfa, m->mhdr.msg_id);
  1194. WARN_ON(1);
  1195. }
  1196. }
  1197. /*
  1198. * bfa_itnim_api
  1199. */
  1200. struct bfa_itnim_s *
  1201. bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
  1202. {
  1203. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  1204. struct bfa_itnim_s *itnim;
  1205. bfa_itn_create(bfa, rport, bfa_itnim_isr);
  1206. itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
  1207. WARN_ON(itnim->rport != rport);
  1208. itnim->ditn = ditn;
  1209. bfa_stats(itnim, creates);
  1210. bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
  1211. return itnim;
  1212. }
  1213. void
  1214. bfa_itnim_delete(struct bfa_itnim_s *itnim)
  1215. {
  1216. bfa_stats(itnim, deletes);
  1217. bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
  1218. }
  1219. void
  1220. bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
  1221. {
  1222. itnim->seq_rec = seq_rec;
  1223. bfa_stats(itnim, onlines);
  1224. bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
  1225. }
  1226. void
  1227. bfa_itnim_offline(struct bfa_itnim_s *itnim)
  1228. {
  1229. bfa_stats(itnim, offlines);
  1230. bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
  1231. }
/*
 * Return true if itnim is considered offline for holding off IO requests.
 * IO is not held if the itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}

#define bfa_io_lat_clock_res_div	HZ
#define bfa_io_lat_clock_res_mul	1000

bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
		struct bfa_itnim_ioprofile_s *ioprofile)
{
	struct bfa_fcpim_s *fcpim;

	if (!itnim)
		return BFA_STATUS_NO_FCPIM_NEXUS;

	fcpim = BFA_FCPIM(itnim->bfa);
	if (!fcpim->io_profile)
		return BFA_STATUS_IOPROFILE_OFF;

	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
	/* unsigned 32-bit time_t overflow here in y2106 */
	itnim->ioprofile.io_profile_start_time =
		bfa_io_profile_start_time(itnim->bfa);
	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
	*ioprofile = itnim->ioprofile;

	return BFA_STATUS_OK;
}

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;

	if (!itnim)
		return;

	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}
  1279. /*
  1280. * BFA IO module state machine functions
  1281. */
  1282. /*
  1283. * IO is not started (unallocated).
  1284. */
  1285. static void
  1286. bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1287. {
  1288. switch (event) {
  1289. case BFA_IOIM_SM_START:
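		/*
		 * Target is offline: either fail the IO immediately with a
		 * path TOV callback, or park it on the itnim pending queue if
		 * IO requests are being held for the path TOV window.
		 */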
  1290. if (!bfa_itnim_is_online(ioim->itnim)) {
  1291. if (!bfa_itnim_hold_io(ioim->itnim)) {
  1292. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1293. list_del(&ioim->qe);
  1294. list_add_tail(&ioim->qe,
  1295. &ioim->fcpim->ioim_comp_q);
  1296. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1297. __bfa_cb_ioim_pathtov, ioim);
  1298. } else {
  1299. list_del(&ioim->qe);
  1300. list_add_tail(&ioim->qe,
  1301. &ioim->itnim->pending_q);
  1302. }
  1303. break;
  1304. }
  1305. if (ioim->nsges > BFI_SGE_INLINE) {
  1306. if (!bfa_ioim_sgpg_alloc(ioim)) {
  1307. bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
  1308. return;
  1309. }
  1310. }
  1311. if (!bfa_ioim_send_ioreq(ioim)) {
  1312. bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
  1313. break;
  1314. }
  1315. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  1316. break;
  1317. case BFA_IOIM_SM_IOTOV:
  1318. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1319. bfa_ioim_move_to_comp_q(ioim);
  1320. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1321. __bfa_cb_ioim_pathtov, ioim);
  1322. break;
  1323. case BFA_IOIM_SM_ABORT:
  1324. /*
  1325. * IO in pending queue can get abort requests. Complete abort
  1326. * requests immediately.
  1327. */
  1328. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1329. WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
  1330. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1331. __bfa_cb_ioim_abort, ioim);
  1332. break;
  1333. default:
  1334. bfa_sm_fault(ioim->bfa, event);
  1335. }
  1336. }
  1337. /*
  1338. * IO is waiting for SG pages.
  1339. */
  1340. static void
  1341. bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1342. {
  1343. bfa_trc(ioim->bfa, ioim->iotag);
  1344. bfa_trc(ioim->bfa, event);
  1345. switch (event) {
  1346. case BFA_IOIM_SM_SGALLOCED:
  1347. if (!bfa_ioim_send_ioreq(ioim)) {
  1348. bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
  1349. break;
  1350. }
  1351. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  1352. break;
  1353. case BFA_IOIM_SM_CLEANUP:
  1354. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1355. bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
  1356. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1357. ioim);
  1358. bfa_ioim_notify_cleanup(ioim);
  1359. break;
  1360. case BFA_IOIM_SM_ABORT:
  1361. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1362. bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
  1363. bfa_ioim_move_to_comp_q(ioim);
  1364. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1365. ioim);
  1366. break;
  1367. case BFA_IOIM_SM_HWFAIL:
  1368. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1369. bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
  1370. bfa_ioim_move_to_comp_q(ioim);
  1371. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1372. ioim);
  1373. break;
  1374. default:
  1375. bfa_sm_fault(ioim->bfa, event);
  1376. }
  1377. }
  1378. /*
  1379. * IO is active.
  1380. */
  1381. static void
  1382. bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1383. {
  1384. switch (event) {
  1385. case BFA_IOIM_SM_COMP_GOOD:
  1386. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1387. bfa_ioim_move_to_comp_q(ioim);
  1388. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1389. __bfa_cb_ioim_good_comp, ioim);
  1390. break;
  1391. case BFA_IOIM_SM_COMP:
  1392. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1393. bfa_ioim_move_to_comp_q(ioim);
  1394. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
  1395. ioim);
  1396. break;
  1397. case BFA_IOIM_SM_DONE:
  1398. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1399. bfa_ioim_move_to_comp_q(ioim);
  1400. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
  1401. ioim);
  1402. break;
  1403. case BFA_IOIM_SM_ABORT:
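		/*
		 * Explicit (host requested) abort: bfa_ioim_send_abort() will
		 * issue an IOABORT request rather than an IOCLEANUP request.
		 */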
  1404. ioim->iosp->abort_explicit = BFA_TRUE;
  1405. ioim->io_cbfn = __bfa_cb_ioim_abort;
  1406. if (bfa_ioim_send_abort(ioim))
  1407. bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
  1408. else {
  1409. bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
  1410. bfa_stats(ioim->itnim, qwait);
  1411. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  1412. &ioim->iosp->reqq_wait);
  1413. }
  1414. break;
  1415. case BFA_IOIM_SM_CLEANUP:
  1416. ioim->iosp->abort_explicit = BFA_FALSE;
  1417. ioim->io_cbfn = __bfa_cb_ioim_failed;
  1418. if (bfa_ioim_send_abort(ioim))
  1419. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  1420. else {
  1421. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  1422. bfa_stats(ioim->itnim, qwait);
  1423. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  1424. &ioim->iosp->reqq_wait);
  1425. }
  1426. break;
  1427. case BFA_IOIM_SM_HWFAIL:
  1428. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1429. bfa_ioim_move_to_comp_q(ioim);
  1430. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1431. ioim);
  1432. break;
  1433. case BFA_IOIM_SM_SQRETRY:
  1434. if (bfa_ioim_maxretry_reached(ioim)) {
  1435. /* max retry reached, free IO */
  1436. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1437. bfa_ioim_move_to_comp_q(ioim);
  1438. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1439. __bfa_cb_ioim_failed, ioim);
  1440. break;
  1441. }
  1442. /* waiting for IO tag resource free */
  1443. bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
  1444. break;
  1445. default:
  1446. bfa_sm_fault(ioim->bfa, event);
  1447. }
  1448. }
  1449. /*
  1450. * IO is retried with new tag.
  1451. */
  1452. static void
  1453. bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1454. {
  1455. switch (event) {
  1456. case BFA_IOIM_SM_FREE:
  1457. /* abts and rrq done. Now retry the IO with new tag */
  1458. bfa_ioim_update_iotag(ioim);
  1459. if (!bfa_ioim_send_ioreq(ioim)) {
  1460. bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
  1461. break;
  1462. }
  1463. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  1464. break;
  1465. case BFA_IOIM_SM_CLEANUP:
  1466. ioim->iosp->abort_explicit = BFA_FALSE;
  1467. ioim->io_cbfn = __bfa_cb_ioim_failed;
  1468. if (bfa_ioim_send_abort(ioim))
  1469. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  1470. else {
  1471. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  1472. bfa_stats(ioim->itnim, qwait);
  1473. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  1474. &ioim->iosp->reqq_wait);
  1475. }
  1476. break;
  1477. case BFA_IOIM_SM_HWFAIL:
  1478. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1479. bfa_ioim_move_to_comp_q(ioim);
  1480. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1481. __bfa_cb_ioim_failed, ioim);
  1482. break;
  1483. case BFA_IOIM_SM_ABORT:
  1484. /* in this state IO abort is done.
  1485. * Waiting for IO tag resource free.
  1486. */
  1487. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1488. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1489. ioim);
  1490. break;
  1491. default:
  1492. bfa_sm_fault(ioim->bfa, event);
  1493. }
  1494. }
  1495. /*
  1496. * IO is being aborted, waiting for completion from firmware.
  1497. */
  1498. static void
  1499. bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1500. {
  1501. bfa_trc(ioim->bfa, ioim->iotag);
  1502. bfa_trc(ioim->bfa, event);
  1503. switch (event) {
  1504. case BFA_IOIM_SM_COMP_GOOD:
  1505. case BFA_IOIM_SM_COMP:
  1506. case BFA_IOIM_SM_DONE:
  1507. case BFA_IOIM_SM_FREE:
  1508. break;
  1509. case BFA_IOIM_SM_ABORT_DONE:
  1510. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1511. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1512. ioim);
  1513. break;
  1514. case BFA_IOIM_SM_ABORT_COMP:
  1515. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1516. bfa_ioim_move_to_comp_q(ioim);
  1517. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1518. ioim);
  1519. break;
  1520. case BFA_IOIM_SM_COMP_UTAG:
  1521. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1522. bfa_ioim_move_to_comp_q(ioim);
  1523. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1524. ioim);
  1525. break;
  1526. case BFA_IOIM_SM_CLEANUP:
  1527. WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
  1528. ioim->iosp->abort_explicit = BFA_FALSE;
  1529. if (bfa_ioim_send_abort(ioim))
  1530. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  1531. else {
  1532. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  1533. bfa_stats(ioim->itnim, qwait);
  1534. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  1535. &ioim->iosp->reqq_wait);
  1536. }
  1537. break;
  1538. case BFA_IOIM_SM_HWFAIL:
  1539. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1540. bfa_ioim_move_to_comp_q(ioim);
  1541. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1542. ioim);
  1543. break;
  1544. default:
  1545. bfa_sm_fault(ioim->bfa, event);
  1546. }
  1547. }
  1548. /*
  1549. * IO is being cleaned up (implicit abort), waiting for completion from
  1550. * firmware.
  1551. */
  1552. static void
  1553. bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1554. {
  1555. bfa_trc(ioim->bfa, ioim->iotag);
  1556. bfa_trc(ioim->bfa, event);
  1557. switch (event) {
  1558. case BFA_IOIM_SM_COMP_GOOD:
  1559. case BFA_IOIM_SM_COMP:
  1560. case BFA_IOIM_SM_DONE:
  1561. case BFA_IOIM_SM_FREE:
  1562. break;
  1563. case BFA_IOIM_SM_ABORT:
  1564. /*
  1565. * IO is already being aborted implicitly
  1566. */
  1567. ioim->io_cbfn = __bfa_cb_ioim_abort;
  1568. break;
  1569. case BFA_IOIM_SM_ABORT_DONE:
  1570. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1571. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1572. bfa_ioim_notify_cleanup(ioim);
  1573. break;
  1574. case BFA_IOIM_SM_ABORT_COMP:
  1575. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1576. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1577. bfa_ioim_notify_cleanup(ioim);
  1578. break;
  1579. case BFA_IOIM_SM_COMP_UTAG:
  1580. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1581. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1582. bfa_ioim_notify_cleanup(ioim);
  1583. break;
  1584. case BFA_IOIM_SM_HWFAIL:
  1585. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1586. bfa_ioim_move_to_comp_q(ioim);
  1587. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1588. ioim);
  1589. break;
  1590. case BFA_IOIM_SM_CLEANUP:
  1591. /*
  1592. * IO can be in cleanup state already due to TM command.
  1593. * 2nd cleanup request comes from ITN offline event.
  1594. */
  1595. break;
  1596. default:
  1597. bfa_sm_fault(ioim->bfa, event);
  1598. }
  1599. }
  1600. /*
  1601. * IO is waiting for room in request CQ
  1602. */
  1603. static void
  1604. bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1605. {
  1606. bfa_trc(ioim->bfa, ioim->iotag);
  1607. bfa_trc(ioim->bfa, event);
  1608. switch (event) {
  1609. case BFA_IOIM_SM_QRESUME:
  1610. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  1611. bfa_ioim_send_ioreq(ioim);
  1612. break;
  1613. case BFA_IOIM_SM_ABORT:
  1614. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1615. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1616. bfa_ioim_move_to_comp_q(ioim);
  1617. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1618. ioim);
  1619. break;
  1620. case BFA_IOIM_SM_CLEANUP:
  1621. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1622. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1623. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1624. ioim);
  1625. bfa_ioim_notify_cleanup(ioim);
  1626. break;
  1627. case BFA_IOIM_SM_HWFAIL:
  1628. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1629. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1630. bfa_ioim_move_to_comp_q(ioim);
  1631. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1632. ioim);
  1633. break;
  1634. default:
  1635. bfa_sm_fault(ioim->bfa, event);
  1636. }
  1637. }
  1638. /*
  1639. * Active IO is being aborted, waiting for room in request CQ.
  1640. */
  1641. static void
  1642. bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1643. {
  1644. bfa_trc(ioim->bfa, ioim->iotag);
  1645. bfa_trc(ioim->bfa, event);
  1646. switch (event) {
  1647. case BFA_IOIM_SM_QRESUME:
  1648. bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
  1649. bfa_ioim_send_abort(ioim);
  1650. break;
  1651. case BFA_IOIM_SM_CLEANUP:
  1652. WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
  1653. ioim->iosp->abort_explicit = BFA_FALSE;
  1654. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  1655. break;
  1656. case BFA_IOIM_SM_COMP_GOOD:
  1657. case BFA_IOIM_SM_COMP:
  1658. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1659. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1660. bfa_ioim_move_to_comp_q(ioim);
  1661. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1662. ioim);
  1663. break;
  1664. case BFA_IOIM_SM_DONE:
  1665. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1666. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1667. bfa_ioim_move_to_comp_q(ioim);
  1668. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1669. ioim);
  1670. break;
  1671. case BFA_IOIM_SM_HWFAIL:
  1672. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1673. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1674. bfa_ioim_move_to_comp_q(ioim);
  1675. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1676. ioim);
  1677. break;
  1678. default:
  1679. bfa_sm_fault(ioim->bfa, event);
  1680. }
  1681. }
  1682. /*
  1683. * Active IO is being cleaned up, waiting for room in request CQ.
  1684. */
  1685. static void
  1686. bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1687. {
  1688. bfa_trc(ioim->bfa, ioim->iotag);
  1689. bfa_trc(ioim->bfa, event);
  1690. switch (event) {
  1691. case BFA_IOIM_SM_QRESUME:
  1692. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  1693. bfa_ioim_send_abort(ioim);
  1694. break;
  1695. case BFA_IOIM_SM_ABORT:
  1696. /*
  1697. * IO is already being cleaned up implicitly
  1698. */
  1699. ioim->io_cbfn = __bfa_cb_ioim_abort;
  1700. break;
  1701. case BFA_IOIM_SM_COMP_GOOD:
  1702. case BFA_IOIM_SM_COMP:
  1703. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1704. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1705. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1706. bfa_ioim_notify_cleanup(ioim);
  1707. break;
  1708. case BFA_IOIM_SM_DONE:
  1709. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1710. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1711. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1712. bfa_ioim_notify_cleanup(ioim);
  1713. break;
  1714. case BFA_IOIM_SM_HWFAIL:
  1715. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1716. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1717. bfa_ioim_move_to_comp_q(ioim);
  1718. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1719. ioim);
  1720. break;
  1721. default:
  1722. bfa_sm_fault(ioim->bfa, event);
  1723. }
  1724. }
  1725. /*
  1726. * IO bfa callback is pending.
  1727. */
  1728. static void
  1729. bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1730. {
  1731. switch (event) {
  1732. case BFA_IOIM_SM_HCB:
  1733. bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
  1734. bfa_ioim_free(ioim);
  1735. break;
  1736. case BFA_IOIM_SM_CLEANUP:
  1737. bfa_ioim_notify_cleanup(ioim);
  1738. break;
  1739. case BFA_IOIM_SM_HWFAIL:
  1740. break;
  1741. default:
  1742. bfa_sm_fault(ioim->bfa, event);
  1743. }
  1744. }
  1745. /*
  1746. * IO bfa callback is pending. IO resource cannot be freed.
  1747. */
  1748. static void
  1749. bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1750. {
  1751. bfa_trc(ioim->bfa, ioim->iotag);
  1752. bfa_trc(ioim->bfa, event);
  1753. switch (event) {
  1754. case BFA_IOIM_SM_HCB:
  1755. bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
  1756. list_del(&ioim->qe);
  1757. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
  1758. break;
  1759. case BFA_IOIM_SM_FREE:
  1760. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1761. break;
  1762. case BFA_IOIM_SM_CLEANUP:
  1763. bfa_ioim_notify_cleanup(ioim);
  1764. break;
  1765. case BFA_IOIM_SM_HWFAIL:
  1766. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1767. break;
  1768. default:
  1769. bfa_sm_fault(ioim->bfa, event);
  1770. }
  1771. }
/*
 * IO is completed, waiting for the firmware to free the IO resource.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;
	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;
	case BFA_IOIM_SM_HWFAIL:
		break;
	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * This is called from bfa_fcpim_start after the driver has completed
 * bfa_init() and the flash read. Now invalidate the stale contents of the
 * lun mask, such as the unit attention, rp tag and lp tag.
 */
void
bfa_ioim_lm_init(struct bfa_s *bfa)
{
	struct bfa_lun_mask_s *lunm_list;
	int i;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return;

	lunm_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
		lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
		lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
	}
}
  1813. static void
  1814. __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
  1815. {
  1816. struct bfa_ioim_s *ioim = cbarg;
  1817. if (!complete) {
  1818. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  1819. return;
  1820. }
  1821. bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
  1822. }
  1823. static void
  1824. __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
  1825. {
  1826. struct bfa_ioim_s *ioim = cbarg;
  1827. struct bfi_ioim_rsp_s *m;
  1828. u8 *snsinfo = NULL;
  1829. u8 sns_len = 0;
  1830. s32 residue = 0;
  1831. if (!complete) {
  1832. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  1833. return;
  1834. }
  1835. m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
  1836. if (m->io_status == BFI_IOIM_STS_OK) {
  1837. /*
  1838. * setup sense information, if present
  1839. */
  1840. if ((m->scsi_status == SAM_STAT_CHECK_CONDITION) &&
  1841. m->sns_len) {
  1842. sns_len = m->sns_len;
  1843. snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
  1844. ioim->iotag);
  1845. }
  1846. /*
  1847. * setup residue value correctly for normal completions
  1848. */
  1849. if (m->resid_flags == FCP_RESID_UNDER) {
  1850. residue = be32_to_cpu(m->residue);
  1851. bfa_stats(ioim->itnim, iocomp_underrun);
  1852. }
  1853. if (m->resid_flags == FCP_RESID_OVER) {
  1854. residue = be32_to_cpu(m->residue);
  1855. residue = -residue;
  1856. bfa_stats(ioim->itnim, iocomp_overrun);
  1857. }
  1858. }
  1859. bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
  1860. m->scsi_status, sns_len, snsinfo, residue);
  1861. }
  1862. void
  1863. bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
  1864. u16 rp_tag, u8 lp_tag)
  1865. {
  1866. struct bfa_lun_mask_s *lun_list;
  1867. u8 i;
  1868. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1869. return;
  1870. lun_list = bfa_get_lun_mask_list(bfa);
  1871. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1872. if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
  1873. if ((lun_list[i].lp_wwn == lp_wwn) &&
  1874. (lun_list[i].rp_wwn == rp_wwn)) {
  1875. lun_list[i].rp_tag = rp_tag;
  1876. lun_list[i].lp_tag = lp_tag;
  1877. }
  1878. }
  1879. }
  1880. }
  1881. /*
  1882. * set UA for all active luns in LM DB
  1883. */
  1884. static void
  1885. bfa_ioim_lm_set_ua(struct bfa_s *bfa)
  1886. {
  1887. struct bfa_lun_mask_s *lunm_list;
  1888. int i;
  1889. lunm_list = bfa_get_lun_mask_list(bfa);
  1890. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1891. if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
  1892. continue;
  1893. lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
  1894. }
  1895. }
  1896. bfa_status_t
  1897. bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
  1898. {
  1899. struct bfa_lunmask_cfg_s *lun_mask;
  1900. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  1901. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1902. return BFA_STATUS_FAILED;
  1903. if (bfa_get_lun_mask_status(bfa) == update)
  1904. return BFA_STATUS_NO_CHANGE;
  1905. lun_mask = bfa_get_lun_mask(bfa);
  1906. lun_mask->status = update;
  1907. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
  1908. bfa_ioim_lm_set_ua(bfa);
  1909. return bfa_dconf_update(bfa);
  1910. }
  1911. bfa_status_t
  1912. bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
  1913. {
  1914. int i;
  1915. struct bfa_lun_mask_s *lunm_list;
  1916. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  1917. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1918. return BFA_STATUS_FAILED;
  1919. lunm_list = bfa_get_lun_mask_list(bfa);
  1920. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1921. if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
  1922. if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
  1923. bfa_rport_unset_lunmask(bfa,
  1924. BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
  1925. }
  1926. }
  1927. memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
  1928. return bfa_dconf_update(bfa);
  1929. }
  1930. bfa_status_t
  1931. bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
  1932. {
  1933. struct bfa_lunmask_cfg_s *lun_mask;
  1934. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  1935. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1936. return BFA_STATUS_FAILED;
  1937. lun_mask = bfa_get_lun_mask(bfa);
  1938. memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
  1939. return BFA_STATUS_OK;
  1940. }
  1941. bfa_status_t
  1942. bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
  1943. wwn_t rpwwn, struct scsi_lun lun)
  1944. {
  1945. struct bfa_lun_mask_s *lunm_list;
  1946. struct bfa_rport_s *rp = NULL;
  1947. int i, free_index = MAX_LUN_MASK_CFG + 1;
  1948. struct bfa_fcs_lport_s *port = NULL;
  1949. struct bfa_fcs_rport_s *rp_fcs;
  1950. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  1951. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1952. return BFA_STATUS_FAILED;
  1953. port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
  1954. vf_id, *pwwn);
  1955. if (port) {
  1956. *pwwn = port->port_cfg.pwwn;
  1957. rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
  1958. if (rp_fcs)
  1959. rp = rp_fcs->bfa_rport;
  1960. }
  1961. lunm_list = bfa_get_lun_mask_list(bfa);
  1962. /* if entry exists */
  1963. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1964. if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
  1965. free_index = i;
  1966. if ((lunm_list[i].lp_wwn == *pwwn) &&
  1967. (lunm_list[i].rp_wwn == rpwwn) &&
  1968. (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
  1969. scsilun_to_int((struct scsi_lun *)&lun)))
  1970. return BFA_STATUS_ENTRY_EXISTS;
  1971. }
  1972. if (free_index > MAX_LUN_MASK_CFG)
  1973. return BFA_STATUS_MAX_ENTRY_REACHED;
  1974. if (rp) {
  1975. lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
  1976. rp->rport_info.local_pid);
  1977. lunm_list[free_index].rp_tag = rp->rport_tag;
  1978. } else {
  1979. lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
  1980. lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
  1981. }
  1982. lunm_list[free_index].lp_wwn = *pwwn;
  1983. lunm_list[free_index].rp_wwn = rpwwn;
  1984. lunm_list[free_index].lun = lun;
  1985. lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
  1986. /* set for all luns in this rp */
  1987. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1988. if ((lunm_list[i].lp_wwn == *pwwn) &&
  1989. (lunm_list[i].rp_wwn == rpwwn))
  1990. lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
  1991. }
  1992. return bfa_dconf_update(bfa);
  1993. }
  1994. bfa_status_t
  1995. bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
  1996. wwn_t rpwwn, struct scsi_lun lun)
  1997. {
  1998. struct bfa_lun_mask_s *lunm_list;
  1999. struct bfa_fcs_lport_s *port = NULL;
  2000. int i;
  2001. /* in min cfg lunm_list could be NULL but no commands should run. */
  2002. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  2003. return BFA_STATUS_FAILED;
  2004. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  2005. bfa_trc(bfa, *pwwn);
  2006. bfa_trc(bfa, rpwwn);
  2007. bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
  2008. if (*pwwn == 0) {
  2009. port = bfa_fcs_lookup_port(
  2010. &((struct bfad_s *)bfa->bfad)->bfa_fcs,
  2011. vf_id, *pwwn);
  2012. if (port)
  2013. *pwwn = port->port_cfg.pwwn;
  2014. }
  2015. lunm_list = bfa_get_lun_mask_list(bfa);
  2016. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  2017. if ((lunm_list[i].lp_wwn == *pwwn) &&
  2018. (lunm_list[i].rp_wwn == rpwwn) &&
  2019. (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
  2020. scsilun_to_int((struct scsi_lun *)&lun))) {
  2021. lunm_list[i].lp_wwn = 0;
  2022. lunm_list[i].rp_wwn = 0;
  2023. int_to_scsilun(0, &lunm_list[i].lun);
  2024. lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
  2025. if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
  2026. lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
  2027. lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
  2028. }
  2029. return bfa_dconf_update(bfa);
  2030. }
  2031. }
  2032. /* set for all luns in this rp */
  2033. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  2034. if ((lunm_list[i].lp_wwn == *pwwn) &&
  2035. (lunm_list[i].rp_wwn == rpwwn))
  2036. lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
  2037. }
  2038. return BFA_STATUS_ENTRY_NOT_EXISTS;
  2039. }
  2040. static void
  2041. __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
  2042. {
  2043. struct bfa_ioim_s *ioim = cbarg;
  2044. if (!complete) {
  2045. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  2046. return;
  2047. }
  2048. bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
  2049. 0, 0, NULL, 0);
  2050. }
  2051. static void
  2052. __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
  2053. {
  2054. struct bfa_ioim_s *ioim = cbarg;
  2055. bfa_stats(ioim->itnim, path_tov_expired);
  2056. if (!complete) {
  2057. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  2058. return;
  2059. }
  2060. bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
  2061. 0, 0, NULL, 0);
  2062. }
  2063. static void
  2064. __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
  2065. {
  2066. struct bfa_ioim_s *ioim = cbarg;
  2067. if (!complete) {
  2068. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  2069. return;
  2070. }
  2071. bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
  2072. }
  2073. static void
  2074. bfa_ioim_sgpg_alloced(void *cbarg)
  2075. {
  2076. struct bfa_ioim_s *ioim = cbarg;
  2077. ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
  2078. list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
  2079. ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
  2080. bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
  2081. }
  2082. /*
  2083. * Send I/O request to firmware.
  2084. */
  2085. static bfa_boolean_t
  2086. bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
  2087. {
  2088. struct bfa_itnim_s *itnim = ioim->itnim;
  2089. struct bfi_ioim_req_s *m;
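	/* zeroed template used below to initialize the FCP command block */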
  2090. static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
  2091. struct bfi_sge_s *sge, *sgpge;
  2092. u32 pgdlen = 0;
  2093. u32 fcp_dl;
  2094. u64 addr;
  2095. struct scatterlist *sg;
  2096. struct bfa_sgpg_s *sgpg;
  2097. struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
  2098. u32 i, sge_id, pgcumsz;
  2099. enum dma_data_direction dmadir;
  2100. /*
  2101. * check for room in queue to send request now
  2102. */
  2103. m = bfa_reqq_next(ioim->bfa, ioim->reqq);
  2104. if (!m) {
  2105. bfa_stats(ioim->itnim, qwait);
  2106. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  2107. &ioim->iosp->reqq_wait);
  2108. return BFA_FALSE;
  2109. }
  2110. /*
  2111. * build i/o request message next
  2112. */
  2113. m->io_tag = cpu_to_be16(ioim->iotag);
  2114. m->rport_hdl = ioim->itnim->rport->fw_handle;
  2115. m->io_timeout = 0;
  2116. sge = &m->sges[0];
  2117. sgpg = ioim->sgpg;
  2118. sge_id = 0;
  2119. sgpge = NULL;
  2120. pgcumsz = 0;
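	/*
	 * Walk the scatter-gather list: the first element is built inline in
	 * the request message; the remaining elements are chained through SG
	 * pages of BFI_SGPG_DATA_SGES entries, each page terminated with a
	 * PGDLEN element or a LINK element to the next page.
	 */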
  2121. scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
  2122. if (i == 0) {
  2123. /* build inline IO SG element */
  2124. addr = bfa_sgaddr_le(sg_dma_address(sg));
  2125. sge->sga = *(union bfi_addr_u *) &addr;
  2126. pgdlen = sg_dma_len(sg);
  2127. sge->sg_len = pgdlen;
  2128. sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
  2129. BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
  2130. bfa_sge_to_be(sge);
  2131. sge++;
  2132. } else {
  2133. if (sge_id == 0)
  2134. sgpge = sgpg->sgpg->sges;
  2135. addr = bfa_sgaddr_le(sg_dma_address(sg));
  2136. sgpge->sga = *(union bfi_addr_u *) &addr;
  2137. sgpge->sg_len = sg_dma_len(sg);
  2138. pgcumsz += sgpge->sg_len;
  2139. /* set flags */
  2140. if (i < (ioim->nsges - 1) &&
  2141. sge_id < (BFI_SGPG_DATA_SGES - 1))
  2142. sgpge->flags = BFI_SGE_DATA;
  2143. else if (i < (ioim->nsges - 1))
  2144. sgpge->flags = BFI_SGE_DATA_CPL;
  2145. else
  2146. sgpge->flags = BFI_SGE_DATA_LAST;
  2147. bfa_sge_to_le(sgpge);
  2148. sgpge++;
  2149. if (i == (ioim->nsges - 1)) {
  2150. sgpge->flags = BFI_SGE_PGDLEN;
  2151. sgpge->sga.a32.addr_lo = 0;
  2152. sgpge->sga.a32.addr_hi = 0;
  2153. sgpge->sg_len = pgcumsz;
  2154. bfa_sge_to_le(sgpge);
  2155. } else if (++sge_id == BFI_SGPG_DATA_SGES) {
  2156. sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
  2157. sgpge->flags = BFI_SGE_LINK;
  2158. sgpge->sga = sgpg->sgpg_pa;
  2159. sgpge->sg_len = pgcumsz;
  2160. bfa_sge_to_le(sgpge);
  2161. sge_id = 0;
  2162. pgcumsz = 0;
  2163. }
  2164. }
  2165. }
  2166. if (ioim->nsges > BFI_SGE_INLINE) {
  2167. sge->sga = ioim->sgpg->sgpg_pa;
  2168. } else {
  2169. sge->sga.a32.addr_lo = 0;
  2170. sge->sga.a32.addr_hi = 0;
  2171. }
  2172. sge->sg_len = pgdlen;
  2173. sge->flags = BFI_SGE_PGDLEN;
  2174. bfa_sge_to_be(sge);
  2175. /*
  2176. * set up I/O command parameters
  2177. */
  2178. m->cmnd = cmnd_z0;
  2179. int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
  2180. dmadir = cmnd->sc_data_direction;
  2181. if (dmadir == DMA_TO_DEVICE)
  2182. m->cmnd.iodir = FCP_IODIR_WRITE;
  2183. else if (dmadir == DMA_FROM_DEVICE)
  2184. m->cmnd.iodir = FCP_IODIR_READ;
  2185. else
  2186. m->cmnd.iodir = FCP_IODIR_NONE;
  2187. m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
  2188. fcp_dl = scsi_bufflen(cmnd);
  2189. m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
  2190. /*
  2191. * set up I/O message header
  2192. */
  2193. switch (m->cmnd.iodir) {
  2194. case FCP_IODIR_READ:
  2195. bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
  2196. bfa_stats(itnim, input_reqs);
  2197. ioim->itnim->stats.rd_throughput += fcp_dl;
  2198. break;
  2199. case FCP_IODIR_WRITE:
  2200. bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
  2201. bfa_stats(itnim, output_reqs);
  2202. ioim->itnim->stats.wr_throughput += fcp_dl;
  2203. break;
  2204. case FCP_IODIR_RW:
  2205. bfa_stats(itnim, input_reqs);
  2206. bfa_stats(itnim, output_reqs);
  2207. fallthrough;
  2208. default:
  2209. bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
  2210. }
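	/*
	 * Sequence level error recovery or a transfer length that is not a
	 * multiple of 4 bytes forces the generic I/O message class.
	 */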
  2211. if (itnim->seq_rec ||
  2212. (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
  2213. bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
  2214. /*
  2215. * queue I/O message to firmware
  2216. */
  2217. bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
  2218. return BFA_TRUE;
  2219. }
/*
 * Set up any additional SG pages needed. The inline SG element is set up
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16 nsgpgs;

	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}
/*
 * Send I/O abort request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
	return BFA_TRUE;
}
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}

static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
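	/*
	 * If this IO is not under a TM scope and delayed completions are
	 * enabled while the path TOV timer is running, park it on the itnim
	 * delay_comp_q so it is failed back later, either with PATHTOV status
	 * or a normal failure, depending on whether the timer fires.
	 */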
	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_wc_down(&ioim->iosp->tskim->wc);
}
  2303. static bfa_boolean_t
  2304. bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
  2305. {
  2306. if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
  2307. (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
  2308. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
  2309. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
  2310. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
  2311. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
  2312. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
  2313. return BFA_FALSE;
  2314. return BFA_TRUE;
  2315. }
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If the path tov timer expired, fail back with PATHTOV status - these
	 * IO requests are not normally retried by the IO stack.
	 *
	 * Otherwise the device came back online; fail the IO with a normal
	 * failed status so that the IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}
  2340. /*
  2341. * Memory allocation and initialization.
  2342. */
  2343. void
  2344. bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
  2345. {
  2346. struct bfa_ioim_s *ioim;
  2347. struct bfa_fcp_mod_s *fcp = fcpim->fcp;
  2348. struct bfa_ioim_sp_s *iosp;
  2349. u16 i;
  2350. /*
  2351. * claim memory first
  2352. */
  2353. ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
  2354. fcpim->ioim_arr = ioim;
  2355. bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
  2356. iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
  2357. fcpim->ioim_sp_arr = iosp;
  2358. bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
  2359. /*
  2360. * Initialize ioim free queues
  2361. */
  2362. INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
  2363. INIT_LIST_HEAD(&fcpim->ioim_comp_q);
  2364. for (i = 0; i < fcpim->fcp->num_ioim_reqs;
  2365. i++, ioim++, iosp++) {
  2366. /*
  2367. * initialize IOIM
  2368. */
  2369. memset(ioim, 0, sizeof(struct bfa_ioim_s));
  2370. ioim->iotag = i;
  2371. ioim->bfa = fcpim->bfa;
  2372. ioim->fcpim = fcpim;
  2373. ioim->iosp = iosp;
  2374. INIT_LIST_HEAD(&ioim->sgpg_q);
  2375. bfa_reqq_winit(&ioim->iosp->reqq_wait,
  2376. bfa_ioim_qresume, ioim);
  2377. bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
  2378. bfa_ioim_sgpg_alloced, ioim);
  2379. bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
  2380. }
  2381. }
  2382. void
  2383. bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  2384. {
  2385. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  2386. struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
  2387. struct bfa_ioim_s *ioim;
  2388. u16 iotag;
  2389. enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
  2390. iotag = be16_to_cpu(rsp->io_tag);
  2391. ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
  2392. WARN_ON(ioim->iotag != iotag);
  2393. bfa_trc(ioim->bfa, ioim->iotag);
  2394. bfa_trc(ioim->bfa, rsp->io_status);
  2395. bfa_trc(ioim->bfa, rsp->reuse_io_tag);
  2396. if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
  2397. ioim->iosp->comp_rspmsg = *m;
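	/*
	 * Map the firmware completion status to a state machine event; the
	 * reuse_io_tag flag decides whether the IO tag can be recycled
	 * right away (COMP) or only after a later resource free from
	 * firmware (DONE).
	 */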
  2398. switch (rsp->io_status) {
  2399. case BFI_IOIM_STS_OK:
  2400. bfa_stats(ioim->itnim, iocomp_ok);
  2401. if (rsp->reuse_io_tag == 0)
  2402. evt = BFA_IOIM_SM_DONE;
  2403. else
  2404. evt = BFA_IOIM_SM_COMP;
  2405. break;
  2406. case BFI_IOIM_STS_TIMEDOUT:
  2407. bfa_stats(ioim->itnim, iocomp_timedout);
  2408. fallthrough;
  2409. case BFI_IOIM_STS_ABORTED:
  2410. rsp->io_status = BFI_IOIM_STS_ABORTED;
  2411. bfa_stats(ioim->itnim, iocomp_aborted);
  2412. if (rsp->reuse_io_tag == 0)
  2413. evt = BFA_IOIM_SM_DONE;
  2414. else
  2415. evt = BFA_IOIM_SM_COMP;
  2416. break;
  2417. case BFI_IOIM_STS_PROTO_ERR:
  2418. bfa_stats(ioim->itnim, iocom_proto_err);
  2419. WARN_ON(!rsp->reuse_io_tag);
  2420. evt = BFA_IOIM_SM_COMP;
  2421. break;
  2422. case BFI_IOIM_STS_SQER_NEEDED:
  2423. bfa_stats(ioim->itnim, iocom_sqer_needed);
  2424. WARN_ON(rsp->reuse_io_tag != 0);
  2425. evt = BFA_IOIM_SM_SQRETRY;
  2426. break;
  2427. case BFI_IOIM_STS_RES_FREE:
  2428. bfa_stats(ioim->itnim, iocom_res_free);
  2429. evt = BFA_IOIM_SM_FREE;
  2430. break;
  2431. case BFI_IOIM_STS_HOST_ABORTED:
  2432. bfa_stats(ioim->itnim, iocom_hostabrts);
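		/*
		 * Ignore stale abort completions: only the response matching
		 * the most recent abort_tag is processed.
		 */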
  2433. if (rsp->abort_tag != ioim->abort_tag) {
  2434. bfa_trc(ioim->bfa, rsp->abort_tag);
  2435. bfa_trc(ioim->bfa, ioim->abort_tag);
  2436. return;
  2437. }
  2438. if (rsp->reuse_io_tag)
  2439. evt = BFA_IOIM_SM_ABORT_COMP;
  2440. else
  2441. evt = BFA_IOIM_SM_ABORT_DONE;
  2442. break;
  2443. case BFI_IOIM_STS_UTAG:
  2444. bfa_stats(ioim->itnim, iocom_utags);
  2445. evt = BFA_IOIM_SM_COMP_UTAG;
  2446. break;
  2447. default:
  2448. WARN_ON(1);
  2449. }
  2450. bfa_sm_send_event(ioim, evt);
  2451. }
  2452. void
  2453. bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  2454. {
  2455. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  2456. struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
  2457. struct bfa_ioim_s *ioim;
  2458. u16 iotag;
  2459. iotag = be16_to_cpu(rsp->io_tag);
  2460. ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
  2461. WARN_ON(ioim->iotag != iotag);
  2462. bfa_ioim_cb_profile_comp(fcpim, ioim);
  2463. bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
  2464. }
  2465. /*
  2466. * Called by itnim to clean up IO while going offline.
  2467. */
  2468. void
  2469. bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
  2470. {
  2471. bfa_trc(ioim->bfa, ioim->iotag);
  2472. bfa_stats(ioim->itnim, io_cleanups);
  2473. ioim->iosp->tskim = NULL;
  2474. bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
  2475. }
  2476. void
  2477. bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
  2478. {
  2479. bfa_trc(ioim->bfa, ioim->iotag);
  2480. bfa_stats(ioim->itnim, io_tmaborts);
  2481. ioim->iosp->tskim = tskim;
  2482. bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
  2483. }
  2484. /*
  2485. * IOC failure handling.
  2486. */
  2487. void
  2488. bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
  2489. {
  2490. bfa_trc(ioim->bfa, ioim->iotag);
  2491. bfa_stats(ioim->itnim, io_iocdowns);
  2492. bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
  2493. }
/*
 * IO offline TOV timer expired. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}

/*
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_ioim_s *ioim;
	struct bfa_iotag_s *iotag = NULL;

	/*
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
	if (!iotag) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);

	return ioim;
}
  2531. void
  2532. bfa_ioim_free(struct bfa_ioim_s *ioim)
  2533. {
  2534. struct bfa_fcpim_s *fcpim = ioim->fcpim;
  2535. struct bfa_iotag_s *iotag;
  2536. if (ioim->nsgpgs > 0)
  2537. bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
  2538. bfa_stats(ioim->itnim, io_comps);
  2539. fcpim->ios_active--;
  2540. ioim->iotag &= BFA_IOIM_IOTAG_MASK;
  2541. WARN_ON(!(ioim->iotag <
  2542. (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
  2543. iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
  2544. if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
  2545. list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
  2546. else
  2547. list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
  2548. list_del(&ioim->qe);
  2549. }
  2550. void
  2551. bfa_ioim_start(struct bfa_ioim_s *ioim)
  2552. {
  2553. bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
  2554. /*
  2555. * Obtain the queue over which this request has to be issued
  2556. */
  2557. ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
  2558. BFA_FALSE : bfa_itnim_get_reqq(ioim);
  2559. bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
  2560. }
  2561. /*
  2562. * Driver I/O abort request.
  2563. */
  2564. bfa_status_t
  2565. bfa_ioim_abort(struct bfa_ioim_s *ioim)
  2566. {
  2567. bfa_trc(ioim->bfa, ioim->iotag);
  2568. if (!bfa_ioim_is_abortable(ioim))
  2569. return BFA_STATUS_FAILED;
  2570. bfa_stats(ioim->itnim, io_aborts);
  2571. bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
  2572. return BFA_STATUS_OK;
  2573. }
  2574. /*
  2575. * BFA TSKIM state machine functions
  2576. */
  2577. /*
  2578. * Task management command beginning state.
  2579. */
  2580. static void
  2581. bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2582. {
  2583. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2584. switch (event) {
  2585. case BFA_TSKIM_SM_START:
  2586. bfa_sm_set_state(tskim, bfa_tskim_sm_active);
  2587. bfa_tskim_gather_ios(tskim);
  2588. /*
  2589. * If device is offline, do not send TM on wire. Just cleanup
  2590. * any pending IO requests and complete TM request.
  2591. */
  2592. if (!bfa_itnim_is_online(tskim->itnim)) {
  2593. bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
  2594. tskim->tsk_status = BFI_TSKIM_STS_OK;
  2595. bfa_tskim_cleanup_ios(tskim);
  2596. return;
  2597. }
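		/*
		 * Send the TM request; if the request queue is full, wait for
		 * queue space and resume via BFA_TSKIM_SM_QRESUME.
		 */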
  2598. if (!bfa_tskim_send(tskim)) {
  2599. bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
  2600. bfa_stats(tskim->itnim, tm_qwait);
  2601. bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
  2602. &tskim->reqq_wait);
  2603. }
  2604. break;
  2605. default:
  2606. bfa_sm_fault(tskim->bfa, event);
  2607. }
  2608. }
  2609. /*
  2610. * TM command is active, awaiting completion from firmware to
  2611. * cleanup IO requests in TM scope.
  2612. */
  2613. static void
  2614. bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2615. {
  2616. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2617. switch (event) {
  2618. case BFA_TSKIM_SM_DONE:
  2619. bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
  2620. bfa_tskim_cleanup_ios(tskim);
  2621. break;
  2622. case BFA_TSKIM_SM_CLEANUP:
  2623. bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
  2624. if (!bfa_tskim_send_abort(tskim)) {
  2625. bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
  2626. bfa_stats(tskim->itnim, tm_qwait);
  2627. bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
  2628. &tskim->reqq_wait);
  2629. }
  2630. break;
  2631. case BFA_TSKIM_SM_HWFAIL:
  2632. bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
  2633. bfa_tskim_iocdisable_ios(tskim);
  2634. bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
  2635. break;
  2636. default:
  2637. bfa_sm_fault(tskim->bfa, event);
  2638. }
  2639. }
  2640. /*
  2641. * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
  2642. * completion event from firmware.
  2643. */
  2644. static void
  2645. bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2646. {
  2647. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2648. switch (event) {
  2649. case BFA_TSKIM_SM_DONE:
  2650. /*
  2651. * Ignore and wait for ABORT completion from firmware.
  2652. */
  2653. break;
  2654. case BFA_TSKIM_SM_UTAG:
  2655. case BFA_TSKIM_SM_CLEANUP_DONE:
  2656. bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
  2657. bfa_tskim_cleanup_ios(tskim);
  2658. break;
  2659. case BFA_TSKIM_SM_HWFAIL:
  2660. bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
  2661. bfa_tskim_iocdisable_ios(tskim);
  2662. bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
  2663. break;
  2664. default:
  2665. bfa_sm_fault(tskim->bfa, event);
  2666. }
  2667. }
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;
	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;
	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;
	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
  2692. /*
  2693. * Task management command is waiting for room in request CQ
  2694. */
  2695. static void
  2696. bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2697. {
  2698. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2699. switch (event) {
  2700. case BFA_TSKIM_SM_QRESUME:
  2701. bfa_sm_set_state(tskim, bfa_tskim_sm_active);
  2702. bfa_tskim_send(tskim);
  2703. break;
  2704. case BFA_TSKIM_SM_CLEANUP:
  2705. /*
  2706. * No need to send TM on wire since ITN is offline.
  2707. */
  2708. bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
  2709. bfa_reqq_wcancel(&tskim->reqq_wait);
  2710. bfa_tskim_cleanup_ios(tskim);
  2711. break;
  2712. case BFA_TSKIM_SM_HWFAIL:
  2713. bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
  2714. bfa_reqq_wcancel(&tskim->reqq_wait);
  2715. bfa_tskim_iocdisable_ios(tskim);
  2716. bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
  2717. break;
  2718. default:
  2719. bfa_sm_fault(tskim->bfa, event);
  2720. }
  2721. }
/*
 * Task management command is active, awaiting room in the request CQ
 * to send the cleanup request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		fallthrough;
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;
	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;
	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
  2749. /*
  2750. * BFA callback is pending
  2751. */
  2752. static void
  2753. bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2754. {
  2755. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2756. switch (event) {
  2757. case BFA_TSKIM_SM_HCB:
  2758. bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
  2759. bfa_tskim_free(tskim);
  2760. break;
  2761. case BFA_TSKIM_SM_CLEANUP:
  2762. bfa_tskim_notify_comp(tskim);
  2763. break;
  2764. case BFA_TSKIM_SM_HWFAIL:
  2765. break;
  2766. default:
  2767. bfa_sm_fault(tskim->bfa, event);
  2768. }
  2769. }
  2770. static void
  2771. __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
  2772. {
  2773. struct bfa_tskim_s *tskim = cbarg;
  2774. if (!complete) {
  2775. bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
  2776. return;
  2777. }
  2778. bfa_stats(tskim->itnim, tm_success);
  2779. bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
  2780. }
  2781. static void
  2782. __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
  2783. {
  2784. struct bfa_tskim_s *tskim = cbarg;
  2785. if (!complete) {
  2786. bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
  2787. return;
  2788. }
  2789. bfa_stats(tskim->itnim, tm_failures);
  2790. bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
  2791. BFI_TSKIM_STS_FAILED);
  2792. }
  2793. static bfa_boolean_t
  2794. bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
  2795. {
  2796. switch (tskim->tm_cmnd) {
  2797. case FCP_TM_TARGET_RESET:
  2798. return BFA_TRUE;
  2799. case FCP_TM_ABORT_TASK_SET:
  2800. case FCP_TM_CLEAR_TASK_SET:
  2801. case FCP_TM_LUN_RESET:
  2802. case FCP_TM_CLEAR_ACA:
  2803. return !memcmp(&tskim->lun, &lun, sizeof(lun));
  2804. default:
  2805. WARN_ON(1);
  2806. }
  2807. return BFA_FALSE;
  2808. }

/*
 * Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;
	struct scsi_cmnd *cmnd;
	struct scsi_lun scsilun;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Fail back any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}

/*
 * Cleanup IO requests gathered for this task management command.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	bfa_wc_wait(&tskim->wc);
}

/*
 * Send task management request to firmware.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_fn_lpu(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Send an abort request to firmware to clean up an active TM command.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_fn_lpu(tskim->bfa));
	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Called to resume a task management command waiting for room in the
 * request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}

/*
 * Cleanup IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}

/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}

/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}

/*
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	u16 i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);
	INIT_LIST_HEAD(&fcpim->tskim_unused_q);

	tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa = fcpim->bfa;
		tskim->fcpim = fcpim;
		tskim->notify = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
					tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) tskim;
}
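
/*
 * Handle task management response messages from firmware.
 */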
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	WARN_ON(tskim->tsk_tag != tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}
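
/*
 * Allocate a TM command from the free queue, if one is available.
 */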
struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (tskim)
		tskim->dtsk = dtsk;

	return tskim;
}
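
/*
 * Return a TM command to the free queue.
 */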
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}

/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
			struct scsi_lun lun,
			enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
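
/*
 * Park TM command resources beyond the firmware-supported count on the
 * unused queue.
 */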
void
bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe;
	int i;

	for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
		bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
		list_add_tail(qe, &fcpim->tskim_unused_q);
	}
}
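
/*
 * Compute KVA and DMA (sense buffer) memory requirements for the FCP module.
 */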
void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16 nsegs, idx, per_seg_ios, num_io_req;
	u32 km_len = 0;

	/*
	 * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value.
	 * So if the values are non zero, adjust them appropriately.
	 */
	if (cfg->fwcfg.num_ioim_reqs &&
	    cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
		cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	if (num_io_req > BFA_IO_MAX) {
		if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
			cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
			cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
		} else if (cfg->fwcfg.num_fwtio_reqs)
			cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
		else
			cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
	}

	bfa_fcpim_meminfo(cfg, &km_len);

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	km_len += num_io_req * sizeof(struct bfa_iotag_s);
	km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
	per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
		if (num_io_req >= per_seg_ios) {
			num_io_req -= per_seg_ios;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_ios * BFI_IOIM_SNSLEN);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_io_req * BFI_IOIM_SNSLEN);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}
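
/*
 * FCP module attach: set up sense-buffer DMA areas and initialize the
 * fcpim, IO tag, and ITN sub-modules.
 */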
void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16 idx, nsegs, num_io_req;

	fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
	fcp->num_itns = cfg->fwcfg.num_rports;
	fcp->bfa = bfa;

	/*
	 * Setup the pool of snsbase addr's, that is passed to fw as
	 * part of bfi_iocfc_cfg_s.
	 */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
		fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
		bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
	}

	fcp->throttle_update_required = 1;
	bfa_fcpim_attach(fcp, bfad, cfg, pcidev);

	bfa_iotag_attach(fcp);

	fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
	bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
			(fcp->num_itns * sizeof(struct bfa_itn_s));
	memset(fcp->itn_arr, 0,
			(fcp->num_itns * sizeof(struct bfa_itn_s)));
}

void
bfa_fcp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	bfa_fcpim_iocdisable(fcp);
}
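
/*
 * Reconfigure IO tag resources to match the count supported by firmware.
 */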
void
bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
{
	struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
	struct list_head *qe;
	int i;

	/* Update io throttle value only once during driver load time */
	if (!mod->throttle_update_required)
		return;

	for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
		bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
		list_add_tail(qe, &mod->iotag_unused_q);
	}

	if (mod->num_ioim_reqs != num_ioim_fw) {
		bfa_trc(bfa, mod->num_ioim_reqs);
		bfa_trc(bfa, num_ioim_fw);
	}

	mod->max_ioim_reqs = max_ioim_fw;
	mod->num_ioim_reqs = num_ioim_fw;
	mod->throttle_update_required = 0;
}
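
/*
 * Register the ISR to be invoked for ITN messages destined for this rport.
 */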
void
bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_itn_s *itn;

	itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
	itn->isr = isr;
}

/*
 * Itn interrupt processing.
 */
void
bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itn_s *itn;

	msg.msg = m;
	itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);

	if (itn->isr)
		itn->isr(bfa, m);
	else
		WARN_ON(1);
}
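
/*
 * Carve out IO tags for IOIM and TIO requests from module KVA memory.
 */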
void
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_iotag_s *iotag;
	u16 num_io_req, i;

	iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
	fcp->iotag_arr = iotag;

	INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
	INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
	INIT_LIST_HEAD(&fcp->iotag_unused_q);

	num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
	for (i = 0; i < num_io_req; i++, iotag++) {
		memset(iotag, 0, sizeof(struct bfa_iotag_s));
		iotag->tag = i;
		if (i < fcp->num_ioim_reqs)
			list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
		else
			list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) iotag;
}

/*
 * To send the config request, first try to use the throttle value from
 * flash; if it is 0, fall back to the driver parameter. The value is
 * capped at min(flash_val, drv_val) because memory allocation was done
 * based on the driver-configured value.
 */
u16
bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
{
	u16 tmp;
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/*
	 * If a throttle value from flash is already in effect after the
	 * driver is loaded, always return the current value (rather than
	 * the actual flash value) until the next load.
	 */
	if (!fcp->throttle_update_required)
		return (u16)fcp->num_ioim_reqs;

	tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
	if (!tmp || (tmp > drv_cfg_param))
		tmp = drv_cfg_param;

	return tmp;
}
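
/*
 * Store a new throttle value in the driver configuration (dconf), unless
 * running in min-cfg mode.
 */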
bfa_status_t
bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
{
	if (!bfa_dconf_get_min_cfg(bfa)) {
		BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
		BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
		return BFA_STATUS_OK;
	}

	return BFA_STATUS_FAILED;
}
u16
bfa_fcpim_read_throttle(struct bfa_s *bfa)
{
	struct bfa_throttle_cfg_s *throttle_cfg =
			&(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);

	return ((!bfa_dconf_get_min_cfg(bfa)) ?
	       ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
}
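
/*
 * Set a new IO throttle value and persist it with a dconf update.
 */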
bfa_status_t
bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
{
	/* in min cfg no commands should run. */
	if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
	    (!bfa_dconf_read_data_valid(bfa)))
		return BFA_STATUS_FAILED;

	bfa_fcpim_write_throttle(bfa, value);

	return bfa_dconf_update(bfa);
}
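
/*
 * Report the current, configured, and maximum throttle values into the
 * caller-supplied buffer.
 */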
bfa_status_t
bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_defs_fcpim_throttle_s throttle;

	if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
	    (!bfa_dconf_read_data_valid(bfa)))
		return BFA_STATUS_FAILED;

	memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));

	throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
	throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
	if (!throttle.cfg_value)
		throttle.cfg_value = throttle.cur_value;
	throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
	memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));

	return BFA_STATUS_OK;
}