qla_edif.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell Fibre Channel HBA Driver
 * Copyright (c) 2021 Marvell
 */
#include "qla_def.h"
#include "qla_edif.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <scsi/scsi_tcq.h>

static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
		struct list_head *sa_list);
static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
		struct qla_sa_update_frame *sa_frame);
static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
		uint16_t sa_index);
static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
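
/*
 * One queued doorbell event for the authentication app.  The union is
 * keyed by ntype (the VND_CMD_AUTH_STATE_* codes); see how the node is
 * unpacked in qla_edif_consume_dbell().
 */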
struct edb_node {
	struct list_head	list;
	uint32_t		ntype;
	union {
		port_id_t	plogi_did;
		uint32_t	async;
		port_id_t	els_sid;
		struct edif_sa_update_aen	sa_aen;
	} u;
};

static struct els_sub_cmd {
	uint16_t cmd;
	const char *str;
} sc_str[] = {
	{SEND_ELS, "send ELS"},
	{SEND_ELS_REPLY, "send ELS Reply"},
	{PULL_ELS, "retrieve ELS"},
};
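
/* Map an ELS sub-command code to a printable name for log messages. */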
const char *sc_to_str(uint16_t cmd)
{
	int i;
	struct els_sub_cmd *e;

	for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
		e = sc_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}
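
/*
 * Pop the oldest doorbell event off the FIFO under db_lock; the caller
 * owns (and eventually frees) the returned node.
 */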
static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha)
{
	unsigned long	flags;
	struct edb_node	*edbnode = NULL;

	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);

	/* db nodes are fifo - no qualifications done */
	if (!list_empty(&vha->e_dbell.head)) {
		edbnode = list_first_entry(&vha->e_dbell.head,
					   struct edb_node, list);
		list_del_init(&edbnode->list);
	}

	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);

	return edbnode;
}

static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
{
	list_del_init(&node->list);
	kfree(node);
}

static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
		uint16_t handle)
{
	struct edif_list_entry *entry;
	struct edif_list_entry *tentry;
	struct list_head *indx_list = &fcport->edif.edif_indx_list;

	list_for_each_entry_safe(entry, tentry, indx_list, next) {
		if (entry->handle == handle)
			return entry;
	}
	return NULL;
}
/* timeout handler for a delayed rx sa_index delete, fired when there is no traffic */
static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
{
	struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
	fc_port_t *fcport = edif_entry->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct edif_sa_ctl *sa_ctl;
	uint16_t nport_handle;
	unsigned long flags = 0;

	ql_dbg(ql_dbg_edif, vha, 0x3069,
	    "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n",
	    __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);

	/*
	 * if delete_sa_index is valid then no one has serviced this
	 * delayed delete
	 */
	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);

	/*
	 * delete_sa_index is invalidated when we find the new sa_index in
	 * the incoming data stream.  If it is not invalidated then we are
	 * still looking for the new sa_index because there is no I/O and we
	 * need to just force the rx delete and move on.  Otherwise
	 * we could get another rekey which will result in an error 66.
	 */
	if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
		uint16_t delete_sa_index = edif_entry->delete_sa_index;

		edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
		nport_handle = edif_entry->handle;
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);

		sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
		    delete_sa_index, 0);

		if (sa_ctl) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
			    __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
			    nport_handle);

			sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
			set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
			qla_post_sa_replace_work(fcport->vha, fcport,
			    nport_handle, sa_ctl);
		} else {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl not found for delete_sa_index: %d\n",
			    __func__, edif_entry->delete_sa_index);
		}
	} else {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
	}
}
/*
 * create a new list entry for this nport handle and
 * add an sa_update index to the list - called for sa_update
 */
static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
		uint16_t sa_index, uint16_t handle)
{
	struct edif_list_entry *entry;
	unsigned long flags = 0;

	/* if the entry exists, then just update the sa_index */
	entry = qla_edif_list_find_sa_index(fcport, handle);
	if (entry) {
		entry->update_sa_index = sa_index;
		entry->count = 0;
		return 0;
	}

	/*
	 * This is the normal path - there should be no existing entry
	 * when update is called.  The exception is at startup
	 * when update is called for the first two sa_indexes
	 * followed by a delete of the first sa_index
	 */
	entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->next);
	entry->handle = handle;
	entry->update_sa_index = sa_index;
	entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
	entry->count = 0;
	entry->flags = 0;
	timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
	list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
	return 0;
}

/* remove an entry from the list */
static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
	list_del(&entry->next);
	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
}

int qla_post_sa_replace_work(struct scsi_qla_host *vha,
	fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.sa_update.fcport = fcport;
	e->u.sa_update.sa_ctl = sa_ctl;
	e->u.sa_update.nport_handle = nport_handle;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}
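
/* Reset the per-port eDIF rekey and byte counters when a session (re)starts. */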
static void
qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport)
{
	ql_dbg(ql_dbg_edif, vha, 0x2058,
	    "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
	    fcport->node_name, fcport->port_name, fcport->d_id.b24);

	fcport->edif.tx_rekey_cnt = 0;
	fcport->edif.rx_rekey_cnt = 0;

	fcport->edif.tx_bytes = 0;
	fcport->edif.rx_bytes = 0;
}
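
/*
 * Common gate for authentication ELS bsg requests: fail the job up front
 * if eDIF or the app doorbell is inactive, and service PULL_ELS (fetch a
 * pending ELS) inline.  Returns 0 if the caller should keep processing
 * the request, -EIO if the bsg job was already completed here.
 */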
static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
	fc_port_t *fcport)
{
	struct extra_auth_els *p;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_bsg_auth_els_request *req =
	    (struct qla_bsg_auth_els_request *)bsg_job->request;

	if (!vha->hw->flags.edif_enabled) {
		ql_dbg(ql_dbg_edif, vha, 0x9105,
		    "%s edif not enabled\n", __func__);
		goto done;
	}
	if (DBELL_INACTIVE(vha)) {
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s doorbell not enabled\n", __func__);
		goto done;
	}

	p = &req->e;

	/* Get response */
	if (p->sub_cmd == PULL_ELS) {
		struct qla_bsg_auth_els_reply *rpl =
			(struct qla_bsg_auth_els_reply *)bsg_job->reply;

		qla_pur_get_pending(vha, fcport, bsg_job);

		ql_dbg(ql_dbg_edif, vha, 0x911d,
			"%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
			__func__, sc_to_str(p->sub_cmd), fcport->port_name,
			fcport->d_id.b24, rpl->rx_xchg_address,
			rpl->r.reply_payload_rcv_len, bsg_job);

		goto done;
	}
	return 0;

done:
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return -EIO;
}
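
/**
 * qla2x00_find_fcport_by_pid(): find fcport by nport id (d_id)
 * @vha: host adapter pointer
 * @id: nport id to look up
 * Return: matching fc_port, or NULL if none is found
 */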
fc_port_t *
qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
{
	fc_port_t *f, *tf;

	f = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24)
			return f;
	}
	return NULL;
}
/**
 * qla_edif_app_check(): check for valid application id.
 * @vha: host adapter pointer
 * @appid: application id
 * Return: false = fail, true = pass
 */
static bool
qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
{
	/* check that the app is allowed/known to the driver */
	if (appid.app_vid != EDIF_APP_ID) {
		ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
		    __func__, appid.app_vid);
		return false;
	}

	if (appid.version != EDIF_VERSION1) {
		ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)",
		    __func__, appid.version);
		return false;
	}

	return true;
}
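
/*
 * Note: the sa_index space is split by direction - rx indexes sit below
 * EDIF_TX_SA_INDEX_BASE and tx indexes at or above it (see the dir
 * calculation in __qla2x00_release_all_sadb()); the literal 512 below
 * appears to mirror that base.
 */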
static void
qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
	int index)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
	list_del(&sa_ctl->next);
	spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);

	if (index >= 512)
		fcport->edif.tx_rekey_cnt--;
	else
		fcport->edif.rx_rekey_cnt--;

	kfree(sa_ctl);
}

/* return an index to the freepool */
static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
		uint16_t sa_index)
{
	void *sa_id_map;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u16 lsa_index = sa_index;

	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
	    "%s: entry\n", __func__);

	if (dir) {
		sa_id_map = ha->edif_tx_sa_id_map;
		lsa_index -= EDIF_TX_SA_INDEX_BASE;
	} else {
		sa_id_map = ha->edif_rx_sa_id_map;
	}

	spin_lock_irqsave(&ha->sadb_fp_lock, flags);
	clear_bit(lsa_index, sa_id_map);
	spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: index %d added to free pool\n", __func__, sa_index);
}
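
/*
 * Release both sa_index slots of one sadb entry for a port: free the
 * matching sa_ctl (if any), return the index to the free pool, and - on
 * the rx side - drop the tracking entry, cancelling a pending delayed
 * delete and posting the SAUPDATE completion event to the app doorbell.
 */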
static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
	struct fc_port *fcport, struct edif_sa_index_entry *entry,
	int pdir)
{
	struct edif_list_entry *edif_entry;
	struct edif_sa_ctl *sa_ctl;
	int i, dir;
	int key_cnt = 0;

	for (i = 0; i < 2; i++) {
		if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
			continue;

		if (fcport->loop_id != entry->handle) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
			    __func__, i, entry->handle, fcport->loop_id,
			    entry->sa_pair[i].sa_index);
		}

		/* release the sa_ctl */
		sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
		    entry->sa_pair[i].sa_index, pdir);
		if (sa_ctl &&
		    qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
			qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
		} else {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
		}

		/* Release the index */
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: freeing sa_index %d, nph: 0x%x\n",
		    __func__, entry->sa_pair[i].sa_index, entry->handle);

		dir = (entry->sa_pair[i].sa_index <
		    EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
		qla_edif_add_sa_index_to_freepool(fcport, dir,
		    entry->sa_pair[i].sa_index);

		/* Delete timer on RX */
		if (pdir != SAU_FLG_TX) {
			edif_entry =
				qla_edif_list_find_sa_index(fcport, entry->handle);
			if (edif_entry) {
				ql_dbg(ql_dbg_edif, vha, 0x5033,
				    "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
				    __func__, edif_entry, edif_entry->update_sa_index,
				    edif_entry->delete_sa_index);
				qla_edif_list_delete_sa_index(fcport, edif_entry);
				/*
				 * valid delete_sa_index indicates there is a rx
				 * delayed delete queued
				 */
				if (edif_entry->delete_sa_index !=
						INVALID_EDIF_SA_INDEX) {
					del_timer(&edif_entry->timer);

					/* build and send the aen */
					fcport->edif.rx_sa_set = 1;
					fcport->edif.rx_sa_pending = 0;
					qla_edb_eventcreate(vha,
							VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
							QL_VND_SA_STAT_SUCCESS,
							QL_VND_RX_SA_KEY, fcport);
				}
				ql_dbg(ql_dbg_edif, vha, 0x5033,
				    "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
				    __func__, edif_entry, edif_entry->update_sa_index,
				    edif_entry->delete_sa_index);

				kfree(edif_entry);
			}
		}
		key_cnt++;
	}
	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: %d %s keys released\n",
	    __func__, key_cnt, pdir ? "tx" : "rx");
}
/* find and release all outstanding sadb sa_indexes */
void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct edif_sa_index_entry *entry, *tmp;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
	    "%s: Starting...\n", __func__);

	spin_lock_irqsave(&ha->sadb_lock, flags);

	list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
		if (entry->fcport == fcport) {
			list_del(&entry->next);
			spin_unlock_irqrestore(&ha->sadb_lock, flags);
			__qla2x00_release_all_sadb(vha, fcport, entry, 0);
			kfree(entry);
			spin_lock_irqsave(&ha->sadb_lock, flags);
			break;
		}
	}

	list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
		if (entry->fcport == fcport) {
			list_del(&entry->next);
			spin_unlock_irqrestore(&ha->sadb_lock, flags);

			__qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);

			kfree(entry);

			spin_lock_irqsave(&ha->sadb_lock, flags);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);
}
/**
 * qla_edif_app_start: application has announced its presence
 * @vha: host adapter pointer
 * @bsg_job: user request
 *
 * Set/activate doorbell.  Reset current sessions and re-login with
 * secure flag.
 */
static int
qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	int32_t			rval = 0;
	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
	struct app_start	appstart;
	struct app_start_reply	appreply;
	struct fc_port		*fcport, *tf;

	ql_log(ql_log_info, vha, 0x1313,
	    "EDIF application registration with driver, FC device connections will be re-established.\n");

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &appstart,
	    sizeof(struct app_start));

	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
	    __func__, appstart.app_info.app_vid, appstart.app_start_flags);

	if (DBELL_INACTIVE(vha)) {
		/* mark doorbell as active since an app is now present */
		vha->e_dbell.db_flags |= EDB_ACTIVE;
	} else {
		goto out;
	}

	if (N2N_TOPO(vha->hw)) {
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
			fcport->n2n_link_reset_cnt = 0;

		if (vha->hw->flags.n2n_fw_acc_sec) {
			list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
				qla_edif_sa_ctl_init(vha, fcport);

			/*
			 * While authentication app was not running, remote device
			 * could still try to login with this local port.  Let's
			 * clear the state and try again.
			 */
			qla2x00_wait_for_sess_deletion(vha);

			/* bounce the link to get the other guy to relogin */
			if (!vha->hw->flags.n2n_bigger) {
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			qla2x00_wait_for_hba_online(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			qla2x00_wait_for_hba_online(vha);
		}
	} else {
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			ql_dbg(ql_dbg_edif, vha, 0x2058,
			    "FCSP - nn %8phN pn %8phN portid=%06x.\n",
			    fcport->node_name, fcport->port_name,
			    fcport->d_id.b24);

			ql_dbg(ql_dbg_edif, vha, 0xf084,
			    "%s: se_sess %p / sess %p from port %8phC "
			    "loop_id %#04x s_id %06x logout %d "
			    "keep %d els_logo %d disc state %d auth state %d "
			    "stop state %d\n",
			    __func__, fcport->se_sess, fcport,
			    fcport->port_name, fcport->loop_id,
			    fcport->d_id.b24, fcport->logout_on_delete,
			    fcport->keep_nport_handle, fcport->send_els_logo,
			    fcport->disc_state, fcport->edif.auth_state,
			    fcport->edif.app_stop);

			if (atomic_read(&vha->loop_state) == LOOP_DOWN)
				break;

			fcport->login_retry = vha->hw->login_retry_count;

			fcport->edif.app_stop = 0;
			fcport->edif.app_sess_online = 0;

			if (fcport->scan_state != QLA_FCPORT_FOUND)
				continue;

			if (fcport->port_type == FCT_UNKNOWN &&
			    !fcport->fc4_features)
				rval = qla24xx_async_gffid(vha, fcport, true);

			if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
			    fcport->port_type & (FCT_TARGET | FCT_NVME_TARGET)))
				continue;

			rval = 0;

			ql_dbg(ql_dbg_edif, vha, 0x911e,
			    "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
			    __func__, fcport->port_name);
			qlt_schedule_sess_for_deletion(fcport);
			qla_edif_sa_ctl_init(vha, fcport);
		}
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
		/* mark as active since an app is now present */
		vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
	} else {
		ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
		    __func__);
	}

out:
	appreply.host_support_edif = vha->hw->flags.edif_enabled;
	appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
	appreply.edif_edb_active = vha->e_dbell.db_flags;
	appreply.version = EDIF_VERSION1;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	SET_DID_STATUS(bsg_reply->result, DID_OK);

	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
							       bsg_job->reply_payload.sg_cnt,
							       &appreply,
							       sizeof(struct app_start_reply));

	ql_dbg(ql_dbg_edif, vha, 0x911d,
	    "%s app start completed with 0x%x\n",
	    __func__, rval);

	return rval;
}
/**
 * qla_edif_app_stop - app has announced it's exiting.
 * @vha: host adapter pointer
 * @bsg_job: user space command pointer
 *
 * Free any in flight messages, clear all doorbell events
 * to application.  Reject any message related to security.
 */
static int
qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	struct app_stop		appstop;
	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
	struct fc_port		*fcport, *tf;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &appstop,
	    sizeof(struct app_stop));

	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
	    __func__, appstop.app_info.app_vid);

	/* Call db stop and enode stop functions */

	/* if we leave this running short waits are operational < 16 secs */
	qla_enode_stop(vha);	/* stop enode */
	qla_edb_stop(vha);	/* stop db */

	list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
		if (!(fcport->flags & FCF_FCSP_DEVICE))
			continue;

		if (fcport->flags & FCF_FCSP_DEVICE) {
			ql_dbg(ql_dbg_edif, vha, 0xf084,
			    "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
			    __func__, fcport,
			    fcport->port_name, fcport->loop_id, fcport->d_id.b24,
			    fcport->logout_on_delete, fcport->keep_nport_handle,
			    fcport->send_els_logo);

			if (atomic_read(&vha->loop_state) == LOOP_DOWN)
				break;

			fcport->edif.app_stop = 1;
			ql_dbg(ql_dbg_edif, vha, 0x911e,
			    "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
			    __func__, fcport->port_name);

			fcport->send_els_logo = 1;
			qlt_schedule_sess_for_deletion(fcport);
		}
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	SET_DID_STATUS(bsg_reply->result, DID_OK);

	/* no return interface to app - it assumes we cleaned up ok */
	return 0;
}
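
/*
 * Check whether both the tx and rx SA indexes for this port have been
 * set; report the result in the app's plogi reply and clear the pending
 * state once both are in place.
 */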
static int
qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
		struct app_plogi_reply *appplogireply)
{
	int	ret = 0;

	if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
		    __func__, fcport->port_name, fcport->edif.tx_sa_set,
		    fcport->edif.rx_sa_set);
		appplogireply->prli_status = 0;
		ret = 1;
	} else {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s wwpn %8phC Both SA(s) updated.\n", __func__,
		    fcport->port_name);
		fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
		fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
		appplogireply->prli_status = 1;
	}
	return ret;
}
/**
 * qla_edif_app_authok - authentication by app succeeded.  Driver can proceed
 *   with prli
 * @vha: host adapter pointer
 * @bsg_job: user request
 */
static int
qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	struct auth_complete_cmd appplogiok;
	struct app_plogi_reply	appplogireply = {0};
	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
	fc_port_t		*fcport = NULL;
	port_id_t		portid = {0};

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &appplogiok,
	    sizeof(struct auth_complete_cmd));

	/* silent unaligned access warning */
	portid.b.domain = appplogiok.u.d_id.b.domain;
	portid.b.area   = appplogiok.u.d_id.b.area;
	portid.b.al_pa  = appplogiok.u.d_id.b.al_pa;

	appplogireply.version = EDIF_VERSION1;
	switch (appplogiok.type) {
	case PL_TYPE_WWPN:
		fcport = qla2x00_find_fcport_by_wwpn(vha,
		    appplogiok.u.wwpn, 0);
		if (!fcport)
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s wwpn lookup failed: %8phC\n",
			    __func__, appplogiok.u.wwpn);
		break;
	case PL_TYPE_DID:
		fcport = qla2x00_find_fcport_by_pid(vha, &portid);
		if (!fcport)
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s d_id lookup failed: %x\n", __func__,
			    portid.b24);
		break;
	default:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s undefined type: %x\n", __func__,
		    appplogiok.type);
		break;
	}

	if (!fcport) {
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto errstate_exit;
	}

	/*
	 * if port is online then this is a REKEY operation
	 * Only do sa update checking
	 */
	if (atomic_read(&fcport->state) == FCS_ONLINE) {
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s Skipping PRLI complete based on rekey\n", __func__);
		appplogireply.prli_status = 1;
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
		goto errstate_exit;
	}

	/* make sure in AUTH_PENDING or else reject */
	if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s wwpn %8phC is not in auth pending state (%x)\n",
		    __func__, fcport->port_name, fcport->disc_state);
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		appplogireply.prli_status = 0;
		goto errstate_exit;
	}

	SET_DID_STATUS(bsg_reply->result, DID_OK);
	appplogireply.prli_status = 1;
	fcport->edif.authok = 1;
	if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
		    __func__, fcport->port_name, fcport->edif.tx_sa_set,
		    fcport->edif.rx_sa_set);
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		appplogireply.prli_status = 0;
		goto errstate_exit;

	} else {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s wwpn %8phC Both SA(s) updated.\n", __func__,
		    fcport->port_name);
		fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
		fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
	}

	if (qla_ini_mode_enabled(vha)) {
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
		    __func__, fcport->port_name);
		qla24xx_post_prli_work(vha, fcport);
	}

errstate_exit:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
							       bsg_job->reply_payload.sg_cnt,
							       &appplogireply,
							       sizeof(struct app_plogi_reply));

	return 0;
}
/**
 * qla_edif_app_authfail - authentication by app has failed.  Driver is given
 *   notice to tear down current session.
 * @vha: host adapter pointer
 * @bsg_job: user request
 */
static int
qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	int32_t			rval = 0;
	struct auth_complete_cmd appplogifail;
	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
	fc_port_t		*fcport = NULL;
	port_id_t		portid = {0};

	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &appplogifail,
	    sizeof(struct auth_complete_cmd));

	/* silent unaligned access warning */
	portid.b.domain = appplogifail.u.d_id.b.domain;
	portid.b.area   = appplogifail.u.d_id.b.area;
	portid.b.al_pa  = appplogifail.u.d_id.b.al_pa;

	/*
	 * TODO: edif: app has failed this plogi.  Inform driver to
	 * take any action (if any).
	 */
	switch (appplogifail.type) {
	case PL_TYPE_WWPN:
		fcport = qla2x00_find_fcport_by_wwpn(vha,
		    appplogifail.u.wwpn, 0);
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		break;
	case PL_TYPE_DID:
		fcport = qla2x00_find_fcport_by_pid(vha, &portid);
		if (!fcport)
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s d_id lookup failed: %x\n", __func__,
			    portid.b24);
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		break;
	default:
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s undefined type: %x\n", __func__,
		    appplogifail.type);
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		rval = -1;
		break;
	}

	ql_dbg(ql_dbg_edif, vha, 0x911d,
	    "%s fcport is 0x%p\n", __func__, fcport);

	if (fcport) {
		/* set/reset edif values and flags */
		ql_dbg(ql_dbg_edif, vha, 0x911e,
		    "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
		    __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);

		if (qla_ini_mode_enabled(fcport->vha)) {
			fcport->send_els_logo = 1;
			qlt_schedule_sess_for_deletion(fcport);
		}
	}

	return rval;
}
/**
 * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
 *   [initiator|target] mode).  It can request a specific session by nport id
 *   or all sessions.
 * @vha: host adapter pointer
 * @bsg_job: user request pointer
 */
static int
qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	int32_t			rval = 0;
	int32_t			pcnt = 0;
	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
	struct app_pinfo_req	app_req;
	struct app_pinfo_reply	*app_reply;
	port_id_t		tdid;

	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &app_req,
	    sizeof(struct app_pinfo_req));

	app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
	    sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL);

	if (!app_reply) {
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		rval = -1;
	} else {
		struct fc_port	*fcport = NULL, *tf;

		app_reply->version = EDIF_VERSION1;

		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!(fcport->flags & FCF_FCSP_DEVICE))
				continue;

			tdid.b.domain = app_req.remote_pid.domain;
			tdid.b.area = app_req.remote_pid.area;
			tdid.b.al_pa = app_req.remote_pid.al_pa;

			ql_dbg(ql_dbg_edif, vha, 0x2058,
			    "APP request entry - portid=%06x.\n", tdid.b24);

			/* Ran out of space */
			if (pcnt >= app_req.num_ports)
				break;

			if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
				continue;

			if (!N2N_TOPO(vha->hw)) {
				if (fcport->scan_state != QLA_FCPORT_FOUND)
					continue;

				if (fcport->port_type == FCT_UNKNOWN &&
				    !fcport->fc4_features)
					rval = qla24xx_async_gffid(vha, fcport,
								   true);

				if (!rval &&
				    !(fcport->fc4_features & FC4_FF_TARGET ||
				      fcport->port_type &
				      (FCT_TARGET | FCT_NVME_TARGET)))
					continue;
			}

			rval = 0;

			app_reply->ports[pcnt].version = EDIF_VERSION1;
			app_reply->ports[pcnt].remote_type =
				VND_CMD_RTYPE_UNKNOWN;
			if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
				app_reply->ports[pcnt].remote_type |=
					VND_CMD_RTYPE_TARGET;
			if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
				app_reply->ports[pcnt].remote_type |=
					VND_CMD_RTYPE_INITIATOR;

			app_reply->ports[pcnt].remote_pid = fcport->d_id;

			ql_dbg(ql_dbg_edif, vha, 0x2058,
			    "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
			    fcport->node_name, fcport->port_name, pcnt,
			    fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE);

			switch (fcport->edif.auth_state) {
			case VND_CMD_AUTH_STATE_ELS_RCVD:
				if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
					fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
					app_reply->ports[pcnt].auth_state =
						VND_CMD_AUTH_STATE_NEEDED;
				} else {
					app_reply->ports[pcnt].auth_state =
						VND_CMD_AUTH_STATE_ELS_RCVD;
				}
				break;
			default:
				app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
				break;
			}

			memcpy(app_reply->ports[pcnt].remote_wwpn,
			    fcport->port_name, 8);

			app_reply->ports[pcnt].remote_state =
				(atomic_read(&fcport->state) ==
				    FCS_ONLINE ? 1 : 0);

			pcnt++;

			if (tdid.b24 != 0)
				break;
		}
		app_reply->port_count = pcnt;
		SET_DID_STATUS(bsg_reply->result, DID_OK);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
							       bsg_job->reply_payload.sg_cnt,
							       app_reply,
							       sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt);

	kfree(app_reply);

	return rval;
}
/**
 * qla_edif_app_getstats - app would like to read various statistics info
 * @vha: host adapter pointer
 * @bsg_job: user request
 */
static int32_t
qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	int32_t			rval = 0;
	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
	uint32_t		size;

	struct app_sinfo_req	app_req;
	struct app_stats_reply	*app_reply;
	uint32_t		pcnt = 0;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &app_req,
	    sizeof(struct app_sinfo_req));

	if (app_req.num_ports == 0) {
		ql_dbg(ql_dbg_async, vha, 0x911d,
		    "%s app did not indicate number of ports to return\n",
		    __func__);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		rval = -1;
	}

	size = sizeof(struct app_stats_reply) +
	    (sizeof(struct app_sinfo) * app_req.num_ports);

	app_reply = kzalloc(size, GFP_KERNEL);
	if (!app_reply) {
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		rval = -1;
	} else {
		struct fc_port	*fcport = NULL, *tf;

		app_reply->version = EDIF_VERSION1;

		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (fcport->edif.enable) {
				if (pcnt > app_req.num_ports)
					break;

				app_reply->elem[pcnt].rekey_count =
				    fcport->edif.rekey_cnt;
				app_reply->elem[pcnt].tx_bytes =
				    fcport->edif.tx_bytes;
				app_reply->elem[pcnt].rx_bytes =
				    fcport->edif.rx_bytes;

				memcpy(app_reply->elem[pcnt].remote_wwpn,
				    fcport->port_name, 8);

				pcnt++;
			}
		}
		app_reply->elem_count = pcnt;
		SET_DID_STATUS(bsg_reply->result, DID_OK);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	       bsg_job->reply_payload.sg_cnt, app_reply,
	       sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt));

	kfree(app_reply);

	return rval;
}
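
/*
 * Consume an AEN acknowledgment from the app.  Only the session-shutdown
 * ack is recorded (sess_down_acked); other event codes fall through.
 */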
static int32_t
qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	struct fc_port *fcport;
	struct aen_complete_cmd ack;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &ack, sizeof(ack));

	ql_dbg(ql_dbg_edif, vha, 0x70cf,
	    "%s: %06x event_code %x\n",
	    __func__, ack.port_id.b24, ack.event_code);

	fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id);
	SET_DID_STATUS(bsg_reply->result, DID_OK);

	if (!fcport) {
		ql_dbg(ql_dbg_edif, vha, 0x70cf,
		    "%s: unable to find fcport %06x\n",
		    __func__, ack.port_id.b24);
		return 0;
	}

	switch (ack.event_code) {
	case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
		fcport->edif.sess_down_acked = 1;
		break;
	default:
		break;
	}
	return 0;
}
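
/*
 * Drain queued doorbell events into the bsg reply buffer.  Each record is
 * written back-to-back as:
 *
 *	u32 event_code | u32 event_data_size | event data (dat_size bytes)
 *
 * (the "8" below covers the two u32 header fields).  Draining stops when
 * the remaining buffer cannot hold another node or the FIFO is empty.
 */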
static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	u32 sg_skip, reply_payload_len;
	bool keep;
	struct edb_node *dbnode = NULL;
	struct edif_app_dbell ap;
	int dat_size = 0;

	sg_skip = 0;
	reply_payload_len = bsg_job->reply_payload.payload_len;

	while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) {
		dbnode = qla_edb_getnext(vha);
		if (dbnode) {
			keep = true;
			dat_size = 0;
			ap.event_code = dbnode->ntype;
			switch (dbnode->ntype) {
			case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
			case VND_CMD_AUTH_STATE_NEEDED:
				ap.port_id = dbnode->u.plogi_did;
				dat_size += sizeof(ap.port_id);
				break;
			case VND_CMD_AUTH_STATE_ELS_RCVD:
				ap.port_id = dbnode->u.els_sid;
				dat_size += sizeof(ap.port_id);
				break;
			case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
				ap.port_id = dbnode->u.sa_aen.port_id;
				memcpy(&ap.event_data, &dbnode->u,
				    sizeof(struct edif_sa_update_aen));
				dat_size += sizeof(struct edif_sa_update_aen);
				break;
			default:
				keep = false;
				ql_log(ql_log_warn, vha, 0x09102,
				    "%s unknown DB type=%d %p\n",
				    __func__, dbnode->ntype, dbnode);
				break;
			}
			ap.event_data_size = dat_size;
			/* 8 = sizeof(ap.event_code + ap.event_data_size) */
			dat_size += 8;
			if (keep)
				sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list,
						bsg_job->reply_payload.sg_cnt,
						&ap, dat_size, sg_skip, false);

			ql_dbg(ql_dbg_edif, vha, 0x09102,
			    "%s Doorbell consumed : type=%d %p\n",
			    __func__, dbnode->ntype, dbnode);

			kfree(dbnode);
		} else {
			break;
		}
	}

	SET_DID_STATUS(bsg_reply->result, DID_OK);
	bsg_reply->reply_payload_rcv_len = sg_skip;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	return 0;
}
static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
	u32 delay)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	/* small sleep for doorbell events to accumulate */
	if (delay)
		msleep(delay);

	qla_edif_consume_dbell(vha, bsg_job);

	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
}

static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha)
{
	unsigned long flags;
	struct bsg_job *prev_bsg_job = NULL;

	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
	if (vha->e_dbell.dbell_bsg_job) {
		prev_bsg_job = vha->e_dbell.dbell_bsg_job;
		vha->e_dbell.dbell_bsg_job = NULL;
	}
	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);

	if (prev_bsg_job)
		__qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0);
}
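
/*
 * Long-poll interface for doorbell reads: if no events are queued and the
 * doorbell is active, park this bsg job on e_dbell.dbell_bsg_job so the
 * next event (or the ~10 s bsg_expire deadline) completes it; otherwise
 * complete it right away, after a short delay to let events accumulate.
 */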
static int
qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	unsigned long flags;
	bool return_bsg = false;

	/* flush previous dbell bsg */
	qla_edif_dbell_bsg_done(vha);

	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
	if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) {
		/*
		 * when the next db event happens, bsg_job will return.
		 * Otherwise, timer will return it.
		 */
		vha->e_dbell.dbell_bsg_job = bsg_job;
		vha->e_dbell.bsg_expire = jiffies + 10 * HZ;
	} else {
		return_bsg = true;
	}
	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);

	if (return_bsg)
		__qla_edif_dbell_bsg_done(vha, bsg_job, 1);

	return 0;
}
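
/*
 * Top-level dispatcher for the QL_VND_SC_* vendor sub-commands issued by
 * the authentication app.  SA_UPDATE, READ_DBELL, and unrecognized
 * sub-commands leave the bsg job open (done = false); all other
 * sub-commands are completed before returning.
 */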
int32_t
qla_edif_app_mgmt(struct bsg_job *bsg_job)
{
	struct fc_bsg_request	*bsg_request = bsg_job->request;
	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
	struct Scsi_Host	*host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t		*vha = shost_priv(host);
	struct app_id		appcheck;
	bool done = true;
	int32_t			rval = 0;
	uint32_t		vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	u32 level = ql_dbg_edif;

	/* doorbell is high traffic */
	if (vnd_sc == QL_VND_SC_READ_DBELL)
		level = 0;

	ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n",
	    __func__, vnd_sc);

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &appcheck,
	    sizeof(struct app_id));

	if (!vha->hw->flags.edif_enabled ||
	    test_bit(VPORT_DELETE, &vha->dpc_flags)) {
		ql_dbg(level, vha, 0x911d,
		    "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
		    __func__, bsg_job, vha->dpc_flags);

		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	if (!qla_edif_app_check(vha, appcheck)) {
		ql_dbg(level, vha, 0x911d,
		    "%s app check failed.\n",
		    __func__);

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	switch (vnd_sc) {
	case QL_VND_SC_SA_UPDATE:
		done = false;
		rval = qla24xx_sadb_update(bsg_job);
		break;
	case QL_VND_SC_APP_START:
		rval = qla_edif_app_start(vha, bsg_job);
		break;
	case QL_VND_SC_APP_STOP:
		rval = qla_edif_app_stop(vha, bsg_job);
		break;
	case QL_VND_SC_AUTH_OK:
		rval = qla_edif_app_authok(vha, bsg_job);
		break;
	case QL_VND_SC_AUTH_FAIL:
		rval = qla_edif_app_authfail(vha, bsg_job);
		break;
	case QL_VND_SC_GET_FCINFO:
		rval = qla_edif_app_getfcinfo(vha, bsg_job);
		break;
	case QL_VND_SC_GET_STATS:
		rval = qla_edif_app_getstats(vha, bsg_job);
		break;
	case QL_VND_SC_AEN_COMPLETE:
		rval = qla_edif_ack(vha, bsg_job);
		break;
	case QL_VND_SC_READ_DBELL:
		rval = qla_edif_dbell_bsg(vha, bsg_job);
		done = false;
		break;
	default:
		ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
		    __func__,
		    bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
		rval = EXT_STATUS_INVALID_PARAM;
		done = false;
		break;
	}

done:
	if (done) {
		ql_dbg(level, vha, 0x7009,
		    "%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
static struct edif_sa_ctl *
qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
	int dir)
{
	struct edif_sa_ctl *sa_ctl;
	struct qla_sa_update_frame *sap;
	int index = sa_frame->fast_sa_index;
	unsigned long flags = 0;

	sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
	if (!sa_ctl) {
		/* couldn't get space */
		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
		    "unable to allocate SA CTL\n");
		return NULL;
	}

	/*
	 * need to allocate sa_index here and save it
	 * in both sa_ctl->index and sa_frame->fast_sa_index;
	 * If alloc fails then delete sa_ctl and return NULL
	 */
	INIT_LIST_HEAD(&sa_ctl->next);
	sap = &sa_ctl->sa_frame;
	*sap = *sa_frame;

	sa_ctl->index = index;
	sa_ctl->fcport = fcport;
	sa_ctl->flags = 0;
	sa_ctl->state = 0L;

	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
	    "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
	    __func__, sa_ctl, sa_ctl->index, sa_ctl->state);

	spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
	if (dir == SAU_FLG_TX)
		list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
	else
		list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
	spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);

	return sa_ctl;
}

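/*
 * Free every sa_ctl queued on @fcport's tx and rx SA lists; used when
 * tearing down all SA state for a session.
 */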
void
qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
{
	struct edif_sa_ctl *sa_ctl, *tsa_ctl;
	unsigned long flags = 0;

	spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);

	list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
	    next) {
		list_del(&sa_ctl->next);
		kfree(sa_ctl);
	}

	list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
	    next) {
		list_del(&sa_ctl->next);
		kfree(sa_ctl);
	}

	spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
}

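/*
 * Look up an in-use sa_ctl by sa_index on the tx or rx SA list of
 * @fcport (selected by @dir); returns NULL if no match is found.
 */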
struct edif_sa_ctl *
qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
{
	struct edif_sa_ctl *sa_ctl, *tsa_ctl;
	struct list_head *sa_list;

	if (dir == SAU_FLG_TX)
		sa_list = &fcport->edif.tx_sa_list;
	else
		sa_list = &fcport->edif.rx_sa_list;

	list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
		if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
		    sa_ctl->index == index)
			return sa_ctl;
	}
	return NULL;
}

/* map the spi to an sa_index and queue an sa_ctl on the correct list */
static int
qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
	struct qla_sa_update_frame *sa_frame)
{
	struct edif_sa_ctl *sa_ctl = NULL;
	int dir;
	uint16_t sa_index;

	dir = (sa_frame->flags & SAU_FLG_TX);

	/* map the spi to an sa_index */
	sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
	if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
		/* process rx delete */
		ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
		    "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
		    __func__, fcport->loop_id, sa_frame->spi);

		/* build and send the aen */
		fcport->edif.rx_sa_set = 1;
		fcport->edif.rx_sa_pending = 0;
		qla_edb_eventcreate(fcport->vha,
		    VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
		    QL_VND_SA_STAT_SUCCESS,
		    QL_VND_RX_SA_KEY, fcport);

		/* force a return of good bsg status */
		return RX_DELETE_NO_EDIF_SA_INDEX;
	} else if (sa_index == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
		    "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
		    __func__, sa_frame->spi, dir);
		return INVALID_EDIF_SA_INDEX;
	}

	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
	    "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
	    __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);

	/* This is a local copy of sa_frame. */
	sa_frame->fast_sa_index = sa_index;

	/* create the sa_ctl */
	sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
	if (!sa_ctl) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
		    "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
		    __func__, sa_frame->spi, dir, sa_index);
		return -1;
	}

	set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);

	if (dir == SAU_FLG_TX)
		fcport->edif.tx_rekey_cnt++;
	else
		fcport->edif.rx_rekey_cnt++;

	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
	    "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
	    __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
	    fcport->edif.tx_rekey_cnt,
	    fcport->edif.rx_rekey_cnt, fcport->loop_id);

	return 0;
}

#define QLA_SA_UPDATE_FLAGS_RX_KEY	0x0
#define QLA_SA_UPDATE_FLAGS_TX_KEY	0x2
#define EDIF_MSLEEP_INTERVAL		100
#define EDIF_RETRY_COUNT		50

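/*
 * qla24xx_sadb_update - handle a QL_VND_SC_SA_UPDATE bsg request.
 * Validates the session, allocates an sa_index/sa_ctl for the key,
 * handles the delayed rx-delete bookkeeping, then issues an
 * SRB_SA_UPDATE iocb to the firmware.  On the error paths the bsg
 * job is completed here; on success it completes from the iocb path.
 */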
int
qla24xx_sadb_update(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = NULL;
	srb_t *sp = NULL;
	struct edif_list_entry *edif_entry = NULL;
	int found = 0;
	int rval = 0;
	int result = 0, cnt;
	struct qla_sa_update_frame sa_frame;
	struct srb_iocb *iocb_cmd;
	port_id_t portid;

	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
	    "%s entered, vha: 0x%p\n", __func__, vha);

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sa_frame,
	    sizeof(struct qla_sa_update_frame));

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
		rval = -EIO;
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	if (DBELL_INACTIVE(vha)) {
		ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
		rval = -EIO;
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;
	}

	/* silence unaligned access warning */
	portid.b.domain = sa_frame.port_id.b.domain;
	portid.b.area = sa_frame.port_id.b.area;
	portid.b.al_pa = sa_frame.port_id.b.al_pa;

	fcport = qla2x00_find_fcport_by_pid(vha, &portid);
	if (fcport) {
		found = 1;
		if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
			fcport->edif.tx_bytes = 0;
		if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
			fcport->edif.rx_bytes = 0;
	}

	if (!found) {
		ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
		    sa_frame.port_id.b24);
		rval = -EINVAL;
		SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
		goto done;
	}

	/* make sure the nport_handle is valid */
	if (fcport->loop_id == FC_NO_LOOP_ID) {
		ql_dbg(ql_dbg_edif, vha, 0x70e1,
		    "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
		    __func__, fcport->port_name, sa_frame.spi,
		    fcport->disc_state);
		rval = -EINVAL;
		SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
		goto done;
	}

	/* allocate and queue an sa_ctl */
	result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);

	/* failure of bsg */
	if (result == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, vha, 0x70e1,
		    "%s: %8phN, skipping update.\n",
		    __func__, fcport->port_name);
		rval = -EINVAL;
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		goto done;

	/* rx delete failure */
	} else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, vha, 0x70e1,
		    "%s: %8phN, skipping rx delete.\n",
		    __func__, fcport->port_name);
		SET_DID_STATUS(bsg_reply->result, DID_OK);
		goto done;
	}

	ql_dbg(ql_dbg_edif, vha, 0x70e1,
	    "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
	    __func__, fcport->port_name, sa_frame.fast_sa_index,
	    sa_frame.flags);

	/* looking for rx index and delete */
	if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
	    (sa_frame.flags & SAU_FLG_INV)) {
		uint16_t nport_handle = fcport->loop_id;
		uint16_t sa_index = sa_frame.fast_sa_index;

		/*
		 * make sure we have an existing rx key, otherwise just process
		 * this as a straight delete just like TX
		 * This is NOT a normal case, it indicates an error recovery or key cleanup
		 * by the ipsec code above us.
		 */
		edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
		if (!edif_entry) {
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
			    __func__, fcport->loop_id, sa_index);
			goto force_rx_delete;
		}

		/*
		 * if we have a forced delete for rx, remove the sa_index from the edif list
		 * and proceed with normal delete.  The rx delay timer should not be running
		 */
		if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
			qla_edif_list_delete_sa_index(fcport, edif_entry);
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
			    __func__, fcport->loop_id, sa_index);
			kfree(edif_entry);
			goto force_rx_delete;
		}

		/*
		 * delayed rx delete
		 *
		 * if delete_sa_index is not invalid then there is already
		 * a delayed index in progress, return bsg bad status
		 */
		if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
			struct edif_sa_ctl *sa_ctl;

			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
			    __func__, edif_entry->handle, edif_entry->delete_sa_index);

			/* free up the sa_ctl that was allocated with the sa_index */
			sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
			    (sa_frame.flags & SAU_FLG_TX));
			if (sa_ctl) {
				ql_dbg(ql_dbg_edif, vha, 0x3063,
				    "%s: freeing sa_ctl for index %d\n",
				    __func__, sa_ctl->index);
				qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
			}

			/* release the sa_index */
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: freeing sa_index %d, nph: 0x%x\n",
			    __func__, sa_index, nport_handle);
			qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);

			rval = -EINVAL;
			SET_DID_STATUS(bsg_reply->result, DID_ERROR);
			goto done;
		}

		fcport->edif.rekey_cnt++;

		/* configure and start the rx delay timer */
		edif_entry->fcport = fcport;
		edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;

		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
		    __func__, edif_entry, sa_index, nport_handle);

		/*
		 * Start the timer when we queue the delayed rx delete.
		 * This is an activity timer that goes off if we have not
		 * received packets with the new sa_index
		 */
		add_timer(&edif_entry->timer);

		/*
		 * sa_delete for rx key with an active rx key including this one
		 * add the delete rx sa index to the hash so we can look for it
		 * in the rsp queue.  Do this after making any changes to the
		 * edif_entry as part of the rx delete.
		 */
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
		    __func__, sa_index, nport_handle, bsg_job);

		edif_entry->delete_sa_index = sa_index;

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK << 16;

		goto done;

	/*
	 * rx index and update
	 * add the index to the list and continue with normal update
	 */
	} else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
	    ((sa_frame.flags & SAU_FLG_INV) == 0)) {
		/* sa_update for rx key */
		uint32_t nport_handle = fcport->loop_id;
		uint16_t sa_index = sa_frame.fast_sa_index;
		int result;

		/*
		 * add the update rx sa index to the hash so we can look for it
		 * in the rsp queue and continue normally
		 */
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: adding update sa_index %d, lid 0x%x to edif_list\n",
		    __func__, sa_index, nport_handle);

		result = qla_edif_list_add_sa_update_index(fcport, sa_index,
		    nport_handle);
		if (result) {
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
			    __func__, sa_index, nport_handle);
		}
	}

	if (sa_frame.flags & SAU_FLG_GMAC_MODE)
		fcport->edif.aes_gmac = 1;
	else
		fcport->edif.aes_gmac = 0;

force_rx_delete:
	/*
	 * sa_update for both rx and tx keys, sa_delete for tx key
	 * immediately process the request
	 */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done;
	}

	sp->type = SRB_SA_UPDATE;
	sp->name = "bsg_sa_update";
	sp->u.bsg_job = bsg_job;
	/* sp->free = qla2x00_bsg_sp_free; */
	sp->free = qla2x00_rel_sp;
	sp->done = qla2x00_bsg_job_done;
	iocb_cmd = &sp->u.iocb_cmd;
	iocb_cmd->u.sa_update.sa_frame = sa_frame;
	cnt = 0;
retry:
	rval = qla2x00_start_sp(sp);
	switch (rval) {
	case QLA_SUCCESS:
		break;
	case EAGAIN:
		msleep(EDIF_MSLEEP_INTERVAL);
		cnt++;
		if (cnt < EDIF_RETRY_COUNT)
			goto retry;

		fallthrough;
	default:
		ql_log(ql_dbg_edif, vha, 0x70e3,
		    "%s qla2x00_start_sp failed=%d.\n",
		    __func__, rval);

		qla2x00_rel_sp(sp);
		rval = -EIO;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done;
	}

	ql_dbg(ql_dbg_edif, vha, 0x911d,
	    "%s: %s sent, hdl=%x, portid=%06x.\n",
	    __func__, sp->name, sp->handle, fcport->d_id.b24);

	fcport->edif.rekey_cnt++;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	SET_DID_STATUS(bsg_reply->result, DID_OK);

	return 0;

/*
 * send back error status
 */
done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	ql_dbg(ql_dbg_edif, vha, 0x911d,
	    "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
	    __func__, bsg_reply->result, bsg_job);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

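/*
 * Release a single enode; callers unlink it from any list first.
 * The ntype is reset to N_UNDEF before the memory is freed.
 */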
static void
qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
{
	node->ntype = N_UNDEF;
	kfree(node);
}

/**
 * qla_enode_init - initialize enode structs & lock
 * @vha: host adapter pointer
 *
 * should only be called when driver attaching
 */
void
qla_enode_init(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	char name[32];

	if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
		/* list still active - error */
		ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
		    __func__);
		return;
	}

	/* initialize lock which protects pur_core & init list */
	spin_lock_init(&vha->pur_cinfo.pur_lock);
	INIT_LIST_HEAD(&vha->pur_cinfo.head);

	snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
	    ha->pdev->device);
}

/**
 * qla_enode_stop - stop and clear enode data
 * @vha: host adapter pointer
 *
 * called when app notified it is exiting
 */
void
qla_enode_stop(scsi_qla_host_t *vha)
{
	unsigned long flags;
	struct enode *node, *q;

	if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
		/* doorbell list not enabled */
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s enode not active\n", __func__);
		return;
	}

	/* grab lock so list doesn't move */
	spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);

	vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */

	/* hopefully this is an empty list at this point */
	list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
		ql_dbg(ql_dbg_edif, vha, 0x910f,
		    "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
		    node->dinfo.nodecnt);
		list_del_init(&node->list);
		qla_enode_free(vha, node);
	}
	spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
}

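/*
 * Drop any queued purex enodes that were received from @portid; the
 * matching entries are moved off the active list under the lock and
 * freed afterwards.
 */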
static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid)
{
	unsigned long flags;
	struct enode *e, *tmp;
	struct purexevent *purex;
	LIST_HEAD(enode_list);

	if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s enode not active\n", __func__);
		return;
	}
	spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
	list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) {
		purex = &e->u.purexinfo;
		if (purex->pur_info.pur_sid.b24 == portid.b24) {
			ql_dbg(ql_dbg_edif, vha, 0x911d,
			    "%s free ELS sid=%06x. xchg %x, nb=%xh\n",
			    __func__, portid.b24,
			    purex->pur_info.pur_rx_xchg_address,
			    purex->pur_info.pur_bytes_rcvd);

			list_del_init(&e->list);
			list_add_tail(&e->list, &enode_list);
		}
	}
	spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);

	list_for_each_entry_safe(e, tmp, &enode_list, list) {
		list_del_init(&e->list);
		qla_enode_free(vha, e);
	}
}

/*
 * allocate enode struct and populate buffer
 * returns: enode pointer with buffers
 *          NULL on error
 */
static struct enode *
qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
{
	struct enode *node;
	struct purexevent *purex;

	node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
	if (!node)
		return NULL;

	purex = &node->u.purexinfo;
	purex->msgp = (u8 *)(node + 1);
	purex->msgp_len = ELS_MAX_PAYLOAD;

	node->ntype = ntype;
	INIT_LIST_HEAD(&node->list);
	return node;
}

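/*
 * Queue an allocated enode on the vha's purex list under pur_lock.
 */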
static void
qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
{
	unsigned long flags;

	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
	    "%s add enode for type=%x, cnt=%x\n",
	    __func__, ptr->ntype, ptr->dinfo.nodecnt);

	spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
	list_add_tail(&ptr->list, &vha->pur_cinfo.head);
	spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
}

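/*
 * Find and unlink the first queued enode whose source port id matches
 * @p1; @p2 is currently unused for purex nodes.  Returns NULL if no
 * matching node is queued.
 */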
static struct enode *
qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
{
	struct enode *node_rtn = NULL;
	struct enode *list_node, *q;
	unsigned long flags;
	uint32_t sid;
	struct purexevent *purex;

	/* secure the list from moving under us */
	spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);

	list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) {
		/* node type determines what p1 and p2 are */
		purex = &list_node->u.purexinfo;
		sid = p1;

		if (purex->pur_info.pur_sid.b24 == sid) {
			/* found it and it's complete */
			node_rtn = list_node;
			list_del(&list_node->list);
			break;
		}
	}

	spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);

	return node_rtn;
}

/**
 * qla_pur_get_pending - read/return authentication message sent
 *  from remote port
 * @vha: host adapter pointer
 * @fcport: session pointer
 * @bsg_job: user request where the message is copied to.
 */
static int
qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct bsg_job *bsg_job)
{
	struct enode *ptr;
	struct purexevent *purex;
	struct qla_bsg_auth_els_reply *rpl =
	    (struct qla_bsg_auth_els_reply *)bsg_job->reply;

	bsg_job->reply_len = sizeof(*rpl);

	ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
	if (!ptr) {
		ql_dbg(ql_dbg_edif, vha, 0x9111,
		    "%s no enode data found for %8phN sid=%06x\n",
		    __func__, fcport->port_name, fcport->d_id.b24);
		SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
		return -EIO;
	}

	/*
	 * enode is now off the linked list and is ours to deal with
	 */
	purex = &ptr->u.purexinfo;

	/* Copy info back to caller */
	rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
	SET_DID_STATUS(rpl->r.result, DID_OK);
	rpl->r.reply_payload_rcv_len =
	    sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, purex->msgp,
		purex->pur_info.pur_bytes_rcvd, 0);

	/* data copy / passback completed - destroy enode */
	qla_enode_free(vha, ptr);

	return 0;
}

/* it is assumed that the qpair lock is held */
static int
qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
	struct qla_els_pt_arg *a)
{
	struct els_entry_24xx *els_iocb;

	els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
	if (!els_iocb) {
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla_els_pt_iocb(vha, els_iocb, a);

	ql_dbg(ql_dbg_edif, vha, 0x0183,
	    "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n",
	    a->ox_id, a->sid.b24, a->did.b24);
	ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
	    vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
	/* flush iocb to mem before notifying hw doorbell */
	wmb();
	qla2x00_start_iocbs(vha, qp->req);
	return 0;
}

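/*
 * Initialize the edif doorbell list and its lock; called during host
 * initialization.  A second call while the doorbell is still active
 * is treated as an error and ignored.
 */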
void
qla_edb_init(scsi_qla_host_t *vha)
{
	if (DBELL_ACTIVE(vha)) {
		/* list already init'd - error */
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "edif db already initialized, cannot reinit\n");
		return;
	}

	/* initialize lock which protects doorbell & init list */
	spin_lock_init(&vha->e_dbell.db_lock);
	INIT_LIST_HEAD(&vha->e_dbell.head);
}

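/*
 * Remove doorbell events that refer to @portid from the pending list
 * and free them; SA update completion events are kept so the app can
 * still consume them.
 */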
static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
{
	unsigned long flags;
	struct edb_node *e, *tmp;
	port_id_t sid;
	LIST_HEAD(edb_list);

	if (DBELL_INACTIVE(vha)) {
		/* doorbell list not enabled */
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s doorbell not enabled\n", __func__);
		return;
	}

	/* grab lock so list doesn't move */
	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
	list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) {
		switch (e->ntype) {
		case VND_CMD_AUTH_STATE_NEEDED:
		case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
			sid = e->u.plogi_did;
			break;
		case VND_CMD_AUTH_STATE_ELS_RCVD:
			sid = e->u.els_sid;
			break;
		case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
			/* app wants to see this */
			continue;
		default:
			ql_log(ql_log_warn, vha, 0x09102,
			    "%s unknown node type: %x\n", __func__, e->ntype);
			sid.b24 = 0;
			break;
		}
		if (sid.b24 == portid.b24) {
			ql_dbg(ql_dbg_edif, vha, 0x910f,
			    "%s free doorbell event : node type = %x %p\n",
			    __func__, e->ntype, e);
			list_del_init(&e->list);
			list_add_tail(&e->list, &edb_list);
		}
	}
	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);

	list_for_each_entry_safe(e, tmp, &edb_list, list)
		qla_edb_node_free(vha, e);
}

/* function called when app is stopping */
void
qla_edb_stop(scsi_qla_host_t *vha)
{
	unsigned long flags;
	struct edb_node *node, *q;

	if (DBELL_INACTIVE(vha)) {
		/* doorbell list not enabled */
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s doorbell not enabled\n", __func__);
		return;
	}

	/* grab lock so list doesn't move */
	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);

	vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */
	/* hopefully this is an empty list at this point */
	list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
		ql_dbg(ql_dbg_edif, vha, 0x910f,
		    "%s freeing edb_node type=%x\n",
		    __func__, node->ntype);
		qla_edb_node_free(vha, node);
	}
	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);

	qla_edif_dbell_bsg_done(vha);
}

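/*
 * Allocate a doorbell event node of type @ntype; GFP_ATOMIC is used
 * since callers may not be able to sleep.
 */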
static struct edb_node *
qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
{
	struct edb_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node) {
		/* couldn't get space */
		ql_dbg(ql_dbg_edif, vha, 0x9100,
		    "edb node unable to be allocated\n");
		return NULL;
	}

	node->ntype = ntype;
	INIT_LIST_HEAD(&node->list);
	return node;
}

/* adds an already allocated edb node to the doorbell list */
static bool
qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
{
	unsigned long flags;

	if (DBELL_INACTIVE(vha)) {
		/* doorbell list not enabled */
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s doorbell not enabled\n", __func__);
		return false;
	}

	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
	list_add_tail(&ptr->list, &vha->e_dbell.head);
	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);

	return true;
}

/* adds event to doorbell list */
void
qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
	uint32_t data, uint32_t data2, fc_port_t *sfcport)
{
	struct edb_node *edbnode;
	fc_port_t *fcport = sfcport;
	port_id_t id;

	if (!vha->hw->flags.edif_enabled) {
		/* edif not enabled */
		return;
	}

	if (DBELL_INACTIVE(vha)) {
		if (fcport)
			fcport->edif.auth_state = dbtype;
		/* doorbell list not enabled */
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s doorbell not enabled (type=%d)\n", __func__, dbtype);
		return;
	}

	edbnode = qla_edb_node_alloc(vha, dbtype);
	if (!edbnode) {
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s unable to alloc db node\n", __func__);
		return;
	}

	if (!fcport) {
		id.b.domain = (data >> 16) & 0xff;
		id.b.area = (data >> 8) & 0xff;
		id.b.al_pa = data & 0xff;
		ql_dbg(ql_dbg_edif, vha, 0x09222,
		    "%s: Arrived s_id: %06x\n", __func__,
		    id.b24);
		fcport = qla2x00_find_fcport_by_pid(vha, &id);
		if (!fcport) {
			ql_dbg(ql_dbg_edif, vha, 0x09102,
			    "%s can't find fcport for sid= 0x%x - ignoring\n",
			    __func__, id.b24);
			kfree(edbnode);
			return;
		}
	}

	/* populate the edb node */
	switch (dbtype) {
	case VND_CMD_AUTH_STATE_NEEDED:
	case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
		edbnode->u.plogi_did.b24 = fcport->d_id.b24;
		break;
	case VND_CMD_AUTH_STATE_ELS_RCVD:
		edbnode->u.els_sid.b24 = fcport->d_id.b24;
		break;
	case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
		edbnode->u.sa_aen.port_id = fcport->d_id;
		edbnode->u.sa_aen.status = data;
		edbnode->u.sa_aen.key_type = data2;
		edbnode->u.sa_aen.version = EDIF_VERSION1;
		break;
	default:
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s unknown type: %x\n", __func__, dbtype);
		kfree(edbnode);
		edbnode = NULL;
		break;
	}

	if (edbnode) {
		if (!qla_edb_node_add(vha, edbnode)) {
			ql_dbg(ql_dbg_edif, vha, 0x09102,
			    "%s unable to add dbnode\n", __func__);
			kfree(edbnode);
			return;
		}
		ql_dbg(ql_dbg_edif, vha, 0x09102,
		    "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
		qla_edif_dbell_bsg_done(vha);
		if (fcport)
			fcport->edif.auth_state = dbtype;
	}
}

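/*
 * Periodic edif housekeeping (called from the driver's timer path):
 * in N2N topologies with firmware security support, count down after
 * the app stops and then schedule a chip reset to disable automatic
 * secure PLOGI ACC; also complete an expired doorbell bsg request.
 */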
void
qla_edif_timer(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
		if (DBELL_INACTIVE(vha) &&
		    ha->edif_post_stop_cnt_down) {
			ha->edif_post_stop_cnt_down--;

			/*
			 * turn off the firmware's automatic
			 * 'PLOGI ACC + secure=1' feature
			 * (Additional FW option[3], BIT_15)
			 * once the count-down expires.
			 */
			if (ha->edif_post_stop_cnt_down == 0) {
				ql_dbg(ql_dbg_async, vha, 0x911d,
				    "%s chip reset to turn off PLOGI ACC + secure\n",
				    __func__);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else {
			ha->edif_post_stop_cnt_down = 60;
		}
	}

	if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire))
		qla_edif_dbell_bsg_done(vha);
}

static void qla_noop_sp_done(srb_t *sp, int res)
{
	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

/*
 * Called from work queue
 * build and send the sa_update iocb to delete an rx sa_index
 */
int
qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
{
	srb_t *sp;
	fc_port_t *fcport = NULL;
	struct srb_iocb *iocb_cmd = NULL;
	int rval = QLA_SUCCESS;
	struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
	uint16_t nport_handle = e->u.sa_update.nport_handle;

	ql_dbg(ql_dbg_edif, vha, 0x70e6,
	    "%s: starting, sa_ctl: %p\n", __func__, sa_ctl);

	if (!sa_ctl) {
		ql_dbg(ql_dbg_edif, vha, 0x70e6,
		    "sa_ctl allocation failed\n");
		rval = -ENOMEM;
		return rval;
	}

	fcport = sa_ctl->fcport;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_edif, vha, 0x70e6,
		    "SRB allocation failed\n");
		rval = -ENOMEM;
		goto done;
	}

	fcport->flags |= FCF_ASYNC_SENT;
	iocb_cmd = &sp->u.iocb_cmd;
	iocb_cmd->u.sa_update.sa_ctl = sa_ctl;

	ql_dbg(ql_dbg_edif, vha, 0x3073,
	    "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
	    fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);

	/*
	 * if this is a sadb cleanup delete, mark it so the isr can
	 * take the correct action
	 */
	if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
		/* mark this srb as a cleanup delete */
		sp->flags |= SRB_EDIF_CLEANUP_DELETE;
		ql_dbg(ql_dbg_edif, vha, 0x70e6,
		    "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
	}

	sp->type = SRB_SA_REPLACE;
	sp->name = "SA_REPLACE";
	sp->fcport = fcport;
	sp->free = qla2x00_rel_sp;
	sp->done = qla_noop_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}

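/*
 * Build an SA_UPDATE IOCB from the sa_frame carried in the srb:
 * decode the tx/rx and update/delete flags, fill in the nport handle,
 * port id, spi, salt and key (128- or 256-bit), and mark the matching
 * tx/rx SA as pending on the fcport.
 */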
void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
{
	int itr = 0;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_sa_update_frame *sa_frame =
		&sp->u.iocb_cmd.u.sa_update.sa_frame;
	u8 flags = 0;

	switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
	case 0:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		break;
	case 1:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		flags |= SA_FLAG_INVALIDATE;
		break;
	case 2:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		flags |= SA_FLAG_TX;
		break;
	case 3:
		ql_dbg(ql_dbg_edif, vha, 0x911d,
		    "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, sa_frame->fast_sa_index);
		flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
		break;
	}

	sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
	sa_update_iocb->entry_count = 1;
	sa_update_iocb->sys_define = 0;
	sa_update_iocb->entry_status = 0;
	sa_update_iocb->handle = sp->handle;
	sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
	sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
	sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
	sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;

	sa_update_iocb->flags = flags;
	sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
	sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
	sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);

	sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
	if (sp->fcport->edif.aes_gmac)
		sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;

	if (sa_frame->flags & SAU_FLG_KEY256) {
		sa_update_iocb->sa_control |= SA_CNTL_KEY256;
		for (itr = 0; itr < 32; itr++)
			sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
	} else {
		sa_update_iocb->sa_control |= SA_CNTL_KEY128;
		for (itr = 0; itr < 16; itr++)
			sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
	}

	ql_dbg(ql_dbg_edif, vha, 0x921d,
	    "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
	    __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
	    sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
	    sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
	    sp->fcport->edif.aes_gmac);

	if (sa_frame->flags & SAU_FLG_TX)
		sp->fcport->edif.tx_sa_pending = 1;
	else
		sp->fcport->edif.rx_sa_pending = 1;

	sp->fcport->vha->qla_stats.control_requests++;
}

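/*
 * Build an SA_UPDATE IOCB that only invalidates an existing rx
 * sa_index (used by the SRB_SA_REPLACE cleanup path); salt, spi,
 * control and key are irrelevant for a delete and are zeroed.
 */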
void
qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
	struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl;
	uint16_t nport_handle = sp->fcport->loop_id;

	sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
	sa_update_iocb->entry_count = 1;
	sa_update_iocb->sys_define = 0;
	sa_update_iocb->entry_status = 0;
	sa_update_iocb->handle = sp->handle;

	sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);

	sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
	sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
	sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;

	/* Invalidate the index. salt, spi, control & key are ignored */
	sa_update_iocb->flags = SA_FLAG_INVALIDATE;
	sa_update_iocb->salt = 0;
	sa_update_iocb->spi = 0;
	sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
	sa_update_iocb->sa_control = 0;

	ql_dbg(ql_dbg_edif, vha, 0x921d,
	    "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
	    __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
	    sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
	    sa_update_iocb->sa_index, sp->handle);

	sp->fcport->vha->qla_stats.control_requests++;
}

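/*
 * Handle a received AUTH ELS (purex): validate length and edif state,
 * stash the payload in an enode for the app to fetch, reject or
 * terminate the exchange on any error, and post an ELS_RCVD doorbell
 * event on success.
 */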
void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
{
	struct purex_entry_24xx *p = *pkt;
	struct enode *ptr;
	int sid;
	u16 totlen;
	struct purexevent *purex;
	struct scsi_qla_host *host = NULL;
	int rc;
	struct fc_port *fcport;
	struct qla_els_pt_arg a;
	be_id_t beid;

	memset(&a, 0, sizeof(a));

	a.els_opcode = ELS_AUTH_ELS;
	a.nport_handle = p->nport_handle;
	a.rx_xchg_address = p->rx_xchg_addr;
	a.did.b.domain = p->s_id[2];
	a.did.b.area = p->s_id[1];
	a.did.b.al_pa = p->s_id[0];
	a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
	a.tx_addr = vha->hw->elsrej.cdma;
	a.vp_idx = vha->vp_idx;
	a.control_flags = EPD_ELS_RJT;
	a.ox_id = le16_to_cpu(p->ox_id);

	sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);

	totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
	if (le16_to_cpu(p->status_flags) & 0x8000) {
		totlen = le16_to_cpu(p->trunc_frame_size);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	if (totlen > ELS_MAX_PAYLOAD) {
		ql_dbg(ql_dbg_edif, vha, 0x0910d,
		    "%s WARNING: oversize ELS frame received (totlen=%x)\n",
		    __func__, totlen);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	if (!vha->hw->flags.edif_enabled) {
		/* edif support not enabled */
		ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
		    __func__);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	ptr = qla_enode_alloc(vha, N_PUREX);
	if (!ptr) {
		ql_dbg(ql_dbg_edif, vha, 0x09109,
		    "WARNING: enode alloc failed for sid=%x\n",
		    sid);
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		__qla_consume_iocb(vha, pkt, rsp);
		return;
	}

	purex = &ptr->u.purexinfo;
	purex->pur_info.pur_sid = a.did;
	purex->pur_info.pur_bytes_rcvd = totlen;
	purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
	purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
	purex->pur_info.pur_did.b.domain = p->d_id[2];
	purex->pur_info.pur_did.b.area = p->d_id[1];
	purex->pur_info.pur_did.b.al_pa = p->d_id[0];
	purex->pur_info.vp_idx = p->vp_idx;

	a.sid = purex->pur_info.pur_did;

	rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
	    purex->msgp_len);
	if (rc) {
		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		qla_enode_free(vha, ptr);
		return;
	}

	beid.al_pa = purex->pur_info.pur_did.b.al_pa;
	beid.area = purex->pur_info.pur_did.b.area;
	beid.domain = purex->pur_info.pur_did.b.domain;
	host = qla_find_host_by_d_id(vha, beid);
	if (!host) {
		ql_log(ql_log_fatal, vha, 0x508b,
		    "%s Drop ELS due to unable to find host %06x\n",
		    __func__, purex->pur_info.pur_did.b24);

		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
		qla_enode_free(vha, ptr);
		return;
	}

	fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);

	if (DBELL_INACTIVE(vha)) {
		ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
		    __func__, host->e_dbell.db_flags,
		    fcport ? fcport->d_id.b24 : 0);

		qla_els_reject_iocb(host, (*rsp)->qpair, &a);
		qla_enode_free(host, ptr);
		return;
	}

	if (fcport && EDIF_SESSION_DOWN(fcport)) {
		ql_dbg(ql_dbg_edif, host, 0x13b6,
		    "%s terminate exchange. Send logo to 0x%x\n",
		    __func__, a.did.b24);

		a.tx_byte_count = a.tx_len = 0;
		a.tx_addr = 0;
		a.control_flags = EPD_RX_XCHG;  /* EPD_RX_XCHG = terminate cmd */
		qla_els_reject_iocb(host, (*rsp)->qpair, &a);
		qla_enode_free(host, ptr);
		/* send logo to let remote port know to tear down session */
		fcport->send_els_logo = 1;
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	/* add the local enode to the list */
	qla_enode_add(host, ptr);

	ql_dbg(ql_dbg_edif, host, 0x0910c,
	    "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
	    __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
	    purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address);

	qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
}

static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	void *sa_id_map;
	unsigned long flags = 0;
	u16 sa_index;

	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
	    "%s: entry\n", __func__);

	if (dir)
		sa_id_map = ha->edif_tx_sa_id_map;
	else
		sa_id_map = ha->edif_rx_sa_id_map;

	spin_lock_irqsave(&ha->sadb_fp_lock, flags);
	sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
	if (sa_index >= EDIF_NUM_SA_INDEX) {
		spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
		return INVALID_EDIF_SA_INDEX;
	}
	set_bit(sa_index, sa_id_map);
	spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);

	if (dir)
		sa_index += EDIF_TX_SA_INDEX_BASE;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: index retrieved from free pool %d\n", __func__, sa_index);

	return sa_index;
}

/* find an sadb entry for an nport_handle */
static struct edif_sa_index_entry *
qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
	struct list_head *sa_list)
{
	struct edif_sa_index_entry *entry;
	struct edif_sa_index_entry *tentry;
	struct list_head *indx_list = sa_list;

	list_for_each_entry_safe(entry, tentry, indx_list, next) {
		if (entry->handle == nport_handle)
			return entry;
	}
	return NULL;
}

/* remove an sa_index from the nport_handle and return it to the free pool */
static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
	uint16_t sa_index)
{
	struct edif_sa_index_entry *entry;
	struct list_head *sa_list;
	int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
	int slot = 0;
	int free_slot_count = 0;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: entry\n", __func__);

	if (dir)
		sa_list = &ha->sadb_tx_index_list;
	else
		sa_list = &ha->sadb_rx_index_list;

	entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
	if (!entry) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: no entry found for nport_handle 0x%x\n",
		    __func__, nport_handle);
		return -1;
	}

	spin_lock_irqsave(&ha->sadb_lock, flags);
	/*
	 * each tx/rx direction has up to 2 sa indexes/slots: one slot for
	 * in-flight traffic, the other is used at re-key time.
	 */
	for (slot = 0; slot < 2; slot++) {
		if (entry->sa_pair[slot].sa_index == sa_index) {
			entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
			entry->sa_pair[slot].spi = 0;
			free_slot_count++;
			qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
		} else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
			free_slot_count++;
		}
	}

	if (free_slot_count == 2) {
		list_del(&entry->next);
		kfree(entry);
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: sa_index %d removed, free_slot_count: %d\n",
	    __func__, sa_index, free_slot_count);

	return 0;
}

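/*
 * Completion handler for SA_UPDATE IOCBs: stop the rx-delete timer
 * when needed, post success/failure doorbell events to the app
 * (skipped for internal cleanup deletes), and release the sa_ctl and
 * sa_index on deletes or failed updates.
 */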
void
qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
	struct sa_update_28xx *pkt)
{
	const char *func = "SA_UPDATE_RESPONSE_IOCB";
	srb_t *sp;
	struct edif_sa_ctl *sa_ctl;
	int old_sa_deleted = 1;
	uint16_t nport_handle;
	struct scsi_qla_host *vha;

	sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
	if (!sp) {
		ql_dbg(ql_dbg_edif, v, 0x3063,
		    "%s: no sp found for pkt\n", __func__);
		return;
	}

	/* use sp->vha due to npiv */
	vha = sp->vha;

	switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
	case 0:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	case 1:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	case 2:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	case 3:
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
		    __func__, vha, pkt->sa_index);
		break;
	}

	/*
	 * dig the nport handle out of the iocb, fcport->loop_id can not be
	 * trusted to be correct during cleanup sa_update iocbs.
	 */
	nport_handle = sp->fcport->loop_id;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
	    __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
	    nport_handle, pkt->sa_index, pkt->flags, sp->handle);

	/* if rx delete, remove the timer */
	if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) {
		struct edif_list_entry *edif_entry;

		sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

		edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
		if (edif_entry) {
			ql_dbg(ql_dbg_edif, vha, 0x5033,
			    "%s: removing edif_entry %p, new sa_index: 0x%x\n",
			    __func__, edif_entry, pkt->sa_index);
			qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
			del_timer(&edif_entry->timer);

			ql_dbg(ql_dbg_edif, vha, 0x5033,
			    "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
			    __func__, edif_entry, pkt->sa_index);

			kfree(edif_entry);
		}
	}

	/*
	 * if this is a delete for either tx or rx, make sure it succeeded.
	 * The new_sa_info field should be 0xffff on success
	 */
	if (pkt->flags & SA_FLAG_INVALIDATE)
		old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;

	/* Process update and delete the same way */

	/* If this is an sadb cleanup delete, bypass sending events to IPSEC */
	if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
		sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: nph 0x%x, sa_index %d removed from fw\n",
		    __func__, sp->fcport->loop_id, pkt->sa_index);

	} else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
	    old_sa_deleted) {
		/*
		 * Note: We are only keeping track of the latest SA,
		 * so we know when we can start enabling encryption per I/O.
		 * If all SA's get deleted, let FW reject the IOCB.
		 *
		 * TODO: edif: don't set enabled here I think
		 * TODO: edif: prli complete is where it should be set
		 */
		ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
		    "SA(%x)updated for s_id %02x%02x%02x\n",
		    pkt->new_sa_info,
		    pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
		sp->fcport->edif.enable = 1;
		if (pkt->flags & SA_FLAG_TX) {
			sp->fcport->edif.tx_sa_set = 1;
			sp->fcport->edif.tx_sa_pending = 0;
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
			    QL_VND_SA_STAT_SUCCESS,
			    QL_VND_TX_SA_KEY, sp->fcport);
		} else {
			sp->fcport->edif.rx_sa_set = 1;
			sp->fcport->edif.rx_sa_pending = 0;
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
			    QL_VND_SA_STAT_SUCCESS,
			    QL_VND_RX_SA_KEY, sp->fcport);
		}
	} else {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
		    __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
		    pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);

		if (pkt->flags & SA_FLAG_TX)
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
			    (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
			    QL_VND_TX_SA_KEY, sp->fcport);
		else
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
			    (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
			    QL_VND_RX_SA_KEY, sp->fcport);
	}

	/* for delete, release sa_ctl, sa_index */
	if (pkt->flags & SA_FLAG_INVALIDATE) {
		/* release the sa_ctl */
		sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
		    le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
		if (sa_ctl &&
		    qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
			(pkt->flags & SA_FLAG_TX)) != NULL) {
			ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
			    "%s: freeing sa_ctl for index %d\n",
			    __func__, sa_ctl->index);
			qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
		} else {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: sa_ctl NOT freed, sa_ctl: %p\n",
			    __func__, sa_ctl);
		}

		/* release the sa_index */
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: freeing sa_index %d, nph: 0x%x\n",
		    __func__, le16_to_cpu(pkt->sa_index), nport_handle);
		qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
		    le16_to_cpu(pkt->sa_index));

	/*
	 * check for a failed sa_update and remove
	 * the sadb entry.
	 */
	} else if (pkt->u.comp_sts) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: freeing sa_index %d, nph: 0x%x\n",
		    __func__, pkt->sa_index, nport_handle);
		qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
		    le16_to_cpu(pkt->sa_index));

		switch (le16_to_cpu(pkt->u.comp_sts)) {
		case CS_PORT_EDIF_UNAVAIL:
		case CS_PORT_EDIF_LOGOUT:
			qlt_schedule_sess_for_deletion(sp->fcport);
			break;
		default:
			break;
		}
	}

	sp->done(sp, 0);
}

  2489. /**
  2490. * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
  2491. * @sp: command to send to the ISP
  2492. *
  2493. * Return: non-zero if a failure occurred, else zero.
  2494. */
  2495. int
  2496. qla28xx_start_scsi_edif(srb_t *sp)
  2497. {
  2498. int nseg;
  2499. unsigned long flags;
  2500. struct scsi_cmnd *cmd;
  2501. uint32_t *clr_ptr;
  2502. uint32_t index, i;
  2503. uint32_t handle;
  2504. uint16_t cnt;
  2505. int16_t req_cnt;
  2506. uint16_t tot_dsds;
  2507. __be32 *fcp_dl;
  2508. uint8_t additional_cdb_len;
  2509. struct ct6_dsd *ctx;
  2510. struct scsi_qla_host *vha = sp->vha;
  2511. struct qla_hw_data *ha = vha->hw;
  2512. struct cmd_type_6 *cmd_pkt;
  2513. struct dsd64 *cur_dsd;
  2514. uint8_t avail_dsds = 0;
  2515. struct scatterlist *sg;
  2516. struct req_que *req = sp->qpair->req;
  2517. spinlock_t *lock = sp->qpair->qp_lock_ptr;
  2518. /* Setup device pointers. */
  2519. cmd = GET_CMD_SP(sp);
  2520. /* So we know we haven't pci_map'ed anything yet */
  2521. tot_dsds = 0;
  2522. /* Send marker if required */
  2523. if (vha->marker_needed != 0) {
  2524. if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
  2525. QLA_SUCCESS) {
  2526. ql_log(ql_log_warn, vha, 0x300c,
  2527. "qla2x00_marker failed for cmd=%p.\n", cmd);
  2528. return QLA_FUNCTION_FAILED;
  2529. }
  2530. vha->marker_needed = 0;
  2531. }
  2532. /* Acquire ring specific lock */
  2533. spin_lock_irqsave(lock, flags);
  2534. /* Check for room in outstanding command list. */
  2535. handle = req->current_outstanding_cmd;
  2536. for (index = 1; index < req->num_outstanding_cmds; index++) {
  2537. handle++;
  2538. if (handle == req->num_outstanding_cmds)
  2539. handle = 1;
  2540. if (!req->outstanding_cmds[handle])
  2541. break;
  2542. }
  2543. if (index == req->num_outstanding_cmds)
  2544. goto queuing_error;
  2545. /* Map the sg table so we have an accurate count of sg entries needed */
  2546. if (scsi_sg_count(cmd)) {
  2547. nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
  2548. scsi_sg_count(cmd), cmd->sc_data_direction);
  2549. if (unlikely(!nseg))
  2550. goto queuing_error;
  2551. } else {
  2552. nseg = 0;
  2553. }
  2554. tot_dsds = nseg;
  2555. req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
  2556. sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
  2557. sp->iores.exch_cnt = 1;
  2558. sp->iores.iocb_cnt = req_cnt;
  2559. if (qla_get_fw_resources(sp->qpair, &sp->iores))
  2560. goto queuing_error;
  2561. if (req->cnt < (req_cnt + 2)) {
  2562. cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
  2563. rd_reg_dword(req->req_q_out);
  2564. if (req->ring_index < cnt)
  2565. req->cnt = cnt - req->ring_index;
  2566. else
  2567. req->cnt = req->length -
  2568. (req->ring_index - cnt);
  2569. if (req->cnt < (req_cnt + 2))
  2570. goto queuing_error;
  2571. }
  2572. ctx = sp->u.scmd.ct6_ctx =
  2573. mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
  2574. if (!ctx) {
  2575. ql_log(ql_log_fatal, vha, 0x3010,
  2576. "Failed to allocate ctx for cmd=%p.\n", cmd);
  2577. goto queuing_error;
  2578. }
  2579. memset(ctx, 0, sizeof(struct ct6_dsd));
  2580. ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
  2581. GFP_ATOMIC, &ctx->fcp_cmnd_dma);
  2582. if (!ctx->fcp_cmnd) {
  2583. ql_log(ql_log_fatal, vha, 0x3011,
  2584. "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
  2585. goto queuing_error;
  2586. }
  2587. /* Initialize the DSD list and dma handle */
  2588. INIT_LIST_HEAD(&ctx->dsd_list);
  2589. ctx->dsd_use_cnt = 0;
  2590. if (cmd->cmd_len > 16) {
  2591. additional_cdb_len = cmd->cmd_len - 16;
  2592. if ((cmd->cmd_len % 4) != 0) {
  2593. /*
  2594. * SCSI command bigger than 16 bytes must be
  2595. * multiple of 4
  2596. */
  2597. ql_log(ql_log_warn, vha, 0x3012,
  2598. "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
  2599. cmd->cmd_len, cmd);
  2600. goto queuing_error_fcp_cmnd;
  2601. }
  2602. ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
  2603. } else {
  2604. additional_cdb_len = 0;
  2605. ctx->fcp_cmnd_len = 12 + 16 + 4;
  2606. }
  2607. cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
  2608. cmd_pkt->handle = make_handle(req->id, handle);
  2609. /*
  2610. * Zero out remaining portion of packet.
  2611. * tagged queuing modifier -- default is TSK_SIMPLE (0).
  2612. */
  2613. clr_ptr = (uint32_t *)cmd_pkt + 2;
  2614. memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
  2615. cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
  2616. /* No data transfer */
  2617. if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
  2618. cmd_pkt->byte_count = cpu_to_le32(0);
  2619. goto no_dsds;
  2620. }
	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
		sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
		sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
	}

	cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
	cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));
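	/*
	 * CF_EN_EDIF tags the exchange for over-the-wire encryption;
	 * clearing CF_NEW_SA appears to tell the firmware to keep using
	 * the currently programmed security association for this I/O.
	 */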
	/* One DSD is available in the Command Type 6 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->fcp_dsd;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
		cur_dsd++;
		avail_dsds--;
	}
no_dsds:
	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	cmd_pkt->entry_type = COMMAND_TYPE_6;

	/* Set total IOCB entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Build the FCP_CMND IU */
	int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
	ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

	/* per FCP, the low two bits of this byte are the WRDATA/RDDATA flags */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		ctx->fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		ctx->fcp_cmnd->additional_cdb_len |= 2;

	/* Populate the FCP_PRIO. */
	if (ha->flags.fcp_prio_enabled)
		ctx->fcp_cmnd->task_attribute |=
		    sp->fcport->fcp_prio << 3;

	memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

	fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
	    additional_cdb_len);
	*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
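	/*
	 * FCP_DL sits immediately after the CDB in the FCP_CMND IU and,
	 * like all FC payload fields, is big-endian -- hence htonl()
	 * rather than the cpu_to_le32() used for the IOCB fields above.
	 */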
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
	put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);

	sp->flags |= SRB_FCP_CMND_DMA_VALID;
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Set total IOCB entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->entry_status = 0;

	/* Record the command as outstanding. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	/*
	 * Adjust ring index. The wmb() orders the IOCB writes above
	 * before the in-pointer update that the chip will observe.
	 */
	wmb();
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	sp->qpair->cmd_cnt++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(lock, flags);
	return QLA_SUCCESS;
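	/*
	 * Error unwind: release everything acquired above in reverse
	 * order -- the fcp_cmnd DMA buffer (when the jump came from the
	 * CDB-length check), the sg mappings, the ct6 context, and the
	 * reserved firmware resources -- then drop the lock.
	 */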
queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ct6_ctx) {
		mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
		sp->u.scmd.ct6_ctx = NULL;
	}
	qla_put_fw_resources(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**********************************************
 * edif update/delete sa_index list functions *
 **********************************************/

/* clear the edif_indx_list for this port */
void qla_edif_list_del(fc_port_t *fcport)
{
	struct edif_list_entry *indx_lst;
	struct edif_list_entry *tindx_lst;
	struct list_head *indx_list = &fcport->edif.edif_indx_list;
	unsigned long flags = 0;

	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
	list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
		list_del(&indx_lst->next);
		kfree(indx_lst);
	}
	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
}
/******************
 * SADB functions *
 ******************/

/* allocate/retrieve an sa_index for a given spi */
static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
    struct qla_sa_update_frame *sa_frame)
{
	struct edif_sa_index_entry *entry;
	struct list_head *sa_list;
	uint16_t sa_index;
	int dir = sa_frame->flags & SAU_FLG_TX;
	int slot = 0;
	int free_slot = -1;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	uint16_t nport_handle = fcport->loop_id;

	ql_dbg(ql_dbg_edif, vha, 0x3063,
	    "%s: entry fc_port: %p, nport_handle: 0x%x\n",
	    __func__, fcport, nport_handle);

	if (dir)
		sa_list = &ha->sadb_tx_index_list;
	else
		sa_list = &ha->sadb_rx_index_list;
	entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
	if (!entry) {
		if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
			ql_dbg(ql_dbg_edif, vha, 0x3063,
			    "%s: rx delete request with no entry\n", __func__);
			return RX_DELETE_NO_EDIF_SA_INDEX;
		}

		/* if there is no entry for this nport, add one */
		entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
		if (!entry)
			return INVALID_EDIF_SA_INDEX;

		sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
		if (sa_index == INVALID_EDIF_SA_INDEX) {
			kfree(entry);
			return INVALID_EDIF_SA_INDEX;
		}

		INIT_LIST_HEAD(&entry->next);
		entry->handle = nport_handle;
		entry->fcport = fcport;
		entry->sa_pair[0].spi = sa_frame->spi;
		entry->sa_pair[0].sa_index = sa_index;
		entry->sa_pair[1].spi = 0;
		entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
		spin_lock_irqsave(&ha->sadb_lock, flags);
		list_add_tail(&entry->next, sa_list);
		spin_unlock_irqrestore(&ha->sadb_lock, flags);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
		    __func__, nport_handle, sa_frame->spi, sa_index);
		return sa_index;
	}
	spin_lock_irqsave(&ha->sadb_lock, flags);

	/* see if we already have an entry for this spi */
	for (slot = 0; slot < 2; slot++) {
		if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
			free_slot = slot;
		} else {
			if (entry->sa_pair[slot].spi == sa_frame->spi) {
				spin_unlock_irqrestore(&ha->sadb_lock, flags);
				ql_dbg(ql_dbg_edif, vha, 0x3063,
				    "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
				    __func__, slot, entry->handle, sa_frame->spi,
				    entry->sa_pair[slot].sa_index);
				return entry->sa_pair[slot].sa_index;
			}
		}
	}
	spin_unlock_irqrestore(&ha->sadb_lock, flags);
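	/*
	 * Each nport entry carries two sa_pair slots, which lets an old
	 * and a new security association coexist while a rekey is in
	 * flight; a slot is recycled once its SA has been deleted.
	 */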
	/* both slots are used */
	if (free_slot == -1) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
		    __func__, entry->handle, sa_frame->spi);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n",
		    __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
		    entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);

		return INVALID_EDIF_SA_INDEX;
	}

	/* there is at least one free slot, use it */
	sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
	if (sa_index == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
		    "%s: empty freepool!!\n", __func__);
		return INVALID_EDIF_SA_INDEX;
	}

	spin_lock_irqsave(&ha->sadb_lock, flags);
	entry->sa_pair[free_slot].spi = sa_frame->spi;
	entry->sa_pair[free_slot].sa_index = sa_index;
	spin_unlock_irqrestore(&ha->sadb_lock, flags);
	ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
	    "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
	    __func__, free_slot, entry->handle, sa_frame->spi, sa_index);
	return sa_index;
}
/* release any sadb entries -- only done at teardown */
void qla_edif_sadb_release(struct qla_hw_data *ha)
{
	struct edif_sa_index_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
		list_del(&entry->next);
		kfree(entry);
	}

	list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
		list_del(&entry->next);
		kfree(entry);
	}
}
/**************************
 * sadb freepool functions
 **************************/

/* build the rx and tx sa_index free pools -- only done at fcport init */
int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
{
	ha->edif_tx_sa_id_map =
	    kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);

	if (!ha->edif_tx_sa_id_map) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
		    "Unable to allocate memory for sadb tx.\n");
		return -ENOMEM;
	}

	ha->edif_rx_sa_id_map =
	    kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
	if (!ha->edif_rx_sa_id_map) {
		kfree(ha->edif_tx_sa_id_map);
		ha->edif_tx_sa_id_map = NULL;
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
		    "Unable to allocate memory for sadb rx.\n");
		return -ENOMEM;
	}
	return 0;
}
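/*
 * The two maps are plain bitmaps (BITS_TO_LONGS(EDIF_NUM_SA_INDEX)
 * longs, zeroed by kcalloc), so a free sa_index can presumably be
 * claimed with find_first_zero_bit()/set_bit() and returned with
 * clear_bit() by the freepool helpers.
 */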
/* release the free pool - only done during fcport teardown */
void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
{
	kfree(ha->edif_tx_sa_id_map);
	ha->edif_tx_sa_id_map = NULL;
	kfree(ha->edif_rx_sa_id_map);
	ha->edif_rx_sa_id_map = NULL;
}
static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
    fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
{
	struct edif_list_entry *edif_entry;
	struct edif_sa_ctl *sa_ctl;
	uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
	unsigned long flags = 0;
	uint16_t nport_handle = fcport->loop_id;
	uint16_t cached_nport_handle;

	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
	edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
	if (!edif_entry) {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
		return;	/* no pending delete for this handle */
	}
	/*
	 * check for no pending delete for this index or iocb does not
	 * match rx sa_index
	 */
	if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
	    edif_entry->update_sa_index != sa_index) {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
		return;
	}

	/*
	 * wait until we have seen at least EDIF_RX_DELETE_FILTER_COUNT
	 * transfers before queueing the RX delete
	 */
	if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
		return;
	}
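	/*
	 * The filter above defers the RX SA delete until several frames
	 * have completed on the new SA, which appears to guard against
	 * tearing down the old SA while late traffic could still
	 * reference it.
	 */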
	ql_dbg(ql_dbg_edif, vha, 0x5033,
	    "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
	    __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);

	delete_sa_index = edif_entry->delete_sa_index;
	edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
	cached_nport_handle = edif_entry->handle;
	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);

	/* sanity check on the nport handle */
	if (nport_handle != cached_nport_handle) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
		    __func__, nport_handle, cached_nport_handle);
	}

	/* find the sa_ctl for the delete and schedule the delete */
	sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
	if (sa_ctl) {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
		    __func__, sa_ctl, sa_index);
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
		    delete_sa_index,
		    edif_entry->update_sa_index, nport_handle, handle);
		sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
		set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
		qla_post_sa_replace_work(fcport->vha, fcport,
		    nport_handle, sa_ctl);
	} else {
		ql_dbg(ql_dbg_edif, vha, 0x3063,
		    "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
		    __func__, delete_sa_index);
	}
}
void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
    srb_t *sp, struct sts_entry_24xx *sts24)
{
	fc_port_t *fcport = sp->fcport;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint32_t handle;

	handle = (uint32_t)LSW(sts24->handle);

	/* the rx sa_index in this status iocb only applies to a SCSI read */
	if (cmd->sc_data_direction != DMA_FROM_DEVICE)
		return;

	__chk_edif_rx_sa_delete_pending(vha, fcport, handle,
	    le16_to_cpu(sts24->edif_sa_index));
}
void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
    struct ctio7_from_24xx *pkt)
{
	__chk_edif_rx_sa_delete_pending(vha, fcport,
	    pkt->handle, le16_to_cpu(pkt->edif_sa_index));
}
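/*
 * Fill the ELS pass-through argument block from the BSG request. The
 * extra_control_flags shift by 13 appears to position the reply-type
 * bits (LS_ACC/LS_RJT) within the control-flags word of the ELS IOCB.
 */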
static void qla_parse_auth_els_ctl(struct srb *sp)
{
	struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
	struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
	struct fc_bsg_request *request = bsg_job->request;
	struct qla_bsg_auth_els_request *p =
	    (struct qla_bsg_auth_els_request *)bsg_job->request;

	a->tx_len = a->tx_byte_count = sp->remap.req.len;
	a->tx_addr = sp->remap.req.dma;
	a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
	a->rx_addr = sp->remap.rsp.dma;

	if (p->e.sub_cmd == SEND_ELS_REPLY) {
		a->control_flags = p->e.extra_control_flags << 13;
		a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
		if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
			a->els_opcode = ELS_LS_ACC;
		else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
			a->els_opcode = ELS_LS_RJT;
	}
	a->did = sp->fcport->d_id;
	a->els_opcode = request->rqst_data.h_els.command_code;
	a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	a->vp_idx = sp->vha->vp_idx;
}
int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	fc_port_t *fcport = NULL;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int rval = (DID_ERROR << 16), cnt;
	port_id_t d_id;
	struct qla_bsg_auth_els_request *p =
	    (struct qla_bsg_auth_els_request *)bsg_job->request;
	struct qla_bsg_auth_els_reply *rpl =
	    (struct qla_bsg_auth_els_reply *)bsg_job->reply;

	rpl->version = EDIF_VERSION1;

	d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
	d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
	d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];

	/* find matching d_id in fcport list */
	fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
	if (!fcport) {
		ql_dbg(ql_dbg_edif, vha, 0x911a,
		    "%s no online fcport found for portid=%06x.\n",
		    __func__, d_id.b24);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EIO;
	}

	if (qla_bsg_check(vha, bsg_job, fcport))
		return 0;

	if (EDIF_SESS_DELETE(fcport)) {
		ql_dbg(ql_dbg_edif, vha, 0x910d,
		    "%s ELS code %x, no loop id.\n", __func__,
		    bsg_request->rqst_data.r_els.els_code);
		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
		return -ENXIO;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
		rval = -EPERM;
		goto done;
	}
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x7004,
		    "Failed get sp pid=%06x\n", fcport->d_id.b24);
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done;
	}

	sp->remap.req.len = bsg_job->request_payload.payload_len;
	sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
	    GFP_KERNEL, &sp->remap.req.dma);
	if (!sp->remap.req.buf) {
		ql_dbg(ql_dbg_user, vha, 0x7005,
		    "Failed allocate request dma len=%x\n",
		    bsg_job->request_payload.payload_len);
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done_free_sp;
	}

	sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
	sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
	    GFP_KERNEL, &sp->remap.rsp.dma);
	if (!sp->remap.rsp.buf) {
		ql_dbg(ql_dbg_user, vha, 0x7006,
		    "Failed allocate response dma len=%x\n",
		    bsg_job->reply_payload.payload_len);
		rval = -ENOMEM;
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		goto done_free_remap_req;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
	    sp->remap.req.len);
	sp->remap.remapped = true;

	sp->type = SRB_ELS_CMD_HST_NOLOGIN;
	sp->name = "SPCN_BSG_HST_NOLOGIN";
	sp->u.bsg_cmd.bsg_job = bsg_job;
	qla_parse_auth_els_ctl(sp);

	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;
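	/*
	 * qla2x00_start_sp() can return EAGAIN, e.g. when the request
	 * queue is transiently busy; retry up to EDIF_RETRY_COUNT times,
	 * sleeping EDIF_MSLEEP_INTERVAL ms between attempts, before
	 * giving up and unwinding.
	 */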
	cnt = 0;
retry:
	rval = qla2x00_start_sp(sp);
	switch (rval) {
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_edif, vha, 0x700a,
		    "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
		    __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
		    p->e.extra_rx_xchg_address, p->e.extra_control_flags,
		    sp->handle, sp->remap.req.len, bsg_job);
		break;
	case EAGAIN:
		msleep(EDIF_MSLEEP_INTERVAL);
		cnt++;
		if (cnt < EDIF_RETRY_COUNT)
			goto retry;
		fallthrough;
	default:
		ql_log(ql_log_warn, vha, 0x700e,
		    "%s qla2x00_start_sp failed = %d\n", __func__, rval);
		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
		rval = -EIO;
		goto done_free_remap_rsp;
	}

	return rval;

done_free_remap_rsp:
	dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
	    sp->remap.rsp.dma);
done_free_remap_req:
	dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
	    sp->remap.req.dma);
done_free_sp:
	qla2x00_rel_sp(sp);
done:
	return rval;
}
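/*
 * Push a session-shutdown event to the authentication application and
 * give it a bounded window to acknowledge: the loop below polls every
 * 100 ms up to 100 times, i.e. roughly ten seconds, unless the vport
 * is being deleted.
 */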
void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
{
	u16 cnt = 0;

	if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
		ql_dbg(ql_dbg_disc, vha, 0xf09c,
		    "%s: sess %8phN send port_offline event\n",
		    __func__, sess->port_name);
		sess->edif.app_sess_online = 0;
		sess->edif.sess_down_acked = 0;
		qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
		    sess->d_id.b24, 0, sess);
		qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);

		while (!READ_ONCE(sess->edif.sess_down_acked) &&
		    !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
			msleep(100);
			cnt++;
			if (cnt > 100)
				break;
		}
		sess->edif.sess_down_acked = 0;
		ql_dbg(ql_dbg_disc, vha, 0xf09c,
		    "%s: sess %8phN port_offline event completed\n",
		    __func__, sess->port_name);
	}
}
void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	if (!(fcport->flags & FCF_FCSP_DEVICE))
		return;

	qla_edb_clear(vha, fcport->d_id);
	qla_enode_clear(vha, fcport->d_id);
}