qedf_main.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QLogic FCoE Offload Driver
  4. * Copyright (c) 2016-2018 Cavium Inc.
  5. */
  6. #include <linux/init.h>
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/pci.h>
  10. #include <linux/device.h>
  11. #include <linux/highmem.h>
  12. #include <linux/crc32.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/list.h>
  15. #include <linux/kthread.h>
  16. #include <linux/phylink.h>
  17. #include <scsi/libfc.h>
  18. #include <scsi/scsi_host.h>
  19. #include <scsi/fc_frame.h>
  20. #include <linux/if_ether.h>
  21. #include <linux/if_vlan.h>
  22. #include <linux/cpu.h>
  23. #include "qedf.h"
  24. #include "qedf_dbg.h"
  25. #include <uapi/linux/pci_regs.h>
  26. const struct qed_fcoe_ops *qed_ops;
  27. static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  28. static void qedf_remove(struct pci_dev *pdev);
  29. static void qedf_shutdown(struct pci_dev *pdev);
  30. static void qedf_schedule_recovery_handler(void *dev);
  31. static void qedf_recovery_handler(struct work_struct *work);
  32. static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);
  33. /*
  34. * Driver module parameters.
  35. */
  36. static unsigned int qedf_dev_loss_tmo = 60;
  37. module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
  38. MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
  39. "remote ports (default 60)");
  40. uint qedf_debug = QEDF_LOG_INFO;
  41. module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
  42. MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
  43. " mask");
  44. static uint qedf_fipvlan_retries = 60;
  45. module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
  46. MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
  47. "before giving up (default 60)");
  48. static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
  49. module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
  50. MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
  51. "(default 1002).");
  52. static int qedf_default_prio = -1;
  53. module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
  54. MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
  55. " traffic (value between 0 and 7, default 3).");
  56. uint qedf_dump_frames;
  57. module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
  58. MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
  59. "(default off)");
  60. static uint qedf_queue_depth;
  61. module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
  62. MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
  63. "by the qedf driver. Default is 0 (use OS default).");
  64. uint qedf_io_tracing;
  65. module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
  66. MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
  67. "into trace buffer. (default off).");
  68. static uint qedf_max_lun = MAX_FIBRE_LUNS;
  69. module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
  70. MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver "
  71. "supports. (default 0xffffffff)");
  72. uint qedf_link_down_tmo;
  73. module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
  74. MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
  75. "link is down by N seconds.");
  76. bool qedf_retry_delay;
  77. module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
  78. MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry "
  79. "delay handling (default off).");
  80. static bool qedf_dcbx_no_wait;
  81. module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
  82. MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
  83. "sending FIP VLAN requests on link up (Default: off).");
  84. static uint qedf_dp_module;
  85. module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
  86. MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed "
  87. "qed module during probe.");
  88. static uint qedf_dp_level = QED_LEVEL_NOTICE;
  89. module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
  90. MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
  91. "during probe (0-3: 0 more verbose).");
  92. static bool qedf_enable_recovery = true;
  93. module_param_named(enable_recovery, qedf_enable_recovery,
  94. bool, S_IRUGO | S_IWUSR);
  95. MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
  96. "interface level errors. 0 = Disabled, 1 = Enabled (Default: 1).");
  97. struct workqueue_struct *qedf_io_wq;
  98. static struct fcoe_percpu_s qedf_global;
  99. static DEFINE_SPINLOCK(qedf_global_lock);
  100. static struct kmem_cache *qedf_io_work_cache;
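/*
 * Merge the 802.1p priority (qedf->prio) into the given VLAN ID and cache the
 * result in qedf->vlan_id for use in outgoing FIP and FCoE frames.
 */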
  101. void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
  102. {
  103. int vlan_id_tmp = 0;
  104. vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
  105. qedf->vlan_id = vlan_id_tmp;
  106. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  107. "Setting vlan_id=0x%04x prio=%d.\n",
  108. vlan_id_tmp, qedf->prio);
  109. }
  110. /* Returns true if we have a valid vlan, false otherwise */
  111. static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
  112. {
  113. while (qedf->fipvlan_retries--) {
  114. /* This is to catch if link goes down during fipvlan retries */
  115. if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
  116. QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
  117. return false;
  118. }
  119. if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  120. QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
  121. return false;
  122. }
  123. if (qedf->vlan_id > 0) {
  124. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  125. "vlan = 0x%x already set, calling ctlr_link_up.\n",
  126. qedf->vlan_id);
  127. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
  128. fcoe_ctlr_link_up(&qedf->ctlr);
  129. return true;
  130. }
  131. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  132. "Retry %d.\n", qedf->fipvlan_retries);
  133. init_completion(&qedf->fipvlan_compl);
  134. qedf_fcoe_send_vlan_req(qedf);
  135. wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
  136. }
  137. return false;
  138. }
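/*
 * Delayed work handler for link state changes: on link up, acquire the FIP
 * VLAN (falling back to the default VLAN on timeout) and notify libfcoe; on
 * link down, notify libfcoe and wait for all offloaded sessions to upload.
 */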
  139. static void qedf_handle_link_update(struct work_struct *work)
  140. {
  141. struct qedf_ctx *qedf =
  142. container_of(work, struct qedf_ctx, link_update.work);
  143. int rc;
  144. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
  145. atomic_read(&qedf->link_state));
  146. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
  147. rc = qedf_initiate_fipvlan_req(qedf);
  148. if (rc)
  149. return;
  150. if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
  151. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  152. "Link is down, resetting vlan_id.\n");
  153. qedf->vlan_id = 0;
  154. return;
  155. }
  156. /*
  157. * If we get here then we never received a response to our
  158. * FIP VLAN request, so set the vlan_id to the default and
  159. * tell FCoE that the link is up
  160. */
  161. QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
  162. "response, falling back to default VLAN %d.\n",
  163. qedf_fallback_vlan);
  164. qedf_set_vlan_id(qedf, qedf_fallback_vlan);
  165. /*
  166. * Zero out data_src_addr so we'll update it with the new
  167. * lport port_id
  168. */
  169. eth_zero_addr(qedf->data_src_addr);
  170. fcoe_ctlr_link_up(&qedf->ctlr);
  171. } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
  172. /*
  173. * If we hit here and link_down_tmo_valid is still 1 it means
  174. * that link_down_tmo timed out so set it to 0 to make sure any
  175. * other readers have accurate state.
  176. */
  177. atomic_set(&qedf->link_down_tmo_valid, 0);
  178. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  179. "Calling fcoe_ctlr_link_down().\n");
  180. fcoe_ctlr_link_down(&qedf->ctlr);
  181. if (qedf_wait_for_upload(qedf) == false)
  182. QEDF_ERR(&qedf->dbg_ctx,
  183. "Could not upload all sessions.\n");
  184. /* Reset the number of FIP VLAN retries */
  185. qedf->fipvlan_retries = qedf_fipvlan_retries;
  186. }
  187. }
  188. #define QEDF_FCOE_MAC_METHOD_GRANGED_MAC 1
  189. #define QEDF_FCOE_MAC_METHOD_FCF_MAP 2
  190. #define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC 3
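/*
 * Select the source MAC for FCoE data traffic from the FIP FLOGI response:
 * prefer the granted MAC, then an FC-map derived MAC, then the default FCoE
 * MAC. The method chosen is logged for debugging.
 */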
  191. static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
  192. {
  193. u8 *granted_mac;
  194. struct fc_frame_header *fh = fc_frame_header_get(fp);
  195. u8 fc_map[3];
  196. int method = 0;
  197. /* Get granted MAC address from FIP FLOGI payload */
  198. granted_mac = fr_cb(fp)->granted_mac;
  199. /*
  200. * We set the source MAC for FCoE traffic based on the Granted MAC
  201. * address from the switch.
  202. *
  203. * If granted_mac is non-zero, we use that.
  204. * If granted_mac is zeroed out, we create the FCoE MAC based on
  205. * the sel_fcf->fc_map and the d_id of the FLOGI frame.
  206. * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
  207. * d_id of the FLOGI frame.
  208. */
  209. if (!is_zero_ether_addr(granted_mac)) {
  210. ether_addr_copy(qedf->data_src_addr, granted_mac);
  211. method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC;
  212. } else if (qedf->ctlr.sel_fcf->fc_map != 0) {
  213. hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
  214. qedf->data_src_addr[0] = fc_map[0];
  215. qedf->data_src_addr[1] = fc_map[1];
  216. qedf->data_src_addr[2] = fc_map[2];
  217. qedf->data_src_addr[3] = fh->fh_d_id[0];
  218. qedf->data_src_addr[4] = fh->fh_d_id[1];
  219. qedf->data_src_addr[5] = fh->fh_d_id[2];
  220. method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
  221. } else {
  222. fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
  223. method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
  224. }
  225. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  226. "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
  227. }
  228. static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
  229. void *arg)
  230. {
  231. struct fc_exch *exch = fc_seq_exch(seq);
  232. struct fc_lport *lport = exch->lp;
  233. struct qedf_ctx *qedf = lport_priv(lport);
  234. if (!qedf) {
  235. QEDF_ERR(NULL, "qedf is NULL.\n");
  236. return;
  237. }
  238. /*
  239. * If ERR_PTR is set then don't try to stat anything as it will cause
  240. * a crash when we access fp.
  241. */
  242. if (IS_ERR(fp)) {
  243. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
  244. "fp has IS_ERR() set.\n");
  245. goto skip_stat;
  246. }
  247. /* Log stats for FLOGI reject */
  248. if (fc_frame_payload_op(fp) == ELS_LS_RJT)
  249. qedf->flogi_failed++;
  250. else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
  251. /* Set the source MAC we will use for FCoE traffic */
  252. qedf_set_data_src_addr(qedf, fp);
  253. qedf->flogi_pending = 0;
  254. }
  255. /* Complete flogi_compl so we can proceed to sending ADISCs */
  256. complete(&qedf->flogi_compl);
  257. skip_stat:
  258. /* Report response to libfc */
  259. fc_lport_flogi_resp(seq, fp, lport);
  260. }
  261. static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
  262. struct fc_frame *fp, unsigned int op,
  263. void (*resp)(struct fc_seq *,
  264. struct fc_frame *,
  265. void *),
  266. void *arg, u32 timeout)
  267. {
  268. struct qedf_ctx *qedf = lport_priv(lport);
  269. /*
  270. * Intercept FLOGI for statistics purposes. Note we use the resp
  271. * callback to tell if this is really a FLOGI.
  272. */
  273. if (resp == fc_lport_flogi_resp) {
  274. qedf->flogi_cnt++;
  275. if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
  276. schedule_delayed_work(&qedf->stag_work, 2);
  277. return NULL;
  278. }
  279. qedf->flogi_pending++;
  280. return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
  281. arg, timeout);
  282. }
  283. return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
  284. }
  285. int qedf_send_flogi(struct qedf_ctx *qedf)
  286. {
  287. struct fc_lport *lport;
  288. struct fc_frame *fp;
  289. lport = qedf->lport;
  290. if (!lport->tt.elsct_send) {
  291. QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
  292. return -EINVAL;
  293. }
  294. fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
  295. if (!fp) {
  296. QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
  297. return -ENOMEM;
  298. }
  299. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
  300. "Sending FLOGI to reestablish session with switch.\n");
  301. lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
  302. ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);
  303. init_completion(&qedf->flogi_compl);
  304. return 0;
  305. }
  306. /*
  307. * This function is called if link_down_tmo is in use. If we get a link up and
  308. * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
  309. * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
  310. */
  311. static void qedf_link_recovery(struct work_struct *work)
  312. {
  313. struct qedf_ctx *qedf =
  314. container_of(work, struct qedf_ctx, link_recovery.work);
  315. struct fc_lport *lport = qedf->lport;
  316. struct fc_rport_priv *rdata;
  317. bool rc;
  318. int retries = 30;
  319. int rval, i;
  320. struct list_head rdata_login_list;
  321. INIT_LIST_HEAD(&rdata_login_list);
  322. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  323. "Link down tmo did not expire.\n");
  324. /*
  325. * Essentially reset the fcoe_ctlr here without affecting the state
  326. * of the libfc structs.
  327. */
  328. qedf->ctlr.state = FIP_ST_LINK_WAIT;
  329. fcoe_ctlr_link_down(&qedf->ctlr);
  330. /*
  331. * Bring the link up before we send the fipvlan request so libfcoe
  332. * can select a new fcf in parallel
  333. */
  334. fcoe_ctlr_link_up(&qedf->ctlr);
  335. /* Since the link went down and up, verify which VLAN we're on */
  336. qedf->fipvlan_retries = qedf_fipvlan_retries;
  337. rc = qedf_initiate_fipvlan_req(qedf);
  338. /* If getting the VLAN fails, set the VLAN to the fallback one */
  339. if (!rc)
  340. qedf_set_vlan_id(qedf, qedf_fallback_vlan);
  341. /*
  342. * We need to wait for an FCF to be selected due to the
  343. * fcoe_ctlr_link_up; otherwise the FLOGI will be rejected.
  344. */
  345. while (retries > 0) {
  346. if (qedf->ctlr.sel_fcf) {
  347. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  348. "FCF reselected, proceeding with FLOGI.\n");
  349. break;
  350. }
  351. msleep(500);
  352. retries--;
  353. }
  354. if (retries < 1) {
  355. QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
  356. "FCF selection.\n");
  357. return;
  358. }
  359. rval = qedf_send_flogi(qedf);
  360. if (rval)
  361. return;
  362. /* Wait for FLOGI completion before proceeding with sending ADISCs */
  363. i = wait_for_completion_timeout(&qedf->flogi_compl,
  364. qedf->lport->r_a_tov);
  365. if (i == 0) {
  366. QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
  367. return;
  368. }
  369. /*
  370. * Call fc_rport_login() which will cause libfc to send an
  371. * ADISC since the rport is in the ready state.
  372. */
  373. mutex_lock(&lport->disc.disc_mutex);
  374. list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
  375. if (kref_get_unless_zero(&rdata->kref)) {
  376. fc_rport_login(rdata);
  377. kref_put(&rdata->kref, fc_rport_destroy);
  378. }
  379. }
  380. mutex_unlock(&lport->disc.disc_mutex);
  381. }
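/*
 * Translate the qed link speed and supported capability bitmap into libfc
 * FC_PORTSPEED_* values and update the fc_host supported speeds.
 */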
  382. static void qedf_update_link_speed(struct qedf_ctx *qedf,
  383. struct qed_link_output *link)
  384. {
  385. __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
  386. struct fc_lport *lport = qedf->lport;
  387. lport->link_speed = FC_PORTSPEED_UNKNOWN;
  388. lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
  389. /* Set fc_host link speed */
  390. switch (link->speed) {
  391. case 10000:
  392. lport->link_speed = FC_PORTSPEED_10GBIT;
  393. break;
  394. case 25000:
  395. lport->link_speed = FC_PORTSPEED_25GBIT;
  396. break;
  397. case 40000:
  398. lport->link_speed = FC_PORTSPEED_40GBIT;
  399. break;
  400. case 50000:
  401. lport->link_speed = FC_PORTSPEED_50GBIT;
  402. break;
  403. case 100000:
  404. lport->link_speed = FC_PORTSPEED_100GBIT;
  405. break;
  406. case 20000:
  407. lport->link_speed = FC_PORTSPEED_20GBIT;
  408. break;
  409. default:
  410. lport->link_speed = FC_PORTSPEED_UNKNOWN;
  411. break;
  412. }
  413. /*
  414. * Set supported link speed by querying the supported
  415. * capabilities of the link.
  416. */
  417. phylink_zero(sup_caps);
  418. phylink_set(sup_caps, 10000baseT_Full);
  419. phylink_set(sup_caps, 10000baseKX4_Full);
  420. phylink_set(sup_caps, 10000baseR_FEC);
  421. phylink_set(sup_caps, 10000baseCR_Full);
  422. phylink_set(sup_caps, 10000baseSR_Full);
  423. phylink_set(sup_caps, 10000baseLR_Full);
  424. phylink_set(sup_caps, 10000baseLRM_Full);
  425. phylink_set(sup_caps, 10000baseKR_Full);
  426. if (linkmode_intersects(link->supported_caps, sup_caps))
  427. lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
  428. phylink_zero(sup_caps);
  429. phylink_set(sup_caps, 25000baseKR_Full);
  430. phylink_set(sup_caps, 25000baseCR_Full);
  431. phylink_set(sup_caps, 25000baseSR_Full);
  432. if (linkmode_intersects(link->supported_caps, sup_caps))
  433. lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
  434. phylink_zero(sup_caps);
  435. phylink_set(sup_caps, 40000baseLR4_Full);
  436. phylink_set(sup_caps, 40000baseKR4_Full);
  437. phylink_set(sup_caps, 40000baseCR4_Full);
  438. phylink_set(sup_caps, 40000baseSR4_Full);
  439. if (linkmode_intersects(link->supported_caps, sup_caps))
  440. lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
  441. phylink_zero(sup_caps);
  442. phylink_set(sup_caps, 50000baseKR2_Full);
  443. phylink_set(sup_caps, 50000baseCR2_Full);
  444. phylink_set(sup_caps, 50000baseSR2_Full);
  445. if (linkmode_intersects(link->supported_caps, sup_caps))
  446. lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
  447. phylink_zero(sup_caps);
  448. phylink_set(sup_caps, 100000baseKR4_Full);
  449. phylink_set(sup_caps, 100000baseSR4_Full);
  450. phylink_set(sup_caps, 100000baseCR4_Full);
  451. phylink_set(sup_caps, 100000baseLR4_ER4_Full);
  452. if (linkmode_intersects(link->supported_caps, sup_caps))
  453. lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
  454. phylink_zero(sup_caps);
  455. phylink_set(sup_caps, 20000baseKR2_Full);
  456. if (linkmode_intersects(link->supported_caps, sup_caps))
  457. lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
  458. if (lport->host && lport->host->shost_data)
  459. fc_host_supported_speeds(lport->host) =
  460. lport->link_supported_speeds;
  461. }
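/*
 * qed callback for bandwidth updates: re-read the link state and refresh the
 * reported link speed if the logical link is still up.
 */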
  462. static void qedf_bw_update(void *dev)
  463. {
  464. struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
  465. struct qed_link_output link;
  466. /* Get the latest status of the link */
  467. qed_ops->common->get_link(qedf->cdev, &link);
  468. if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  469. QEDF_ERR(&qedf->dbg_ctx,
  470. "Ignore link update, driver getting unload.\n");
  471. return;
  472. }
  473. if (link.link_up) {
  474. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
  475. qedf_update_link_speed(qedf, &link);
  476. else
  477. QEDF_ERR(&qedf->dbg_ctx,
  478. "Ignore bw update, link is down.\n");
  479. } else {
  480. QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
  481. }
  482. }
  483. static void qedf_link_update(void *dev, struct qed_link_output *link)
  484. {
  485. struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
  486. /*
  487. * Prevent race where we're removing the module and we get link update
  488. * for qed.
  489. */
  490. if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  491. QEDF_ERR(&qedf->dbg_ctx,
  492. "Ignore link update, driver getting unload.\n");
  493. return;
  494. }
  495. if (link->link_up) {
  496. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
  497. QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
  498. "Ignoring link up event as link is already up.\n");
  499. return;
  500. }
  501. QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
  502. link->speed / 1000);
  503. /* Cancel any pending link down work */
  504. cancel_delayed_work(&qedf->link_update);
  505. atomic_set(&qedf->link_state, QEDF_LINK_UP);
  506. qedf_update_link_speed(qedf, link);
  507. if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
  508. qedf_dcbx_no_wait) {
  509. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  510. "DCBx done.\n");
  511. if (atomic_read(&qedf->link_down_tmo_valid) > 0)
  512. queue_delayed_work(qedf->link_update_wq,
  513. &qedf->link_recovery, 0);
  514. else
  515. queue_delayed_work(qedf->link_update_wq,
  516. &qedf->link_update, 0);
  517. atomic_set(&qedf->link_down_tmo_valid, 0);
  518. }
  519. } else {
  520. QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");
  521. atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
  522. atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
  523. /*
  524. * Flag that we're waiting for the link to come back up before
  525. * informing the fcoe layer of the event.
  526. */
  527. if (qedf_link_down_tmo > 0) {
  528. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  529. "Starting link down tmo.\n");
  530. atomic_set(&qedf->link_down_tmo_valid, 1);
  531. }
  532. qedf->vlan_id = 0;
  533. qedf_update_link_speed(qedf, link);
  534. queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
  535. qedf_link_down_tmo * HZ);
  536. }
  537. }
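/*
 * qed DCBX callback: record the negotiated FCoE 802.1p priority (honoring the
 * default_prio modparam) and, once DCBX has converged, queue the link up work.
 */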
  538. static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
  539. {
  540. struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
  541. u8 tmp_prio;
  542. QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
  543. "prio=%d.\n", get->operational.valid, get->operational.enabled,
  544. get->operational.app_prio.fcoe);
  545. if (get->operational.enabled && get->operational.valid) {
  546. /* If DCBX was already negotiated on link up then just exit */
  547. if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
  548. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  549. "DCBX already set on link up.\n");
  550. return;
  551. }
  552. atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
  553. /*
  554. * Set the 8021q priority in the following manner:
  555. *
  556. * 1. If a modparam is set use that
  557. * 2. If the value is not between 0..7 use the default
  558. * 3. Use the priority we get from the DCBX app tag
  559. */
  560. tmp_prio = get->operational.app_prio.fcoe;
  561. if (qedf_default_prio > -1)
  562. qedf->prio = qedf_default_prio;
  563. else if (tmp_prio > 7) {
  564. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  565. "FIP/FCoE prio %d out of range, setting to %d.\n",
  566. tmp_prio, QEDF_DEFAULT_PRIO);
  567. qedf->prio = QEDF_DEFAULT_PRIO;
  568. } else
  569. qedf->prio = tmp_prio;
  570. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
  571. !qedf_dcbx_no_wait) {
  572. if (atomic_read(&qedf->link_down_tmo_valid) > 0)
  573. queue_delayed_work(qedf->link_update_wq,
  574. &qedf->link_recovery, 0);
  575. else
  576. queue_delayed_work(qedf->link_update_wq,
  577. &qedf->link_update, 0);
  578. atomic_set(&qedf->link_down_tmo_valid, 0);
  579. }
  580. }
  581. }
  582. static u32 qedf_get_login_failures(void *cookie)
  583. {
  584. struct qedf_ctx *qedf;
  585. qedf = (struct qedf_ctx *)cookie;
  586. return qedf->flogi_failed;
  587. }
  588. static struct qed_fcoe_cb_ops qedf_cb_ops = {
  589. {
  590. .link_update = qedf_link_update,
  591. .bw_update = qedf_bw_update,
  592. .schedule_recovery_handler = qedf_schedule_recovery_handler,
  593. .dcbx_aen = qedf_dcbx_handler,
  594. .get_generic_tlv_data = qedf_get_generic_tlv_data,
  595. .get_protocol_tlv_data = qedf_get_protocol_tlv_data,
  596. .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
  597. }
  598. };
  599. /*
  600. * Various transport templates.
  601. */
  602. static struct scsi_transport_template *qedf_fc_transport_template;
  603. static struct scsi_transport_template *qedf_fc_vport_transport_template;
  604. /*
  605. * SCSI EH handlers
  606. */
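/*
 * SCSI error-handler abort: validate the command and rport state, then issue
 * an ABTS for the outstanding I/O and wait for the firmware response before
 * reporting success or failure.
 */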
  607. static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
  608. {
  609. struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
  610. struct fc_lport *lport;
  611. struct qedf_ctx *qedf;
  612. struct qedf_ioreq *io_req;
  613. struct fc_rport_libfc_priv *rp = rport->dd_data;
  614. struct fc_rport_priv *rdata;
  615. struct qedf_rport *fcport = NULL;
  616. int rc = FAILED;
  617. int wait_count = 100;
  618. int refcount = 0;
  619. int rval;
  620. int got_ref = 0;
  621. lport = shost_priv(sc_cmd->device->host);
  622. qedf = (struct qedf_ctx *)lport_priv(lport);
  623. /* rport and tgt are allocated together, so tgt should be non-NULL */
  624. fcport = (struct qedf_rport *)&rp[1];
  625. rdata = fcport->rdata;
  626. if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
  627. QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
  628. rc = SUCCESS;
  629. goto out;
  630. }
  631. io_req = qedf_priv(sc_cmd)->io_req;
  632. if (!io_req) {
  633. QEDF_ERR(&qedf->dbg_ctx,
  634. "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
  635. sc_cmd, sc_cmd->cmnd[0],
  636. rdata->ids.port_id);
  637. rc = SUCCESS;
  638. goto drop_rdata_kref;
  639. }
  640. rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
  641. if (rval)
  642. got_ref = 1;
  643. /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
  644. if (!rval || io_req->sc_cmd != sc_cmd) {
  645. QEDF_ERR(&qedf->dbg_ctx,
  646. "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
  647. io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
  648. goto drop_rdata_kref;
  649. }
  650. if (fc_remote_port_chkready(rport)) {
  651. refcount = kref_read(&io_req->refcount);
  652. QEDF_ERR(&qedf->dbg_ctx,
  653. "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
  654. io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
  655. refcount, rdata->ids.port_id);
  656. goto drop_rdata_kref;
  657. }
  658. rc = fc_block_scsi_eh(sc_cmd);
  659. if (rc)
  660. goto drop_rdata_kref;
  661. if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
  662. QEDF_ERR(&qedf->dbg_ctx,
  663. "Connection uploading, xid=0x%x., port_id=%06x\n",
  664. io_req->xid, rdata->ids.port_id);
  665. while (io_req->sc_cmd && (wait_count != 0)) {
  666. msleep(100);
  667. wait_count--;
  668. }
  669. if (wait_count) {
  670. QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
  671. rc = SUCCESS;
  672. } else {
  673. QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
  674. rc = FAILED;
  675. }
  676. goto drop_rdata_kref;
  677. }
  678. if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
  679. QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
  680. goto drop_rdata_kref;
  681. }
  682. QEDF_ERR(&qedf->dbg_ctx,
  683. "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
  684. io_req, sc_cmd, io_req->xid, io_req->fp_idx,
  685. rdata->ids.port_id);
  686. if (qedf->stop_io_on_error) {
  687. qedf_stop_all_io(qedf);
  688. rc = SUCCESS;
  689. goto drop_rdata_kref;
  690. }
  691. init_completion(&io_req->abts_done);
  692. rval = qedf_initiate_abts(io_req, true);
  693. if (rval) {
  694. QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
  695. /*
  696. * If we fail to queue the ABTS then return this command to
  697. * the SCSI layer as it will own and free the xid
  698. */
  699. rc = SUCCESS;
  700. qedf_scsi_done(qedf, io_req, DID_ERROR);
  701. goto drop_rdata_kref;
  702. }
  703. wait_for_completion(&io_req->abts_done);
  704. if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
  705. io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
  706. io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
  707. /*
  708. * If we get a response to the abort, this is success from
  709. * the perspective that all references to the command have
  710. * been removed from the driver and firmware
  711. */
  712. rc = SUCCESS;
  713. } else {
  714. /* If the abort and cleanup failed then return a failure */
  715. rc = FAILED;
  716. }
  717. if (rc == SUCCESS)
  718. QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
  719. io_req->xid);
  720. else
  721. QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
  722. io_req->xid);
  723. drop_rdata_kref:
  724. kref_put(&rdata->kref, fc_rport_destroy);
  725. out:
  726. if (got_ref)
  727. kref_put(&io_req->refcount, qedf_release_cmd);
  728. return rc;
  729. }
  730. static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
  731. {
  732. QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
  733. sc_cmd->device->host->host_no, sc_cmd->device->id,
  734. sc_cmd->device->lun);
  735. return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
  736. }
  737. static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
  738. {
  739. QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
  740. sc_cmd->device->host->host_no, sc_cmd->device->id,
  741. sc_cmd->device->lun);
  742. return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
  743. }
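/*
 * Wait up to roughly 60 seconds (120 * 500ms) for all offloaded sessions to be
 * uploaded; log any fcports still outstanding and return false on timeout.
 */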
  744. bool qedf_wait_for_upload(struct qedf_ctx *qedf)
  745. {
  746. struct qedf_rport *fcport;
  747. int wait_cnt = 120;
  748. while (wait_cnt--) {
  749. if (atomic_read(&qedf->num_offloads))
  750. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  751. "Waiting for all uploads to complete num_offloads = 0x%x.\n",
  752. atomic_read(&qedf->num_offloads));
  753. else
  754. return true;
  755. msleep(500);
  756. }
  757. rcu_read_lock();
  758. list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
  759. if (test_bit(QEDF_RPORT_SESSION_READY,
  760. &fcport->flags)) {
  761. if (fcport->rdata)
  762. QEDF_ERR(&qedf->dbg_ctx,
  763. "Waiting for fcport %p portid=%06x.\n",
  764. fcport, fcport->rdata->ids.port_id);
  765. } else {
  766. QEDF_ERR(&qedf->dbg_ctx,
  767. "Waiting for fcport %p.\n", fcport);
  768. }
  769. }
  770. rcu_read_unlock();
  771. return false;
  772. }
  773. /* Performs soft reset of qedf_ctx by simulating a link down/up */
  774. void qedf_ctx_soft_reset(struct fc_lport *lport)
  775. {
  776. struct qedf_ctx *qedf;
  777. struct qed_link_output if_link;
  778. if (lport->vport) {
  779. printk_ratelimited("Cannot issue host reset on NPIV port.\n");
  780. return;
  781. }
  782. qedf = lport_priv(lport);
  783. qedf->flogi_pending = 0;
  784. /* For host reset, essentially do a soft link up/down */
  785. atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
  786. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  787. "Queuing link down work.\n");
  788. queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
  789. 0);
  790. if (qedf_wait_for_upload(qedf) == false) {
  791. QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
  792. WARN_ON(atomic_read(&qedf->num_offloads));
  793. }
  794. /* Before setting link up query physical link state */
  795. qed_ops->common->get_link(qedf->cdev, &if_link);
  796. /* Bail if the physical link is not up */
  797. if (!if_link.link_up) {
  798. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  799. "Physical link is not up.\n");
  800. return;
  801. }
  802. /* Flush and wait to make sure link down is processed */
  803. flush_delayed_work(&qedf->link_update);
  804. msleep(500);
  805. atomic_set(&qedf->link_state, QEDF_LINK_UP);
  806. qedf->vlan_id = 0;
  807. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  808. "Queue link up work.\n");
  809. queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
  810. 0);
  811. }
  812. /* Reset the host by gracefully logging out and then logging back in */
  813. static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
  814. {
  815. struct fc_lport *lport;
  816. struct qedf_ctx *qedf;
  817. lport = shost_priv(sc_cmd->device->host);
  818. qedf = lport_priv(lport);
  819. if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
  820. test_bit(QEDF_UNLOADING, &qedf->flags))
  821. return FAILED;
  822. QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
  823. qedf_ctx_soft_reset(lport);
  824. return SUCCESS;
  825. }
  826. static int qedf_slave_configure(struct scsi_device *sdev)
  827. {
  828. if (qedf_queue_depth) {
  829. scsi_change_queue_depth(sdev, qedf_queue_depth);
  830. }
  831. return 0;
  832. }
  833. static struct scsi_host_template qedf_host_template = {
  834. .module = THIS_MODULE,
  835. .name = QEDF_MODULE_NAME,
  836. .this_id = -1,
  837. .cmd_per_lun = 32,
  838. .max_sectors = 0xffff,
  839. .queuecommand = qedf_queuecommand,
  840. .shost_groups = qedf_host_groups,
  841. .eh_abort_handler = qedf_eh_abort,
  842. .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
  843. .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
  844. .eh_host_reset_handler = qedf_eh_host_reset,
  845. .slave_configure = qedf_slave_configure,
  846. .dma_boundary = QED_HW_DMA_BOUNDARY,
  847. .sg_tablesize = QEDF_MAX_BDS_PER_CMD,
  848. .can_queue = FCOE_PARAMS_NUM_TASKS,
  849. .change_queue_depth = scsi_change_queue_depth,
  850. .cmd_size = sizeof(struct qedf_cmd_priv),
  851. };
  852. static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
  853. {
  854. int rc;
  855. spin_lock(&qedf_global_lock);
  856. rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
  857. spin_unlock(&qedf_global_lock);
  858. return rc;
  859. }
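/*
 * Walk the fcport list under RCU and return the offloaded fcport matching the
 * given port_id, or NULL if none is found.
 */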
  860. static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
  861. {
  862. struct qedf_rport *fcport;
  863. struct fc_rport_priv *rdata;
  864. rcu_read_lock();
  865. list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
  866. rdata = fcport->rdata;
  867. if (rdata == NULL)
  868. continue;
  869. if (rdata->ids.port_id == port_id) {
  870. rcu_read_unlock();
  871. return fcport;
  872. }
  873. }
  874. rcu_read_unlock();
  875. /* Return NULL to caller to let them know fcport was not found */
  876. return NULL;
  877. }
  878. /* Transmits an ELS frame over an offloaded session */
  879. static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
  880. {
  881. struct fc_frame_header *fh;
  882. int rc = 0;
  883. fh = fc_frame_header_get(fp);
  884. if ((fh->fh_type == FC_TYPE_ELS) &&
  885. (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  886. switch (fc_frame_payload_op(fp)) {
  887. case ELS_ADISC:
  888. qedf_send_adisc(fcport, fp);
  889. rc = 1;
  890. break;
  891. }
  892. }
  893. return rc;
  894. }
  895. /*
  896. * qedf_xmit - qedf FCoE frame transmit function
  897. */
  898. static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
  899. {
  900. struct fc_lport *base_lport;
  901. struct qedf_ctx *qedf;
  902. struct ethhdr *eh;
  903. struct fcoe_crc_eof *cp;
  904. struct sk_buff *skb;
  905. struct fc_frame_header *fh;
  906. struct fcoe_hdr *hp;
  907. u8 sof, eof;
  908. u32 crc;
  909. unsigned int hlen, tlen, elen;
  910. int wlen;
  911. struct fc_lport *tmp_lport;
  912. struct fc_lport *vn_port = NULL;
  913. struct qedf_rport *fcport;
  914. int rc;
  915. u16 vlan_tci = 0;
  916. qedf = (struct qedf_ctx *)lport_priv(lport);
  917. fh = fc_frame_header_get(fp);
  918. skb = fp_skb(fp);
  919. /* Filter out traffic to other NPIV ports on the same host */
  920. if (lport->vport)
  921. base_lport = shost_priv(vport_to_shost(lport->vport));
  922. else
  923. base_lport = lport;
  924. /* Flag if the destination is the base port */
  925. if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
  926. vn_port = base_lport;
  927. } else {
  928. /* Go through the list of vports attached to the base_lport
  929. * and see if we have a match with the destination address.
  930. */
  931. list_for_each_entry(tmp_lport, &base_lport->vports, list) {
  932. if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
  933. vn_port = tmp_lport;
  934. break;
  935. }
  936. }
  937. }
  938. if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
  939. struct fc_rport_priv *rdata = NULL;
  940. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  941. "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
  942. kfree_skb(skb);
  943. rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
  944. if (rdata) {
  945. rdata->retries = lport->max_rport_retry_count;
  946. kref_put(&rdata->kref, fc_rport_destroy);
  947. }
  948. return -EINVAL;
  949. }
  950. /* End NPIV filtering */
  951. if (!qedf->ctlr.sel_fcf) {
  952. kfree_skb(skb);
  953. return 0;
  954. }
  955. if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
  956. QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
  957. kfree_skb(skb);
  958. return 0;
  959. }
  960. if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
  961. QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
  962. kfree_skb(skb);
  963. return 0;
  964. }
  965. if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  966. if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
  967. return 0;
  968. }
  969. /* Check to see if this needs to be sent on an offloaded session */
  970. fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
  971. if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  972. rc = qedf_xmit_l2_frame(fcport, fp);
  973. /*
  974. * If the frame was successfully sent over the middle path
  975. * then do not try to also send it over the LL2 path
  976. */
  977. if (rc)
  978. return 0;
  979. }
  980. sof = fr_sof(fp);
  981. eof = fr_eof(fp);
  982. elen = sizeof(struct ethhdr);
  983. hlen = sizeof(struct fcoe_hdr);
  984. tlen = sizeof(struct fcoe_crc_eof);
  985. wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
  986. skb->ip_summed = CHECKSUM_NONE;
  987. crc = fcoe_fc_crc(fp);
  988. /* copy port crc and eof to the skb buff */
  989. if (skb_is_nonlinear(skb)) {
  990. skb_frag_t *frag;
  991. if (qedf_get_paged_crc_eof(skb, tlen)) {
  992. kfree_skb(skb);
  993. return -ENOMEM;
  994. }
  995. frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
  996. cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
  997. } else {
  998. cp = skb_put(skb, tlen);
  999. }
  1000. memset(cp, 0, sizeof(*cp));
  1001. cp->fcoe_eof = eof;
  1002. cp->fcoe_crc32 = cpu_to_le32(~crc);
  1003. if (skb_is_nonlinear(skb)) {
  1004. kunmap_atomic(cp);
  1005. cp = NULL;
  1006. }
  1007. /* adjust skb network/transport offsets to match mac/fcoe/port */
  1008. skb_push(skb, elen + hlen);
  1009. skb_reset_mac_header(skb);
  1010. skb_reset_network_header(skb);
  1011. skb->mac_len = elen;
  1012. skb->protocol = htons(ETH_P_FCOE);
  1013. /*
  1014. * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
  1015. * for FIP/FCoE traffic.
  1016. */
  1017. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
  1018. /* fill up mac and fcoe headers */
  1019. eh = eth_hdr(skb);
  1020. eh->h_proto = htons(ETH_P_FCOE);
  1021. if (qedf->ctlr.map_dest)
  1022. fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
  1023. else
  1024. /* insert GW address */
  1025. ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
  1026. /* Set the source MAC address */
  1027. ether_addr_copy(eh->h_source, qedf->data_src_addr);
  1028. hp = (struct fcoe_hdr *)(eh + 1);
  1029. memset(hp, 0, sizeof(*hp));
  1030. if (FC_FCOE_VER)
  1031. FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
  1032. hp->fcoe_sof = sof;
  1033. /* Update tx stats */
  1034. this_cpu_inc(lport->stats->TxFrames);
  1035. this_cpu_add(lport->stats->TxWords, wlen);
  1036. /* Get VLAN ID from skb for printing purposes */
  1037. __vlan_hwaccel_get_tag(skb, &vlan_tci);
  1038. /* send down to lld */
  1039. fr_dev(fp) = lport;
  1040. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
  1041. "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
  1042. ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
  1043. vlan_tci);
  1044. if (qedf_dump_frames)
  1045. print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
  1046. 1, skb->data, skb->len, false);
  1047. rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
  1048. if (rc) {
  1049. QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
  1050. kfree_skb(skb);
  1051. return rc;
  1052. }
  1053. return 0;
  1054. }
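/*
 * Allocate the per-fcport send queue and its page base list (PBL); each PBL
 * entry stores the low and high 32 bits of one queue page's DMA address.
 */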
  1055. static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
  1056. {
  1057. int rval = 0;
  1058. u32 *pbl;
  1059. dma_addr_t page;
  1060. int num_pages;
  1061. /* Calculate appropriate queue and PBL sizes */
  1062. fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
  1063. fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
  1064. fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
  1065. sizeof(void *);
  1066. fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
  1067. fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
  1068. &fcport->sq_dma, GFP_KERNEL);
  1069. if (!fcport->sq) {
  1070. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
  1071. rval = 1;
  1072. goto out;
  1073. }
  1074. fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
  1075. fcport->sq_pbl_size,
  1076. &fcport->sq_pbl_dma, GFP_KERNEL);
  1077. if (!fcport->sq_pbl) {
  1078. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
  1079. rval = 1;
  1080. goto out_free_sq;
  1081. }
  1082. /* Create PBL */
  1083. num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
  1084. page = fcport->sq_dma;
  1085. pbl = (u32 *)fcport->sq_pbl;
  1086. while (num_pages--) {
  1087. *pbl = U64_LO(page);
  1088. pbl++;
  1089. *pbl = U64_HI(page);
  1090. pbl++;
  1091. page += QEDF_PAGE_SIZE;
  1092. }
  1093. return rval;
  1094. out_free_sq:
  1095. dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
  1096. fcport->sq_dma);
  1097. out:
  1098. return rval;
  1099. }
  1100. static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
  1101. {
  1102. if (fcport->sq_pbl)
  1103. dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
  1104. fcport->sq_pbl, fcport->sq_pbl_dma);
  1105. if (fcport->sq)
  1106. dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
  1107. fcport->sq, fcport->sq_dma);
  1108. }
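/*
 * Build a qed_fcoe_params_offload structure for this rport (SQ PBL, MACs,
 * payload sizes, VLAN, S_ID/D_ID and optional FC-TAPE flags) and ask qed to
 * offload the connection. Returns 0 on success, non-zero on failure.
 */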
  1109. static int qedf_offload_connection(struct qedf_ctx *qedf,
  1110. struct qedf_rport *fcport)
  1111. {
  1112. struct qed_fcoe_params_offload conn_info;
  1113. u32 port_id;
  1114. int rval;
  1115. uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));
  1116. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
  1117. "portid=%06x.\n", fcport->rdata->ids.port_id);
  1118. rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
  1119. &fcport->fw_cid, &fcport->p_doorbell);
  1120. if (rval) {
  1121. QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
  1122. "for portid=%06x.\n", fcport->rdata->ids.port_id);
  1123. rval = 1; /* For some reason qed returns 0 on failure here */
  1124. goto out;
  1125. }
  1126. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
  1127. "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
  1128. fcport->fw_cid, fcport->handle);
  1129. memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));
  1130. /* Fill in the offload connection info */
  1131. conn_info.sq_pbl_addr = fcport->sq_pbl_dma;
  1132. conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
  1133. conn_info.sq_next_page_addr =
  1134. (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));
  1135. /* Need to use our FCoE MAC for the offload session */
  1136. ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);
  1137. ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
  1138. conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
  1139. conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
  1140. conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
  1141. conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
  1142. /* Set VLAN data */
  1143. conn_info.vlan_tag = qedf->vlan_id <<
  1144. FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
  1145. conn_info.vlan_tag |=
  1146. qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
  1147. conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
  1148. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
  1149. /* Set host port source id */
  1150. port_id = fc_host_port_id(qedf->lport->host);
  1151. fcport->sid = port_id;
  1152. conn_info.s_id.addr_hi = (port_id & 0x000000FF);
  1153. conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
  1154. conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;
  1155. conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;
  1156. /* Set remote port destination id */
  1157. port_id = fcport->rdata->rport->port_id;
  1158. conn_info.d_id.addr_hi = (port_id & 0x000000FF);
  1159. conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
  1160. conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;
  1161. conn_info.def_q_idx = 0; /* Default index for send queue? */
  1162. /* Set FC-TAPE specific flags if needed */
  1163. if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
  1164. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
  1165. "Enable CONF, REC for portid=%06x.\n",
  1166. fcport->rdata->ids.port_id);
  1167. conn_info.flags |= 1 <<
  1168. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
  1169. conn_info.flags |=
  1170. ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
  1171. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
  1172. }
  1173. rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
  1174. if (rval) {
  1175. QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
  1176. "for portid=%06x.\n", fcport->rdata->ids.port_id);
  1177. goto out_free_conn;
  1178. } else
  1179. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
  1180. "succeeded portid=%06x total_sqe=%d.\n",
  1181. fcport->rdata->ids.port_id, total_sqe);
  1182. spin_lock_init(&fcport->rport_lock);
  1183. atomic_set(&fcport->free_sqes, total_sqe);
  1184. return 0;
  1185. out_free_conn:
  1186. qed_ops->release_conn(qedf->cdev, fcport->handle);
  1187. out:
  1188. return rval;
  1189. }
  1190. #define QEDF_TERM_BUFF_SIZE 10
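/*
 * Tear down an offloaded session: destroy the connection in firmware using a
 * small DMA-coherent termination buffer, then release the connection handle
 * back to qed.
 */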
  1191. static void qedf_upload_connection(struct qedf_ctx *qedf,
  1192. struct qedf_rport *fcport)
  1193. {
  1194. void *term_params;
  1195. dma_addr_t term_params_dma;
1196. /* The term params buffer needs to be DMA coherent as qed shares its
1197. * physical DMA address with the firmware. The buffer may be used in
1198. * the receive path, so we may eventually have to move this.
1199. */
  1200. term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
  1201. &term_params_dma, GFP_KERNEL);
  1202. if (!term_params)
  1203. return;
  1204. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
  1205. "port_id=%06x.\n", fcport->rdata->ids.port_id);
  1206. qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
  1207. qed_ops->release_conn(qedf->cdev, fcport->handle);
  1208. dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
  1209. term_params_dma);
  1210. }
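/*
 * Flush outstanding I/O, upload the connection if it was offloaded, free the
 * send queue and drop the rdata reference taken when the rport was offloaded.
 */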
  1211. static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
  1212. struct qedf_rport *fcport)
  1213. {
  1214. struct fc_rport_priv *rdata = fcport->rdata;
  1215. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
  1216. fcport->rdata->ids.port_id);
  1217. /* Flush any remaining i/o's before we upload the connection */
  1218. qedf_flush_active_ios(fcport, -1);
  1219. if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
  1220. qedf_upload_connection(qedf, fcport);
  1221. qedf_free_sq(qedf, fcport);
  1222. fcport->rdata = NULL;
  1223. fcport->qedf = NULL;
  1224. kref_put(&rdata->kref, fc_rport_destroy);
  1225. }
  1226. /*
  1227. * This event_callback is called after successful completion of libfc
  1228. * initiated target login. qedf can proceed with initiating the session
  1229. * establishment.
  1230. */
  1231. static void qedf_rport_event_handler(struct fc_lport *lport,
  1232. struct fc_rport_priv *rdata,
  1233. enum fc_rport_event event)
  1234. {
  1235. struct qedf_ctx *qedf = lport_priv(lport);
  1236. struct fc_rport *rport = rdata->rport;
  1237. struct fc_rport_libfc_priv *rp;
  1238. struct qedf_rport *fcport;
  1239. u32 port_id;
  1240. int rval;
  1241. unsigned long flags;
  1242. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
  1243. "port_id = 0x%x\n", event, rdata->ids.port_id);
  1244. switch (event) {
  1245. case RPORT_EV_READY:
  1246. if (!rport) {
  1247. QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
  1248. break;
  1249. }
  1250. rp = rport->dd_data;
  1251. fcport = (struct qedf_rport *)&rp[1];
  1252. fcport->qedf = qedf;
  1253. if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
  1254. QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
  1255. "portid=0x%x as max number of offloaded sessions "
  1256. "reached.\n", rdata->ids.port_id);
  1257. return;
  1258. }
  1259. /*
  1260. * Don't try to offload the session again. Can happen when we
  1261. * get an ADISC
  1262. */
  1263. if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  1264. QEDF_WARN(&(qedf->dbg_ctx), "Session already "
  1265. "offloaded, portid=0x%x.\n",
  1266. rdata->ids.port_id);
  1267. return;
  1268. }
  1269. if (rport->port_id == FC_FID_DIR_SERV) {
  1270. /*
  1271. * qedf_rport structure doesn't exist for
  1272. * directory server.
  1273. * We should not come here, as lport will
  1274. * take care of fabric login
  1275. */
  1276. QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
  1277. "exist for dir server port_id=%x\n",
  1278. rdata->ids.port_id);
  1279. break;
  1280. }
  1281. if (rdata->spp_type != FC_TYPE_FCP) {
  1282. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1283. "Not offloading since spp type isn't FCP\n");
  1284. break;
  1285. }
  1286. if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
  1287. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1288. "Not FCP target so not offloading\n");
  1289. break;
  1290. }
  1291. /* Initial reference held on entry, so this can't fail */
  1292. kref_get(&rdata->kref);
  1293. fcport->rdata = rdata;
  1294. fcport->rport = rport;
  1295. rval = qedf_alloc_sq(qedf, fcport);
  1296. if (rval) {
  1297. qedf_cleanup_fcport(qedf, fcport);
  1298. break;
  1299. }
  1300. /* Set device type */
  1301. if (rdata->flags & FC_RP_FLAGS_RETRY &&
  1302. rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
  1303. !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
  1304. fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
  1305. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1306. "portid=%06x is a TAPE device.\n",
  1307. rdata->ids.port_id);
  1308. } else {
  1309. fcport->dev_type = QEDF_RPORT_TYPE_DISK;
  1310. }
  1311. rval = qedf_offload_connection(qedf, fcport);
  1312. if (rval) {
  1313. qedf_cleanup_fcport(qedf, fcport);
  1314. break;
  1315. }
1316. /* Add fcport to the qedf_ctx list of offloaded ports */
  1317. spin_lock_irqsave(&qedf->hba_lock, flags);
  1318. list_add_rcu(&fcport->peers, &qedf->fcports);
  1319. spin_unlock_irqrestore(&qedf->hba_lock, flags);
  1320. /*
  1321. * Set the session ready bit to let everyone know that this
  1322. * connection is ready for I/O
  1323. */
  1324. set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
  1325. atomic_inc(&qedf->num_offloads);
  1326. break;
  1327. case RPORT_EV_LOGO:
  1328. case RPORT_EV_FAILED:
  1329. case RPORT_EV_STOP:
  1330. port_id = rdata->ids.port_id;
  1331. if (port_id == FC_FID_DIR_SERV)
  1332. break;
  1333. if (rdata->spp_type != FC_TYPE_FCP) {
  1334. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1335. "No action since spp type isn't FCP\n");
  1336. break;
  1337. }
  1338. if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
  1339. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1340. "Not FCP target so no action\n");
  1341. break;
  1342. }
  1343. if (!rport) {
  1344. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1345. "port_id=%x - rport not created yet.\n", port_id);
  1346. break;
  1347. }
  1348. rp = rport->dd_data;
  1349. /*
  1350. * Perform session upload. Note that rdata->peers is already
  1351. * removed from disc->rports list before we get this event.
  1352. */
  1353. fcport = (struct qedf_rport *)&rp[1];
  1354. spin_lock_irqsave(&fcport->rport_lock, flags);
  1355. /* Only free this fcport if it is offloaded already */
  1356. if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
  1357. !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  1358. &fcport->flags)) {
  1359. set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  1360. &fcport->flags);
  1361. spin_unlock_irqrestore(&fcport->rport_lock, flags);
  1362. qedf_cleanup_fcport(qedf, fcport);
  1363. /*
1364. * Remove fcport from the qedf_ctx list of offloaded
1365. * ports
  1366. */
  1367. spin_lock_irqsave(&qedf->hba_lock, flags);
  1368. list_del_rcu(&fcport->peers);
  1369. spin_unlock_irqrestore(&qedf->hba_lock, flags);
  1370. clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  1371. &fcport->flags);
  1372. atomic_dec(&qedf->num_offloads);
  1373. } else {
  1374. spin_unlock_irqrestore(&fcport->rport_lock, flags);
  1375. }
  1376. break;
  1377. case RPORT_EV_NONE:
  1378. break;
  1379. }
  1380. }
  1381. static void qedf_abort_io(struct fc_lport *lport)
  1382. {
  1383. /* NO-OP but need to fill in the template */
  1384. }
  1385. static void qedf_fcp_cleanup(struct fc_lport *lport)
  1386. {
  1387. /*
  1388. * NO-OP but need to fill in template to prevent a NULL
  1389. * function pointer dereference during link down. I/Os
  1390. * will be flushed when port is uploaded.
  1391. */
  1392. }
  1393. static struct libfc_function_template qedf_lport_template = {
  1394. .frame_send = qedf_xmit,
  1395. .fcp_abort_io = qedf_abort_io,
  1396. .fcp_cleanup = qedf_fcp_cleanup,
  1397. .rport_event_callback = qedf_rport_event_handler,
  1398. .elsct_send = qedf_elsct_send,
  1399. };
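/* Initialize the FCoE controller (FIP) state and wire up our FIP callbacks */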
  1400. static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
  1401. {
  1402. fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
  1403. qedf->ctlr.send = qedf_fip_send;
  1404. qedf->ctlr.get_src_addr = qedf_get_src_mac;
  1405. ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
  1406. }
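/*
 * Populate the fc_host attributes (serial number, manufacturer, model and
 * firmware/driver versions) that libfc uses for FDMI registration.
 */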
  1407. static void qedf_setup_fdmi(struct qedf_ctx *qedf)
  1408. {
  1409. struct fc_lport *lport = qedf->lport;
  1410. u8 buf[8];
  1411. int pos;
  1412. uint32_t i;
  1413. /*
  1414. * fdmi_enabled needs to be set for libfc
  1415. * to execute FDMI registration
  1416. */
  1417. lport->fdmi_enabled = 1;
  1418. /*
1419. * Set up the necessary fc_host attributes that will be used to fill
  1420. * in the FDMI information.
  1421. */
  1422. /* Get the PCI-e Device Serial Number Capability */
  1423. pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
  1424. if (pos) {
  1425. pos += 4;
  1426. for (i = 0; i < 8; i++)
  1427. pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
  1428. snprintf(fc_host_serial_number(lport->host),
  1429. FC_SERIAL_NUMBER_SIZE,
  1430. "%02X%02X%02X%02X%02X%02X%02X%02X",
  1431. buf[7], buf[6], buf[5], buf[4],
  1432. buf[3], buf[2], buf[1], buf[0]);
  1433. } else
  1434. snprintf(fc_host_serial_number(lport->host),
  1435. FC_SERIAL_NUMBER_SIZE, "Unknown");
  1436. snprintf(fc_host_manufacturer(lport->host),
  1437. FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
  1438. if (qedf->pdev->device == QL45xxx) {
  1439. snprintf(fc_host_model(lport->host),
  1440. FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
  1441. snprintf(fc_host_model_description(lport->host),
  1442. FC_SYMBOLIC_NAME_SIZE, "%s",
  1443. "Marvell FastLinQ QL45xxx FCoE Adapter");
  1444. }
  1445. if (qedf->pdev->device == QL41xxx) {
  1446. snprintf(fc_host_model(lport->host),
  1447. FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
  1448. snprintf(fc_host_model_description(lport->host),
  1449. FC_SYMBOLIC_NAME_SIZE, "%s",
  1450. "Marvell FastLinQ QL41xxx FCoE Adapter");
  1451. }
  1452. snprintf(fc_host_hardware_version(lport->host),
  1453. FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
  1454. snprintf(fc_host_driver_version(lport->host),
  1455. FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
  1456. snprintf(fc_host_firmware_version(lport->host),
  1457. FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
  1458. FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
  1459. FW_ENGINEERING_VERSION);
  1460. snprintf(fc_host_vendor_identifier(lport->host),
  1461. FC_VENDOR_IDENTIFIER, "%s", "Marvell");
  1462. }
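/*
 * Configure the base lport: retry counts, timeouts, NPIV support, WWNs,
 * libfc config, the exchange manager, stats and fc_host attributes.
 */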
  1463. static int qedf_lport_setup(struct qedf_ctx *qedf)
  1464. {
  1465. struct fc_lport *lport = qedf->lport;
  1466. lport->link_up = 0;
  1467. lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
  1468. lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
  1469. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  1470. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  1471. lport->boot_time = jiffies;
  1472. lport->e_d_tov = 2 * 1000;
  1473. lport->r_a_tov = 10 * 1000;
  1474. /* Set NPIV support */
  1475. lport->does_npiv = 1;
  1476. fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
  1477. fc_set_wwnn(lport, qedf->wwnn);
  1478. fc_set_wwpn(lport, qedf->wwpn);
  1479. if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
  1480. QEDF_ERR(&qedf->dbg_ctx,
  1481. "fcoe_libfc_config failed.\n");
  1482. return -ENOMEM;
  1483. }
  1484. /* Allocate the exchange manager */
  1485. fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
  1486. 0xfffe, NULL);
  1487. if (fc_lport_init_stats(lport))
  1488. return -ENOMEM;
  1489. /* Finish lport config */
  1490. fc_lport_config(lport);
  1491. /* Set max frame size */
  1492. fc_set_mfs(lport, QEDF_MFS);
  1493. fc_host_maxframe_size(lport->host) = lport->mfs;
  1494. /* Set default dev_loss_tmo based on module parameter */
  1495. fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
  1496. /* Set symbolic node name */
  1497. if (qedf->pdev->device == QL45xxx)
  1498. snprintf(fc_host_symbolic_name(lport->host), 256,
  1499. "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
  1500. if (qedf->pdev->device == QL41xxx)
  1501. snprintf(fc_host_symbolic_name(lport->host), 256,
  1502. "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
  1503. qedf_setup_fdmi(qedf);
  1504. return 0;
  1505. }
  1506. /*
  1507. * NPIV functions
  1508. */
  1509. static int qedf_vport_libfc_config(struct fc_vport *vport,
  1510. struct fc_lport *lport)
  1511. {
  1512. lport->link_up = 0;
  1513. lport->qfull = 0;
  1514. lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
  1515. lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
  1516. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  1517. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  1518. lport->boot_time = jiffies;
  1519. lport->e_d_tov = 2 * 1000;
  1520. lport->r_a_tov = 10 * 1000;
  1521. lport->does_npiv = 1; /* Temporary until we add NPIV support */
  1522. /* Allocate stats for vport */
  1523. if (fc_lport_init_stats(lport))
  1524. return -ENOMEM;
  1525. /* Finish lport config */
  1526. fc_lport_config(lport);
  1527. /* offload related configuration */
  1528. lport->crc_offload = 0;
  1529. lport->seq_offload = 0;
  1530. lport->lro_enabled = 0;
  1531. lport->lro_xid = 0;
  1532. lport->lso_max = 0;
  1533. return 0;
  1534. }
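/*
 * fc_vport_create callback. Allocates an lport for the new NPIV port, copies
 * the relevant state from the base qedf_ctx, registers the Scsi_Host and
 * kicks off fabric login unless the vport was created disabled.
 */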
  1535. static int qedf_vport_create(struct fc_vport *vport, bool disabled)
  1536. {
  1537. struct Scsi_Host *shost = vport_to_shost(vport);
  1538. struct fc_lport *n_port = shost_priv(shost);
  1539. struct fc_lport *vn_port;
  1540. struct qedf_ctx *base_qedf = lport_priv(n_port);
  1541. struct qedf_ctx *vport_qedf;
  1542. char buf[32];
  1543. int rc = 0;
  1544. rc = fcoe_validate_vport_create(vport);
  1545. if (rc) {
  1546. fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
  1547. QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
  1548. "WWPN (0x%s) already exists.\n", buf);
  1549. return rc;
  1550. }
  1551. if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
  1552. QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
  1553. "because link is not up.\n");
  1554. return -EIO;
  1555. }
  1556. vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
  1557. if (!vn_port) {
  1558. QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
  1559. "for vport.\n");
  1560. return -ENOMEM;
  1561. }
  1562. fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
  1563. QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
  1564. buf);
  1565. /* Copy some fields from base_qedf */
  1566. vport_qedf = lport_priv(vn_port);
  1567. memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
  1568. /* Set qedf data specific to this vport */
  1569. vport_qedf->lport = vn_port;
  1570. /* Use same hba_lock as base_qedf */
  1571. vport_qedf->hba_lock = base_qedf->hba_lock;
  1572. vport_qedf->pdev = base_qedf->pdev;
  1573. vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
  1574. init_completion(&vport_qedf->flogi_compl);
  1575. INIT_LIST_HEAD(&vport_qedf->fcports);
  1576. INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
  1577. rc = qedf_vport_libfc_config(vport, vn_port);
  1578. if (rc) {
  1579. QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
  1580. "for lport stats.\n");
  1581. goto err;
  1582. }
  1583. fc_set_wwnn(vn_port, vport->node_name);
  1584. fc_set_wwpn(vn_port, vport->port_name);
  1585. vport_qedf->wwnn = vn_port->wwnn;
  1586. vport_qedf->wwpn = vn_port->wwpn;
  1587. vn_port->host->transportt = qedf_fc_vport_transport_template;
  1588. vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
  1589. vn_port->host->max_lun = qedf_max_lun;
  1590. vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
  1591. vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
  1592. vn_port->host->max_id = QEDF_MAX_SESSIONS;
  1593. rc = scsi_add_host(vn_port->host, &vport->dev);
  1594. if (rc) {
  1595. QEDF_WARN(&base_qedf->dbg_ctx,
  1596. "Error adding Scsi_Host rc=0x%x.\n", rc);
  1597. goto err;
  1598. }
  1599. /* Set default dev_loss_tmo based on module parameter */
  1600. fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
1601. /* Init libfc stuff */
  1602. memcpy(&vn_port->tt, &qedf_lport_template,
  1603. sizeof(qedf_lport_template));
  1604. fc_exch_init(vn_port);
  1605. fc_elsct_init(vn_port);
  1606. fc_lport_init(vn_port);
  1607. fc_disc_init(vn_port);
  1608. fc_disc_config(vn_port, vn_port);
  1609. /* Allocate the exchange manager */
  1610. shost = vport_to_shost(vport);
  1611. n_port = shost_priv(shost);
  1612. fc_exch_mgr_list_clone(n_port, vn_port);
  1613. /* Set max frame size */
  1614. fc_set_mfs(vn_port, QEDF_MFS);
  1615. fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;
  1616. if (disabled) {
  1617. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  1618. } else {
  1619. vn_port->boot_time = jiffies;
  1620. fc_fabric_login(vn_port);
  1621. fc_vport_setlink(vn_port);
  1622. }
  1623. /* Set symbolic node name */
  1624. if (base_qedf->pdev->device == QL45xxx)
  1625. snprintf(fc_host_symbolic_name(vn_port->host), 256,
  1626. "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
  1627. if (base_qedf->pdev->device == QL41xxx)
  1628. snprintf(fc_host_symbolic_name(vn_port->host), 256,
  1629. "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
  1630. /* Set supported speed */
  1631. fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
  1632. /* Set speed */
  1633. vn_port->link_speed = n_port->link_speed;
  1634. /* Set port type */
  1635. fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
  1636. /* Set maxframe size */
  1637. fc_host_maxframe_size(vn_port->host) = n_port->mfs;
  1638. QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
  1639. vn_port);
  1640. /* Set up debug context for vport */
  1641. vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
  1642. vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
  1643. return 0;
  1644. err:
  1645. scsi_host_put(vn_port->host);
  1646. return rc;
  1647. }
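/*
 * fc_vport_delete callback. Logs the NPIV port out of the fabric, detaches
 * it from scsi-ml and frees the lport resources.
 */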
  1648. static int qedf_vport_destroy(struct fc_vport *vport)
  1649. {
  1650. struct Scsi_Host *shost = vport_to_shost(vport);
  1651. struct fc_lport *n_port = shost_priv(shost);
  1652. struct fc_lport *vn_port = vport->dd_data;
  1653. struct qedf_ctx *qedf = lport_priv(vn_port);
  1654. if (!qedf) {
  1655. QEDF_ERR(NULL, "qedf is NULL.\n");
  1656. goto out;
  1657. }
  1658. /* Set unloading bit on vport qedf_ctx to prevent more I/O */
  1659. set_bit(QEDF_UNLOADING, &qedf->flags);
  1660. mutex_lock(&n_port->lp_mutex);
  1661. list_del(&vn_port->list);
  1662. mutex_unlock(&n_port->lp_mutex);
  1663. fc_fabric_logoff(vn_port);
  1664. fc_lport_destroy(vn_port);
  1665. /* Detach from scsi-ml */
  1666. fc_remove_host(vn_port->host);
  1667. scsi_remove_host(vn_port->host);
  1668. /*
  1669. * Only try to release the exchange manager if the vn_port
  1670. * configuration is complete.
  1671. */
  1672. if (vn_port->state == LPORT_ST_READY)
  1673. fc_exch_mgr_free(vn_port);
  1674. /* Free memory used by statistical counters */
  1675. fc_lport_free_stats(vn_port);
  1676. /* Release Scsi_Host */
  1677. scsi_host_put(vn_port->host);
  1678. out:
  1679. return 0;
  1680. }
  1681. static int qedf_vport_disable(struct fc_vport *vport, bool disable)
  1682. {
  1683. struct fc_lport *lport = vport->dd_data;
  1684. if (disable) {
  1685. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  1686. fc_fabric_logoff(lport);
  1687. } else {
  1688. lport->boot_time = jiffies;
  1689. fc_fabric_login(lport);
  1690. fc_vport_setlink(lport);
  1691. }
  1692. return 0;
  1693. }
  1694. /*
  1695. * During removal we need to wait for all the vports associated with a port
  1696. * to be destroyed so we avoid a race condition where libfc is still trying
  1697. * to reap vports while the driver remove function has already reaped the
  1698. * driver contexts associated with the physical port.
  1699. */
  1700. static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
  1701. {
  1702. struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
  1703. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
  1704. "Entered.\n");
  1705. while (fc_host->npiv_vports_inuse > 0) {
  1706. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
  1707. "Waiting for all vports to be reaped.\n");
  1708. msleep(1000);
  1709. }
  1710. }
  1711. /**
  1712. * qedf_fcoe_reset - Resets the fcoe
  1713. *
  1714. * @shost: shost the reset is from
  1715. *
  1716. * Returns: always 0
  1717. */
  1718. static int qedf_fcoe_reset(struct Scsi_Host *shost)
  1719. {
  1720. struct fc_lport *lport = shost_priv(shost);
  1721. qedf_ctx_soft_reset(lport);
  1722. return 0;
  1723. }
  1724. static void qedf_get_host_port_id(struct Scsi_Host *shost)
  1725. {
  1726. struct fc_lport *lport = shost_priv(shost);
  1727. fc_host_port_id(shost) = lport->port_id;
  1728. }
  1729. static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
  1730. *shost)
  1731. {
  1732. struct fc_host_statistics *qedf_stats;
  1733. struct fc_lport *lport = shost_priv(shost);
  1734. struct qedf_ctx *qedf = lport_priv(lport);
  1735. struct qed_fcoe_stats *fw_fcoe_stats;
  1736. qedf_stats = fc_get_host_stats(shost);
  1737. /* We don't collect offload stats for specific NPIV ports */
  1738. if (lport->vport)
  1739. goto out;
  1740. fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
  1741. if (!fw_fcoe_stats) {
  1742. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
  1743. "fw_fcoe_stats.\n");
  1744. goto out;
  1745. }
  1746. mutex_lock(&qedf->stats_mutex);
  1747. /* Query firmware for offload stats */
  1748. qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
  1749. /*
  1750. * The expectation is that we add our offload stats to the stats
1751. * being maintained by libfc each time the fc_get_host_stats callback
1752. * is invoked. The additions are not carried over from one call to
1753. * the fc_get_host_stats callback to the next.
  1754. */
  1755. qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
  1756. fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
  1757. fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
  1758. qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
  1759. fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
  1760. fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
  1761. qedf_stats->fcp_input_megabytes +=
  1762. do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
  1763. qedf_stats->fcp_output_megabytes +=
  1764. do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
  1765. qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
  1766. qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
  1767. qedf_stats->invalid_crc_count +=
  1768. fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
  1769. qedf_stats->dumped_frames =
  1770. fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
  1771. qedf_stats->error_frames +=
  1772. fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
  1773. qedf_stats->fcp_input_requests += qedf->input_requests;
  1774. qedf_stats->fcp_output_requests += qedf->output_requests;
  1775. qedf_stats->fcp_control_requests += qedf->control_requests;
  1776. qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
  1777. qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
  1778. mutex_unlock(&qedf->stats_mutex);
  1779. kfree(fw_fcoe_stats);
  1780. out:
  1781. return qedf_stats;
  1782. }
  1783. static struct fc_function_template qedf_fc_transport_fn = {
  1784. .show_host_node_name = 1,
  1785. .show_host_port_name = 1,
  1786. .show_host_supported_classes = 1,
  1787. .show_host_supported_fc4s = 1,
  1788. .show_host_active_fc4s = 1,
  1789. .show_host_maxframe_size = 1,
  1790. .get_host_port_id = qedf_get_host_port_id,
  1791. .show_host_port_id = 1,
  1792. .show_host_supported_speeds = 1,
  1793. .get_host_speed = fc_get_host_speed,
  1794. .show_host_speed = 1,
  1795. .show_host_port_type = 1,
  1796. .get_host_port_state = fc_get_host_port_state,
  1797. .show_host_port_state = 1,
  1798. .show_host_symbolic_name = 1,
  1799. /*
  1800. * Tell FC transport to allocate enough space to store the backpointer
1801. * for the associated qedf_rport struct.
  1802. */
  1803. .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
  1804. sizeof(struct qedf_rport)),
  1805. .show_rport_maxframe_size = 1,
  1806. .show_rport_supported_classes = 1,
  1807. .show_host_fabric_name = 1,
  1808. .show_starget_node_name = 1,
  1809. .show_starget_port_name = 1,
  1810. .show_starget_port_id = 1,
  1811. .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
  1812. .show_rport_dev_loss_tmo = 1,
  1813. .get_fc_host_stats = qedf_fc_get_host_stats,
  1814. .issue_fc_host_lip = qedf_fcoe_reset,
  1815. .vport_create = qedf_vport_create,
  1816. .vport_delete = qedf_vport_destroy,
  1817. .vport_disable = qedf_vport_disable,
  1818. .bsg_request = fc_lport_bsg_request,
  1819. };
  1820. static struct fc_function_template qedf_fc_vport_transport_fn = {
  1821. .show_host_node_name = 1,
  1822. .show_host_port_name = 1,
  1823. .show_host_supported_classes = 1,
  1824. .show_host_supported_fc4s = 1,
  1825. .show_host_active_fc4s = 1,
  1826. .show_host_maxframe_size = 1,
  1827. .show_host_port_id = 1,
  1828. .show_host_supported_speeds = 1,
  1829. .get_host_speed = fc_get_host_speed,
  1830. .show_host_speed = 1,
  1831. .show_host_port_type = 1,
  1832. .get_host_port_state = fc_get_host_port_state,
  1833. .show_host_port_state = 1,
  1834. .show_host_symbolic_name = 1,
  1835. .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
  1836. sizeof(struct qedf_rport)),
  1837. .show_rport_maxframe_size = 1,
  1838. .show_rport_supported_classes = 1,
  1839. .show_host_fabric_name = 1,
  1840. .show_starget_node_name = 1,
  1841. .show_starget_port_name = 1,
  1842. .show_starget_port_id = 1,
  1843. .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
  1844. .show_rport_dev_loss_tmo = 1,
  1845. .get_fc_host_stats = fc_get_host_stats,
  1846. .issue_fc_host_lip = qedf_fcoe_reset,
  1847. .bsg_request = fc_lport_bsg_request,
  1848. };
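/*
 * Return true if the firmware producer index for this status block has moved
 * past our recorded CQ producer index, i.e. there are new completions.
 */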
  1849. static bool qedf_fp_has_work(struct qedf_fastpath *fp)
  1850. {
  1851. struct qedf_ctx *qedf = fp->qedf;
  1852. struct global_queue *que;
  1853. struct qed_sb_info *sb_info = fp->sb_info;
  1854. struct status_block *sb = sb_info->sb_virt;
  1855. u16 prod_idx;
  1856. /* Get the pointer to the global CQ this completion is on */
  1857. que = qedf->global_queues[fp->sb_id];
  1858. /* Be sure all responses have been written to PI */
  1859. rmb();
  1860. /* Get the current firmware producer index */
  1861. prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
  1862. return (que->cq_prod_idx != prod_idx);
  1863. }
  1864. /*
  1865. * Interrupt handler code.
  1866. */
1867. /* Process completion queue and copy CQE contents for deferred processing
  1868. *
  1869. * Return true if we should wake the I/O thread, false if not.
  1870. */
  1871. static bool qedf_process_completions(struct qedf_fastpath *fp)
  1872. {
  1873. struct qedf_ctx *qedf = fp->qedf;
  1874. struct qed_sb_info *sb_info = fp->sb_info;
  1875. struct status_block *sb = sb_info->sb_virt;
  1876. struct global_queue *que;
  1877. u16 prod_idx;
  1878. struct fcoe_cqe *cqe;
  1879. struct qedf_io_work *io_work;
  1880. int num_handled = 0;
  1881. unsigned int cpu;
  1882. struct qedf_ioreq *io_req = NULL;
  1883. u16 xid;
  1884. u16 new_cqes;
  1885. u32 comp_type;
  1886. /* Get the current firmware producer index */
  1887. prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
  1888. /* Get the pointer to the global CQ this completion is on */
  1889. que = qedf->global_queues[fp->sb_id];
  1890. /* Calculate the amount of new elements since last processing */
  1891. new_cqes = (prod_idx >= que->cq_prod_idx) ?
  1892. (prod_idx - que->cq_prod_idx) :
  1893. 0x10000 - que->cq_prod_idx + prod_idx;
  1894. /* Save producer index */
  1895. que->cq_prod_idx = prod_idx;
  1896. while (new_cqes) {
  1897. fp->completions++;
  1898. num_handled++;
  1899. cqe = &que->cq[que->cq_cons_idx];
  1900. comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
  1901. FCOE_CQE_CQE_TYPE_MASK;
  1902. /*
  1903. * Process unsolicited CQEs directly in the interrupt handler
1904. * since we need the fastpath ID
  1905. */
  1906. if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
  1907. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
1908. "Unsolicited CQE.\n");
  1909. qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
  1910. /*
1911. * Don't add a work list item. Increment the consumer
1912. * index and move on.
  1913. */
  1914. goto inc_idx;
  1915. }
  1916. xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
  1917. io_req = &qedf->cmd_mgr->cmds[xid];
  1918. /*
  1919. * Figure out which percpu thread we should queue this I/O
  1920. * on.
  1921. */
  1922. if (!io_req)
1923. /* If there is no io_req associated with this CQE
  1924. * just queue it on CPU 0
  1925. */
  1926. cpu = 0;
  1927. else {
  1928. cpu = io_req->cpu;
  1929. io_req->int_cpu = smp_processor_id();
  1930. }
  1931. io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
  1932. if (!io_work) {
  1933. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
  1934. "work for I/O completion.\n");
  1935. continue;
  1936. }
  1937. memset(io_work, 0, sizeof(struct qedf_io_work));
  1938. INIT_WORK(&io_work->work, qedf_fp_io_handler);
  1939. /* Copy contents of CQE for deferred processing */
  1940. memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
  1941. io_work->qedf = fp->qedf;
  1942. io_work->fp = NULL; /* Only used for unsolicited frames */
  1943. queue_work_on(cpu, qedf_io_wq, &io_work->work);
  1944. inc_idx:
  1945. que->cq_cons_idx++;
  1946. if (que->cq_cons_idx == fp->cq_num_entries)
  1947. que->cq_cons_idx = 0;
  1948. new_cqes--;
  1949. }
  1950. return true;
  1951. }
  1952. /* MSI-X fastpath handler code */
  1953. static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
  1954. {
  1955. struct qedf_fastpath *fp = dev_id;
  1956. if (!fp) {
  1957. QEDF_ERR(NULL, "fp is null.\n");
  1958. return IRQ_HANDLED;
  1959. }
  1960. if (!fp->sb_info) {
1961. QEDF_ERR(NULL, "fp->sb_info is null.\n");
  1962. return IRQ_HANDLED;
  1963. }
  1964. /*
  1965. * Disable interrupts for this status block while we process new
  1966. * completions
  1967. */
  1968. qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
  1969. while (1) {
  1970. qedf_process_completions(fp);
  1971. if (qedf_fp_has_work(fp) == 0) {
  1972. /* Update the sb information */
  1973. qed_sb_update_sb_idx(fp->sb_info);
  1974. /* Check for more work */
  1975. rmb();
  1976. if (qedf_fp_has_work(fp) == 0) {
  1977. /* Re-enable interrupts */
  1978. qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
  1979. return IRQ_HANDLED;
  1980. }
  1981. }
  1982. }
  1983. /* Do we ever want to break out of above loop? */
  1984. return IRQ_HANDLED;
  1985. }
  1986. /* simd handler for MSI/INTa */
  1987. static void qedf_simd_int_handler(void *cookie)
  1988. {
  1989. /* Cookie is qedf_ctx struct */
  1990. struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
  1991. QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
  1992. }
  1993. #define QEDF_SIMD_HANDLER_NUM 0
  1994. static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
  1995. {
  1996. int i;
  1997. u16 vector_idx = 0;
  1998. u32 vector;
  1999. if (qedf->int_info.msix_cnt) {
  2000. for (i = 0; i < qedf->int_info.used_cnt; i++) {
  2001. vector_idx = i * qedf->dev_info.common.num_hwfns +
  2002. qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
  2003. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  2004. "Freeing IRQ #%d vector_idx=%d.\n",
  2005. i, vector_idx);
  2006. vector = qedf->int_info.msix[vector_idx].vector;
  2007. synchronize_irq(vector);
  2008. irq_set_affinity_hint(vector, NULL);
  2009. irq_set_affinity_notifier(vector, NULL);
  2010. free_irq(vector, &qedf->fp_array[i]);
  2011. }
  2012. } else
  2013. qed_ops->common->simd_handler_clean(qedf->cdev,
  2014. QEDF_SIMD_HANDLER_NUM);
  2015. qedf->int_info.used_cnt = 0;
  2016. qed_ops->common->set_fp_int(qedf->cdev, 0);
  2017. }
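/*
 * Request one MSI-X vector per fastpath queue and spread the affinity hints
 * across the online CPUs.
 */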
  2018. static int qedf_request_msix_irq(struct qedf_ctx *qedf)
  2019. {
  2020. int i, rc, cpu;
  2021. u16 vector_idx = 0;
  2022. u32 vector;
  2023. cpu = cpumask_first(cpu_online_mask);
  2024. for (i = 0; i < qedf->num_queues; i++) {
  2025. vector_idx = i * qedf->dev_info.common.num_hwfns +
  2026. qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
  2027. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  2028. "Requesting IRQ #%d vector_idx=%d.\n",
  2029. i, vector_idx);
  2030. vector = qedf->int_info.msix[vector_idx].vector;
  2031. rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
  2032. &qedf->fp_array[i]);
  2033. if (rc) {
  2034. QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
  2035. qedf_sync_free_irqs(qedf);
  2036. return rc;
  2037. }
  2038. qedf->int_info.used_cnt++;
  2039. rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
  2040. cpu = cpumask_next(cpu, cpu_online_mask);
  2041. }
  2042. return 0;
  2043. }
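/*
 * Learn the interrupt configuration from qed and request MSI-X vectors.
 * The driver currently refuses to load without MSI-X.
 */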
  2044. static int qedf_setup_int(struct qedf_ctx *qedf)
  2045. {
  2046. int rc = 0;
  2047. /*
  2048. * Learn interrupt configuration
  2049. */
  2050. rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
  2051. if (rc <= 0)
  2052. return 0;
  2053. rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
  2054. if (rc)
  2055. return 0;
  2056. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = "
  2057. "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
  2058. num_online_cpus());
  2059. if (qedf->int_info.msix_cnt)
  2060. return qedf_request_msix_irq(qedf);
  2061. qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
  2062. QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
  2063. qedf->int_info.used_cnt = 1;
  2064. QEDF_ERR(&qedf->dbg_ctx,
  2065. "Cannot load driver due to a lack of MSI-X vectors.\n");
  2066. return -EINVAL;
  2067. }
  2068. /* Main function for libfc frame reception */
  2069. static void qedf_recv_frame(struct qedf_ctx *qedf,
  2070. struct sk_buff *skb)
  2071. {
  2072. u32 fr_len;
  2073. struct fc_lport *lport;
  2074. struct fc_frame_header *fh;
  2075. struct fcoe_crc_eof crc_eof;
  2076. struct fc_frame *fp;
  2077. u8 *mac = NULL;
  2078. u8 *dest_mac = NULL;
  2079. struct fcoe_hdr *hp;
  2080. struct qedf_rport *fcport;
  2081. struct fc_lport *vn_port;
  2082. u32 f_ctl;
  2083. lport = qedf->lport;
  2084. if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
  2085. QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
  2086. kfree_skb(skb);
  2087. return;
  2088. }
  2089. if (skb_is_nonlinear(skb))
  2090. skb_linearize(skb);
  2091. mac = eth_hdr(skb)->h_source;
  2092. dest_mac = eth_hdr(skb)->h_dest;
  2093. /* Pull the header */
  2094. hp = (struct fcoe_hdr *)skb->data;
  2095. fh = (struct fc_frame_header *) skb_transport_header(skb);
  2096. skb_pull(skb, sizeof(struct fcoe_hdr));
  2097. fr_len = skb->len - sizeof(struct fcoe_crc_eof);
  2098. fp = (struct fc_frame *)skb;
  2099. fc_frame_init(fp);
  2100. fr_dev(fp) = lport;
  2101. fr_sof(fp) = hp->fcoe_sof;
  2102. if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
  2103. QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
  2104. kfree_skb(skb);
  2105. return;
  2106. }
  2107. fr_eof(fp) = crc_eof.fcoe_eof;
  2108. fr_crc(fp) = crc_eof.fcoe_crc32;
  2109. if (pskb_trim(skb, fr_len)) {
  2110. QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
  2111. kfree_skb(skb);
  2112. return;
  2113. }
  2114. fh = fc_frame_header_get(fp);
  2115. /*
  2116. * Invalid frame filters.
  2117. */
  2118. if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
  2119. fh->fh_type == FC_TYPE_FCP) {
2120. /* Drop FCP data. We don't want this in the L2 path */
  2121. kfree_skb(skb);
  2122. return;
  2123. }
  2124. if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
  2125. fh->fh_type == FC_TYPE_ELS) {
  2126. switch (fc_frame_payload_op(fp)) {
  2127. case ELS_LOGO:
  2128. if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
  2129. /* drop non-FIP LOGO */
  2130. kfree_skb(skb);
  2131. return;
  2132. }
  2133. break;
  2134. }
  2135. }
  2136. if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
  2137. /* Drop incoming ABTS */
  2138. kfree_skb(skb);
  2139. return;
  2140. }
  2141. if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
  2142. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  2143. "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
  2144. kfree_skb(skb);
  2145. return;
  2146. }
  2147. if (qedf->ctlr.state) {
  2148. if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
  2149. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  2150. "Wrong source address: mac:%pM dest_addr:%pM.\n",
  2151. mac, qedf->ctlr.dest_addr);
  2152. kfree_skb(skb);
  2153. return;
  2154. }
  2155. }
  2156. vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
  2157. /*
  2158. * If the destination ID from the frame header does not match what we
  2159. * have on record for lport and the search for a NPIV port came up
  2160. * empty then this is not addressed to our port so simply drop it.
  2161. */
  2162. if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
  2163. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
  2164. "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
  2165. lport->port_id, ntoh24(fh->fh_d_id));
  2166. kfree_skb(skb);
  2167. return;
  2168. }
  2169. f_ctl = ntoh24(fh->fh_f_ctl);
  2170. if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
  2171. (f_ctl & FC_FC_EX_CTX)) {
  2172. /* Drop incoming ABTS response that has both SEQ/EX CTX set */
  2173. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
  2174. "Dropping ABTS response as both SEQ/EX CTX set.\n");
  2175. kfree_skb(skb);
  2176. return;
  2177. }
  2178. /*
  2179. * If a connection is uploading, drop incoming FCoE frames as there
  2180. * is a small window where we could try to return a frame while libfc
  2181. * is trying to clean things up.
  2182. */
  2183. /* Get fcport associated with d_id if it exists */
  2184. fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
  2185. if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  2186. &fcport->flags)) {
  2187. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  2188. "Connection uploading, dropping fp=%p.\n", fp);
  2189. kfree_skb(skb);
  2190. return;
  2191. }
  2192. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
  2193. "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
  2194. ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
  2195. fh->fh_type);
  2196. if (qedf_dump_frames)
  2197. print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
  2198. 1, skb->data, skb->len, false);
  2199. fc_exch_recv(lport, fp);
  2200. }
  2201. static void qedf_ll2_process_skb(struct work_struct *work)
  2202. {
  2203. struct qedf_skb_work *skb_work =
  2204. container_of(work, struct qedf_skb_work, work);
  2205. struct qedf_ctx *qedf = skb_work->qedf;
  2206. struct sk_buff *skb = skb_work->skb;
  2207. struct ethhdr *eh;
  2208. if (!qedf) {
  2209. QEDF_ERR(NULL, "qedf is NULL\n");
  2210. goto err_out;
  2211. }
  2212. eh = (struct ethhdr *)skb->data;
  2213. /* Undo VLAN encapsulation */
  2214. if (eh->h_proto == htons(ETH_P_8021Q)) {
  2215. memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
  2216. eh = skb_pull(skb, VLAN_HLEN);
  2217. skb_reset_mac_header(skb);
  2218. }
  2219. /*
  2220. * Process either a FIP frame or FCoE frame based on the
  2221. * protocol value. If it's not either just drop the
  2222. * frame.
  2223. */
  2224. if (eh->h_proto == htons(ETH_P_FIP)) {
  2225. qedf_fip_recv(qedf, skb);
  2226. goto out;
  2227. } else if (eh->h_proto == htons(ETH_P_FCOE)) {
  2228. __skb_pull(skb, ETH_HLEN);
  2229. qedf_recv_frame(qedf, skb);
  2230. goto out;
  2231. } else
  2232. goto err_out;
  2233. err_out:
  2234. kfree_skb(skb);
  2235. out:
  2236. kfree(skb_work);
  2237. return;
  2238. }
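/*
 * qed LL2 receive callback. Frames are not processed inline; each skb is
 * wrapped in a qedf_skb_work item and deferred to the ll2_recv_wq workqueue
 * (or dropped if the link is down).
 */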
  2239. static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
  2240. u32 arg1, u32 arg2)
  2241. {
  2242. struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
  2243. struct qedf_skb_work *skb_work;
  2244. if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
  2245. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
  2246. "Dropping frame as link state is down.\n");
  2247. kfree_skb(skb);
  2248. return 0;
  2249. }
  2250. skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
  2251. if (!skb_work) {
  2252. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
  2253. "dropping frame.\n");
  2254. kfree_skb(skb);
  2255. return 0;
  2256. }
  2257. INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
  2258. skb_work->skb = skb;
  2259. skb_work->qedf = qedf;
  2260. queue_work(qedf->ll2_recv_wq, &skb_work->work);
  2261. return 0;
  2262. }
  2263. static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
  2264. .rx_cb = qedf_ll2_rx,
  2265. .tx_cb = NULL,
  2266. };
  2267. /* Main thread to process I/O completions */
  2268. void qedf_fp_io_handler(struct work_struct *work)
  2269. {
  2270. struct qedf_io_work *io_work =
  2271. container_of(work, struct qedf_io_work, work);
  2272. u32 comp_type;
  2273. /*
  2274. * Deferred part of unsolicited CQE sends
  2275. * frame to libfc.
  2276. */
  2277. comp_type = (io_work->cqe.cqe_data >>
  2278. FCOE_CQE_CQE_TYPE_SHIFT) &
  2279. FCOE_CQE_CQE_TYPE_MASK;
  2280. if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
  2281. io_work->fp)
  2282. fc_exch_recv(io_work->qedf->lport, io_work->fp);
  2283. else
  2284. qedf_process_cqe(io_work->qedf, &io_work->cqe);
  2285. kfree(io_work);
  2286. }
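/*
 * Allocate a DMA-coherent status block and hand it to qed for initialization
 * as a storage-type SB.
 */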
  2287. static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
  2288. struct qed_sb_info *sb_info, u16 sb_id)
  2289. {
  2290. struct status_block *sb_virt;
  2291. dma_addr_t sb_phys;
  2292. int ret;
  2293. sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
  2294. sizeof(struct status_block), &sb_phys, GFP_KERNEL);
  2295. if (!sb_virt) {
  2296. QEDF_ERR(&qedf->dbg_ctx,
  2297. "Status block allocation failed for id = %d.\n",
  2298. sb_id);
  2299. return -ENOMEM;
  2300. }
  2301. ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
  2302. sb_id, QED_SB_TYPE_STORAGE);
  2303. if (ret) {
  2304. QEDF_ERR(&qedf->dbg_ctx,
  2305. "Status block initialization failed (0x%x) for id = %d.\n",
  2306. ret, sb_id);
  2307. return ret;
  2308. }
  2309. return 0;
  2310. }
  2311. static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
  2312. {
  2313. if (sb_info->sb_virt)
  2314. dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
  2315. (void *)sb_info->sb_virt, sb_info->sb_phys);
  2316. }
  2317. static void qedf_destroy_sb(struct qedf_ctx *qedf)
  2318. {
  2319. int id;
  2320. struct qedf_fastpath *fp = NULL;
  2321. for (id = 0; id < qedf->num_queues; id++) {
  2322. fp = &(qedf->fp_array[id]);
  2323. if (fp->sb_id == QEDF_SB_ID_NULL)
  2324. break;
  2325. qedf_free_sb(qedf, fp->sb_info);
  2326. kfree(fp->sb_info);
  2327. }
  2328. kfree(qedf->fp_array);
  2329. }
  2330. static int qedf_prepare_sb(struct qedf_ctx *qedf)
  2331. {
  2332. int id;
  2333. struct qedf_fastpath *fp;
  2334. int ret;
  2335. qedf->fp_array =
  2336. kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
  2337. GFP_KERNEL);
  2338. if (!qedf->fp_array) {
  2339. QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
  2340. "failed.\n");
  2341. return -ENOMEM;
  2342. }
  2343. for (id = 0; id < qedf->num_queues; id++) {
  2344. fp = &(qedf->fp_array[id]);
  2345. fp->sb_id = QEDF_SB_ID_NULL;
  2346. fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
  2347. if (!fp->sb_info) {
  2348. QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
  2349. "allocation failed.\n");
  2350. goto err;
  2351. }
  2352. ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
  2353. if (ret) {
  2354. QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
  2355. "initialization failed.\n");
  2356. goto err;
  2357. }
  2358. fp->sb_id = id;
  2359. fp->qedf = qedf;
  2360. fp->cq_num_entries =
  2361. qedf->global_queues[id]->cq_mem_size /
  2362. sizeof(struct fcoe_cqe);
  2363. }
2364. return 0;
2365. err:
2366. return -ENOMEM;
  2366. }
  2367. void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
  2368. {
  2369. u16 xid;
  2370. struct qedf_ioreq *io_req;
  2371. struct qedf_rport *fcport;
  2372. u32 comp_type;
  2373. u8 io_comp_type;
  2374. unsigned long flags;
  2375. comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
  2376. FCOE_CQE_CQE_TYPE_MASK;
  2377. xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
  2378. io_req = &qedf->cmd_mgr->cmds[xid];
  2379. /* Completion not for a valid I/O anymore so just return */
  2380. if (!io_req) {
  2381. QEDF_ERR(&qedf->dbg_ctx,
  2382. "io_req is NULL for xid=0x%x.\n", xid);
  2383. return;
  2384. }
  2385. fcport = io_req->fcport;
  2386. if (fcport == NULL) {
  2387. QEDF_ERR(&qedf->dbg_ctx,
  2388. "fcport is NULL for xid=0x%x io_req=%p.\n",
  2389. xid, io_req);
  2390. return;
  2391. }
  2392. /*
  2393. * Check that fcport is offloaded. If it isn't then the spinlock
  2394. * isn't valid and shouldn't be taken. We should just return.
  2395. */
  2396. if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  2397. QEDF_ERR(&qedf->dbg_ctx,
  2398. "Session not offloaded yet, fcport = %p.\n", fcport);
  2399. return;
  2400. }
  2401. spin_lock_irqsave(&fcport->rport_lock, flags);
  2402. io_comp_type = io_req->cmd_type;
  2403. spin_unlock_irqrestore(&fcport->rport_lock, flags);
  2404. switch (comp_type) {
  2405. case FCOE_GOOD_COMPLETION_CQE_TYPE:
  2406. atomic_inc(&fcport->free_sqes);
  2407. switch (io_comp_type) {
  2408. case QEDF_SCSI_CMD:
  2409. qedf_scsi_completion(qedf, cqe, io_req);
  2410. break;
  2411. case QEDF_ELS:
  2412. qedf_process_els_compl(qedf, cqe, io_req);
  2413. break;
  2414. case QEDF_TASK_MGMT_CMD:
  2415. qedf_process_tmf_compl(qedf, cqe, io_req);
  2416. break;
  2417. case QEDF_SEQ_CLEANUP:
  2418. qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
  2419. break;
  2420. }
  2421. break;
  2422. case FCOE_ERROR_DETECTION_CQE_TYPE:
  2423. atomic_inc(&fcport->free_sqes);
  2424. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2425. "Error detect CQE.\n");
  2426. qedf_process_error_detect(qedf, cqe, io_req);
  2427. break;
  2428. case FCOE_EXCH_CLEANUP_CQE_TYPE:
  2429. atomic_inc(&fcport->free_sqes);
  2430. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2431. "Cleanup CQE.\n");
  2432. qedf_process_cleanup_compl(qedf, cqe, io_req);
  2433. break;
  2434. case FCOE_ABTS_CQE_TYPE:
  2435. atomic_inc(&fcport->free_sqes);
  2436. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2437. "Abort CQE.\n");
  2438. qedf_process_abts_compl(qedf, cqe, io_req);
  2439. break;
  2440. case FCOE_DUMMY_CQE_TYPE:
  2441. atomic_inc(&fcport->free_sqes);
  2442. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2443. "Dummy CQE.\n");
  2444. break;
  2445. case FCOE_LOCAL_COMP_CQE_TYPE:
  2446. atomic_inc(&fcport->free_sqes);
  2447. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2448. "Local completion CQE.\n");
  2449. break;
  2450. case FCOE_WARNING_CQE_TYPE:
  2451. atomic_inc(&fcport->free_sqes);
  2452. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2453. "Warning CQE.\n");
  2454. qedf_process_warning_compl(qedf, cqe, io_req);
  2455. break;
  2456. case MAX_FCOE_CQE_TYPE:
  2457. atomic_inc(&fcport->free_sqes);
  2458. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2459. "Max FCoE CQE.\n");
  2460. break;
  2461. default:
  2462. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2463. "Default CQE.\n");
  2464. break;
  2465. }
  2466. }
  2467. static void qedf_free_bdq(struct qedf_ctx *qedf)
  2468. {
  2469. int i;
  2470. if (qedf->bdq_pbl_list)
  2471. dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
  2472. qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
  2473. if (qedf->bdq_pbl)
  2474. dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
  2475. qedf->bdq_pbl, qedf->bdq_pbl_dma);
  2476. for (i = 0; i < QEDF_BDQ_SIZE; i++) {
  2477. if (qedf->bdq[i].buf_addr) {
  2478. dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
  2479. qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
  2480. }
  2481. }
  2482. }
  2483. static void qedf_free_global_queues(struct qedf_ctx *qedf)
  2484. {
  2485. int i;
  2486. struct global_queue **gl = qedf->global_queues;
  2487. for (i = 0; i < qedf->num_queues; i++) {
  2488. if (!gl[i])
  2489. continue;
  2490. if (gl[i]->cq)
  2491. dma_free_coherent(&qedf->pdev->dev,
  2492. gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
  2493. if (gl[i]->cq_pbl)
  2494. dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
  2495. gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
  2496. kfree(gl[i]);
  2497. }
  2498. qedf_free_bdq(qedf);
  2499. }
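/*
 * Allocate the buffer descriptor queue (BDQ): the individual receive buffers,
 * the PBL that describes them and the list of PBL pages handed to qed.
 */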
  2500. static int qedf_alloc_bdq(struct qedf_ctx *qedf)
  2501. {
  2502. int i;
  2503. struct scsi_bd *pbl;
  2504. u64 *list;
  2505. dma_addr_t page;
  2506. /* Alloc dma memory for BDQ buffers */
  2507. for (i = 0; i < QEDF_BDQ_SIZE; i++) {
  2508. qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
  2509. QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
  2510. if (!qedf->bdq[i].buf_addr) {
  2511. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
  2512. "buffer %d.\n", i);
  2513. return -ENOMEM;
  2514. }
  2515. }
  2516. /* Alloc dma memory for BDQ page buffer list */
  2517. qedf->bdq_pbl_mem_size =
  2518. QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
  2519. qedf->bdq_pbl_mem_size =
  2520. ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
  2521. qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
  2522. qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
  2523. if (!qedf->bdq_pbl) {
  2524. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
  2525. return -ENOMEM;
  2526. }
  2527. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2528. "BDQ PBL addr=0x%p dma=%pad\n",
  2529. qedf->bdq_pbl, &qedf->bdq_pbl_dma);
  2530. /*
  2531. * Populate BDQ PBL with physical and virtual address of individual
  2532. * BDQ buffers
  2533. */
  2534. pbl = (struct scsi_bd *)qedf->bdq_pbl;
  2535. for (i = 0; i < QEDF_BDQ_SIZE; i++) {
  2536. pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
  2537. pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
  2538. pbl->opaque.fcoe_opaque.hi = 0;
  2539. /* Opaque lo data is an index into the BDQ array */
  2540. pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
  2541. pbl++;
  2542. }
  2543. /* Allocate list of PBL pages */
  2544. qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
  2545. QEDF_PAGE_SIZE,
  2546. &qedf->bdq_pbl_list_dma,
  2547. GFP_KERNEL);
  2548. if (!qedf->bdq_pbl_list) {
  2549. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
  2550. return -ENOMEM;
  2551. }
  2552. /*
  2553. * Now populate PBL list with pages that contain pointers to the
  2554. * individual buffers.
  2555. */
  2556. qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
  2557. QEDF_PAGE_SIZE;
  2558. list = (u64 *)qedf->bdq_pbl_list;
  2559. page = qedf->bdq_pbl_list_dma;
  2560. for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
  2561. *list = qedf->bdq_pbl_dma;
  2562. list++;
  2563. page += QEDF_PAGE_SIZE;
  2564. }
  2565. return 0;
  2566. }
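/*
 * Allocate the BDQ and the global queues (one CQ plus its PBL per MSI-X
 * vector), then build the p_cpuq list of PBL addresses described below.
 */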
static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
{
	u32 *list;
	int i;
	int status;
	u32 *pbl;
	dma_addr_t page;
	int num_pages;

	/* Allocate and map CQs, RQs */
	/*
	 * Number of global queues (CQ / RQ). This should
	 * be <= number of available MSIX vectors for the PF
	 */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
		return -ENOMEM;
	}

	/*
	 * Make sure we allocated the PBL that will contain the physical
	 * addresses of our queues
	 */
	if (!qedf->p_cpuq) {
		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
		return -EINVAL;
	}

	qedf->global_queues = kzalloc((sizeof(struct global_queue *)
	    * qedf->num_queues), GFP_KERNEL);
	if (!qedf->global_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
			 "queues array ptr memory\n");
		return -ENOMEM;
	}
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "qedf->global_queues=%p.\n", qedf->global_queues);

	/* Allocate DMA coherent buffers for BDQ */
	status = qedf_alloc_bdq(qedf);
	if (status) {
		QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
		goto mem_alloc_failure;
	}

	/* Allocate a CQ and an associated PBL for each MSI-X vector */
	for (i = 0; i < qedf->num_queues; i++) {
		qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
		    GFP_KERNEL);
		if (!qedf->global_queues[i]) {
			QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
				  "global queue %d.\n", i);
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

		qedf->global_queues[i]->cq_mem_size =
		    FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
		qedf->global_queues[i]->cq_mem_size =
		    ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);

		qedf->global_queues[i]->cq_pbl_size =
		    (qedf->global_queues[i]->cq_mem_size /
		    PAGE_SIZE) * sizeof(void *);
		qedf->global_queues[i]->cq_pbl_size =
		    ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);

		qedf->global_queues[i]->cq =
		    dma_alloc_coherent(&qedf->pdev->dev,
				       qedf->global_queues[i]->cq_mem_size,
				       &qedf->global_queues[i]->cq_dma,
				       GFP_KERNEL);
		if (!qedf->global_queues[i]->cq) {
			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

		qedf->global_queues[i]->cq_pbl =
		    dma_alloc_coherent(&qedf->pdev->dev,
				       qedf->global_queues[i]->cq_pbl_size,
				       &qedf->global_queues[i]->cq_pbl_dma,
				       GFP_KERNEL);
		if (!qedf->global_queues[i]->cq_pbl) {
			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

		/* Create PBL */
		num_pages = qedf->global_queues[i]->cq_mem_size /
		    QEDF_PAGE_SIZE;
		page = qedf->global_queues[i]->cq_dma;
		pbl = (u32 *)qedf->global_queues[i]->cq_pbl;

		while (num_pages--) {
			*pbl = U64_LO(page);
			pbl++;
			*pbl = U64_HI(page);
			pbl++;
			page += QEDF_PAGE_SIZE;
		}
		/* Set the initial consumer index for cq */
		qedf->global_queues[i]->cq_cons_idx = 0;
	}

	list = (u32 *)qedf->p_cpuq;

	/*
	 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
	 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
	 * to the physical address which contains an array of pointers to
	 * the physical addresses of the specific queue pages.
	 */
	for (i = 0; i < qedf->num_queues; i++) {
		*list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
		list++;
		*list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
		list++;
		*list = U64_LO(0);
		list++;
		*list = U64_HI(0);
		list++;
	}

	return 0;

mem_alloc_failure:
	qedf_free_global_queues(qedf);
	return status;
}
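
/*
 * Build the qed_pf_params that describe the FCoE personality to the qed
 * core: connection/task counts, CQ geometry, SQ PBL size, and the BDQ
 * location and buffer size.  The SQ and CQ sizes are derived here with the
 * same arithmetic used when the queues themselves are allocated so that
 * the values advertised to the firmware match the actual memory layout.
 */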
static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
{
	u8 sq_num_pbl_pages;
	u32 sq_mem_size;
	u32 cq_mem_size;
	u32 cq_num_entries;
	int rval;

	/*
	 * The number of completion queues/fastpath interrupts/status blocks
	 * we allocate is the minimum of:
	 *
	 * Number of CPUs
	 * Number allocated by qed for our PCI function
	 */
	qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
		  qedf->num_queues);

	qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
	    qedf->num_queues * sizeof(struct qedf_glbl_q_params),
	    &qedf->hw_p_cpuq, GFP_KERNEL);
	if (!qedf->p_cpuq) {
		QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
		return 1;
	}

	rval = qedf_alloc_global_queues(qedf);
	if (rval) {
		QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
			 "failed.\n");
		return 1;
	}

	/* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
	sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
	sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
	sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);

	/* Calculate CQ num entries */
	cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
	cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
	cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);

	memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));

	/* Setup the value for fcoe PF */
	qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
	qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
	qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
	    (u64)qedf->hw_p_cpuq;
	qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
	qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
	qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
	qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;

	/* log_page_size: 12 for 4KB pages */
	qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);

	qedf->pf_params.fcoe_pf_params.mtu = 9000;
	qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
	qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;

	/* BDQ address and size */
	qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
	    qedf->bdq_pbl_list_dma;
	qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
	    qedf->bdq_pbl_list_num_entries;
	qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
	    "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
	    qedf->bdq_pbl_list,
	    qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
	    qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
	    "cq_num_entries=%d.\n",
	    qedf->pf_params.fcoe_pf_params.cq_num_entries);

	return 0;
}

/* Free DMA coherent memory for array of queue pointers we pass to qed */
static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
{
	size_t size = 0;

	if (qedf->p_cpuq) {
		size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
		dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
		    qedf->hw_p_cpuq);
	}

	qedf_free_global_queues(qedf);

	kfree(qedf->global_queues);
}

/*
 * PCI driver functions
 */

static const struct pci_device_id qedf_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
	{0}
};
MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);

static struct pci_driver qedf_pci_driver = {
	.name = QEDF_MODULE_NAME,
	.id_table = qedf_pci_tbl,
	.probe = qedf_probe,
	.remove = qedf_remove,
	.shutdown = qedf_shutdown,
	.suspend = qedf_suspend,
};
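
/*
 * Common probe body shared by the normal PCI probe path and the recovery
 * path.  In QEDF_MODE_NORMAL a new lport/Scsi_Host and the associated OS
 * constructs are allocated; in QEDF_MODE_RECOVERY the existing qedf_ctx is
 * reused from pci_get_drvdata() and only the hardware-facing state is
 * rebuilt.
 */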
static int __qedf_probe(struct pci_dev *pdev, int mode)
{
	int rc = -EINVAL;
	struct fc_lport *lport;
	struct qedf_ctx *qedf = NULL;
	struct Scsi_Host *host;
	bool is_vf = false;
	struct qed_ll2_params params;
	char host_buf[20];
	struct qed_link_params link_params;
	int status;
	void *task_start, *task_end;
	struct qed_slowpath_params slowpath_params;
	struct qed_probe_params qed_params;
	u16 retry_cnt = 10;

	/*
	 * When doing error recovery we didn't reap the lport so don't try
	 * to reallocate it.
	 */
retry_probe:
	if (mode == QEDF_MODE_RECOVERY)
		msleep(2000);

	if (mode != QEDF_MODE_RECOVERY) {
		lport = libfc_host_alloc(&qedf_host_template,
		    sizeof(struct qedf_ctx));
		if (!lport) {
			QEDF_ERR(NULL, "Could not allocate lport.\n");
			rc = -ENOMEM;
			goto err0;
		}

		fc_disc_init(lport);

		/* Initialize qedf_ctx */
		qedf = lport_priv(lport);
		set_bit(QEDF_PROBING, &qedf->flags);
		qedf->lport = lport;
		qedf->ctlr.lp = lport;
		qedf->pdev = pdev;
		qedf->dbg_ctx.pdev = pdev;
		qedf->dbg_ctx.host_no = lport->host->host_no;
		spin_lock_init(&qedf->hba_lock);
		INIT_LIST_HEAD(&qedf->fcports);
		qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
		atomic_set(&qedf->num_offloads, 0);
		qedf->stop_io_on_error = false;
		pci_set_drvdata(pdev, qedf);
		init_completion(&qedf->fipvlan_compl);
		mutex_init(&qedf->stats_mutex);
		mutex_init(&qedf->flush_mutex);
		qedf->flogi_pending = 0;

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
		   "QLogic FastLinQ FCoE Module qedf %s, "
		   "FW %d.%d.%d.%d\n", QEDF_VERSION,
		   FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		   FW_ENGINEERING_VERSION);
	} else {
		/* Init pointers during recovery */
		qedf = pci_get_drvdata(pdev);
		set_bit(QEDF_PROBING, &qedf->flags);
		lport = qedf->lport;
	}

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");

	host = lport->host;

	/* Allocate mempool for qedf_io_work structs */
	qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
	    qedf_io_work_cache);
	if (qedf->io_mempool == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
		goto err1;
	}
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
	    qedf->io_mempool);

	sprintf(host_buf, "qedf_%u_link",
	    qedf->lport->host->host_no);
	qedf->link_update_wq = create_workqueue(host_buf);
	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
	INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
	INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
	qedf->fipvlan_retries = qedf_fipvlan_retries;
	/* Set a default prio in case DCBX doesn't converge */
	if (qedf_default_prio > -1) {
		/*
		 * This is the case where we pass a modparam in so we want to
		 * honor it even if dcbx doesn't converge.
		 */
		qedf->prio = qedf_default_prio;
	} else
		qedf->prio = QEDF_DEFAULT_PRIO;

	/*
	 * Common probe. Takes care of basic hardware init and pci_*
	 * functions.
	 */
	memset(&qed_params, 0, sizeof(qed_params));
	qed_params.protocol = QED_PROTOCOL_FCOE;
	qed_params.dp_module = qedf_dp_module;
	qed_params.dp_level = qedf_dp_level;
	qed_params.is_vf = is_vf;
	qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
	if (!qedf->cdev) {
		if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
			QEDF_ERR(&qedf->dbg_ctx,
				 "Retry %d initialize hardware\n", retry_cnt);
			retry_cnt--;
			goto retry_probe;
		}
		QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
		rc = -ENODEV;
		goto err1;
	}

	/* Learn information crucial for qedf to progress */
	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
		goto err1;
	}

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
		  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
		  qedf->dev_info.common.num_hwfns,
		  qed_ops->common->get_affin_hwfn_idx(qedf->cdev));

	/* queue allocation code should come here
	 * order should be
	 *	slowpath_start
	 *	status block allocation
	 *	interrupt registration (to get min number of queues)
	 *	set_fcoe_pf_param
	 *	qed_sp_fcoe_func_start
	 */
	rc = qedf_set_fcoe_pf_param(qedf);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
		goto err2;
	}
	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);

	/* Learn information crucial for qedf to progress */
	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
	if (rc) {
		QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
		goto err2;
	}

	if (mode != QEDF_MODE_RECOVERY) {
		qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
		if (IS_ERR(qedf->devlink)) {
			QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
			rc = PTR_ERR(qedf->devlink);
			qedf->devlink = NULL;
			goto err2;
		}
	}

	/* Record BDQ producer doorbell addresses */
	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
	qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
	    "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
	    qedf->bdq_secondary_prod);

	qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);

	rc = qedf_prepare_sb(qedf);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
		goto err2;
	}

	/* Start the Slowpath-process */
	slowpath_params.int_mode = QED_INT_MODE_MSIX;
	slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
	slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
	slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
	slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
	strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
		goto err2;
	}

	/*
	 * update_pf_params needs to be called before and after slowpath
	 * start
	 */
	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);

	/* Setup interrupts */
	rc = qedf_setup_int(qedf);
	if (rc) {
		QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
		goto err3;
	}

	rc = qed_ops->start(qedf->cdev, &qedf->tasks);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
		goto err4;
	}
	task_start = qedf_get_task_mem(&qedf->tasks, 0);
	task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
		  "end=%p block_size=%u.\n", task_start, task_end,
		  qedf->tasks.size);

	/*
	 * We need to write the number of BDs in the BDQ we've preallocated so
	 * the f/w will do a prefetch and we'll get an unsolicited CQE when a
	 * packet arrives.
	 */
	qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
	    "Writing %d to primary and secondary BDQ doorbell registers.\n",
	    qedf->bdq_prod_idx);
	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	readw(qedf->bdq_secondary_prod);

	qed_ops->common->set_power_state(qedf->cdev, PCI_D0);

	/* Now that the dev_info struct has been filled in set the MAC
	 * address
	 */
	ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
		  qedf->mac);

	/*
	 * Set the WWNN and WWPN in the following way:
	 *
	 * If the info we get from qed is non-zero then use that to set the
	 * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
	 * on the MAC address.
	 */
	if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		    "Setting WWPN and WWNN from qed dev_info.\n");
		qedf->wwnn = qedf->dev_info.wwnn;
		qedf->wwpn = qedf->dev_info.wwpn;
	} else {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		    "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
		qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
		qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
	}
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
		  "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);

	sprintf(host_buf, "host_%d", host->host_no);
	qed_ops->common->set_name(qedf->cdev, host_buf);

	/* Allocate cmd mgr */
	qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
	if (!qedf->cmd_mgr) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
		rc = -ENOMEM;
		goto err5;
	}

	if (mode != QEDF_MODE_RECOVERY) {
		host->transportt = qedf_fc_transport_template;
		host->max_lun = qedf_max_lun;
		host->max_cmd_len = QEDF_MAX_CDB_LEN;
		host->max_id = QEDF_MAX_SESSIONS;
		host->can_queue = FCOE_PARAMS_NUM_TASKS;
		rc = scsi_add_host(host, &pdev->dev);
		if (rc) {
			QEDF_WARN(&qedf->dbg_ctx,
				  "Error adding Scsi_Host rc=0x%x.\n", rc);
			goto err6;
		}
	}

	memset(&params, 0, sizeof(params));
	params.mtu = QEDF_LL2_BUF_SIZE;
	ether_addr_copy(params.ll2_mac_address, qedf->mac);

	/* Start LL2 processing thread */
	snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
	qedf->ll2_recv_wq =
		create_workqueue(host_buf);
	if (!qedf->ll2_recv_wq) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate LL2 workqueue.\n");
		rc = -ENOMEM;
		goto err7;
	}

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
			   qedf_dbg_fops);
#endif

	/* Start LL2 */
	qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
	rc = qed_ops->ll2->start(qedf->cdev, &params);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
		goto err7;
	}
	set_bit(QEDF_LL2_STARTED, &qedf->flags);

	/* Set initial FIP/FCoE VLAN to NULL */
	qedf->vlan_id = 0;

	/*
	 * No need to setup fcoe_ctlr or fc_lport objects during recovery since
	 * they were not reaped during the unload process.
	 */
	if (mode != QEDF_MODE_RECOVERY) {
		/* Set up embedded fcoe controller */
		qedf_fcoe_ctlr_setup(qedf);

		/* Setup lport */
		rc = qedf_lport_setup(qedf);
		if (rc) {
			QEDF_ERR(&(qedf->dbg_ctx),
			    "qedf_lport_setup failed.\n");
			goto err7;
		}
	}

	sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
	qedf->timer_work_queue =
		create_workqueue(host_buf);
	if (!qedf->timer_work_queue) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
			 "workqueue.\n");
		rc = -ENOMEM;
		goto err7;
	}

	/* DPC workqueue is not reaped during recovery unload */
	if (mode != QEDF_MODE_RECOVERY) {
		sprintf(host_buf, "qedf_%u_dpc",
		    qedf->lport->host->host_no);
		qedf->dpc_wq = create_workqueue(host_buf);
	}
	INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);

	/*
	 * GRC dump and sysfs parameters are not reaped during the recovery
	 * unload process.
	 */
	if (mode != QEDF_MODE_RECOVERY) {
		qedf->grcdump_size =
		    qed_ops->common->dbg_all_data_size(qedf->cdev);
		if (qedf->grcdump_size) {
			rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
			    qedf->grcdump_size);
			if (rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "GRC Dump buffer alloc failed.\n");
				qedf->grcdump = NULL;
			}

			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
			    "grcdump: addr=%p, size=%u.\n",
			    qedf->grcdump, qedf->grcdump_size);
		}
		qedf_create_sysfs_ctx_attr(qedf);

		/* Initialize I/O tracing for this adapter */
		spin_lock_init(&qedf->io_trace_lock);
		qedf->io_trace_idx = 0;
	}

	init_completion(&qedf->flogi_compl);

	status = qed_ops->common->update_drv_state(qedf->cdev, true);
	if (status)
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Failed to send drv state to MFW.\n");

	memset(&link_params, 0, sizeof(struct qed_link_params));
	link_params.link_up = true;
	status = qed_ops->common->set_link(qedf->cdev, &link_params);
	if (status)
		QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");

	/* Start/restart discovery */
	if (mode == QEDF_MODE_RECOVERY)
		fcoe_ctlr_link_up(&qedf->ctlr);
	else
		fc_fabric_login(lport);

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");

	clear_bit(QEDF_PROBING, &qedf->flags);

	/* All good */
	return 0;

err7:
	if (qedf->ll2_recv_wq)
		destroy_workqueue(qedf->ll2_recv_wq);
	fc_remove_host(qedf->lport->host);
	scsi_remove_host(qedf->lport->host);
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_host_exit(&(qedf->dbg_ctx));
#endif
err6:
	qedf_cmd_mgr_free(qedf->cmd_mgr);
err5:
	qed_ops->stop(qedf->cdev);
err4:
	qedf_free_fcoe_pf_param(qedf);
	qedf_sync_free_irqs(qedf);
err3:
	qed_ops->common->slowpath_stop(qedf->cdev);
err2:
	qed_ops->common->remove(qedf->cdev);
err1:
	scsi_host_put(lport->host);
err0:
	return rc;
}

static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return __qedf_probe(pdev, QEDF_MODE_NORMAL);
}
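
/*
 * Common removal body.  Logs off the fabric (or just drops the FIP/FCoE
 * link in recovery mode), waits for sessions to upload, tears down the
 * fastpath and LL2, and stops the qed core.  OS-visible objects such as
 * the Scsi_Host, lport and sysfs attributes are only destroyed on a real
 * removal (mode != QEDF_MODE_RECOVERY) so they can be reused when the
 * recovery path probes again.
 */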
static void __qedf_remove(struct pci_dev *pdev, int mode)
{
	struct qedf_ctx *qedf;
	int rc;

	if (!pdev) {
		QEDF_ERR(NULL, "pdev is NULL.\n");
		return;
	}

	qedf = pci_get_drvdata(pdev);

	/*
	 * Prevent race where we're in board disable work and then try to
	 * rmmod the module.
	 */
	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
		return;
	}

	if (mode != QEDF_MODE_RECOVERY)
		set_bit(QEDF_UNLOADING, &qedf->flags);

	/* Logoff the fabric to upload all connections */
	if (mode == QEDF_MODE_RECOVERY)
		fcoe_ctlr_link_down(&qedf->ctlr);
	else
		fc_fabric_logoff(qedf->lport);

	if (!qedf_wait_for_upload(qedf))
		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_host_exit(&(qedf->dbg_ctx));
#endif

	/* Stop any link update handling */
	cancel_delayed_work_sync(&qedf->link_update);
	destroy_workqueue(qedf->link_update_wq);
	qedf->link_update_wq = NULL;

	if (qedf->timer_work_queue)
		destroy_workqueue(qedf->timer_work_queue);

	/* Stop Light L2 */
	clear_bit(QEDF_LL2_STARTED, &qedf->flags);
	qed_ops->ll2->stop(qedf->cdev);
	if (qedf->ll2_recv_wq)
		destroy_workqueue(qedf->ll2_recv_wq);

	/* Stop fastpath */
	qedf_sync_free_irqs(qedf);
	qedf_destroy_sb(qedf);

	/*
	 * During recovery don't destroy OS constructs that represent the
	 * physical port.
	 */
	if (mode != QEDF_MODE_RECOVERY) {
		qedf_free_grc_dump_buf(&qedf->grcdump);
		qedf_remove_sysfs_ctx_attr(qedf);

		/* Remove all SCSI/libfc/libfcoe structures */
		fcoe_ctlr_destroy(&qedf->ctlr);
		fc_lport_destroy(qedf->lport);
		fc_remove_host(qedf->lport->host);
		scsi_remove_host(qedf->lport->host);
	}

	qedf_cmd_mgr_free(qedf->cmd_mgr);

	if (mode != QEDF_MODE_RECOVERY) {
		fc_exch_mgr_free(qedf->lport);
		fc_lport_free_stats(qedf->lport);

		/* Wait for all vports to be reaped */
		qedf_wait_for_vport_destroy(qedf);
	}

	/*
	 * Now that all connections have been uploaded we can stop the
	 * rest of the qed operations
	 */
	qed_ops->stop(qedf->cdev);

	if (mode != QEDF_MODE_RECOVERY) {
		if (qedf->dpc_wq) {
			/* Stop general DPC handling */
			destroy_workqueue(qedf->dpc_wq);
			qedf->dpc_wq = NULL;
		}
	}

	/* Final shutdown for the board */
	qedf_free_fcoe_pf_param(qedf);
	if (mode != QEDF_MODE_RECOVERY) {
		qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
		pci_set_drvdata(pdev, NULL);
	}

	rc = qed_ops->common->update_drv_state(qedf->cdev, false);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Failed to send drv state to MFW.\n");

	if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
		qed_ops->common->devlink_unregister(qedf->devlink);
		qedf->devlink = NULL;
	}

	qed_ops->common->slowpath_stop(qedf->cdev);
	qed_ops->common->remove(qedf->cdev);

	mempool_destroy(qedf->io_mempool);

	/* Only reap the Scsi_host on a real removal */
	if (mode != QEDF_MODE_RECOVERY)
		scsi_host_put(qedf->lport->host);
}

static void qedf_remove(struct pci_dev *pdev)
{
	/* Check to make sure this function wasn't already disabled */
	if (!atomic_read(&pdev->enable_cnt))
		return;

	__qedf_remove(pdev, QEDF_MODE_NORMAL);
}

void qedf_wq_grcdump(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, grcdump_work.work);

	QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
	qedf_capture_grc_dump(qedf);
}
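
/*
 * Callback from qed when the device reports a hardware error.  A fan
 * failure schedules the board disable work immediately; for the other
 * error types further hardware attentions are masked, and a ramrod failure
 * may additionally be reported through devlink (when qedf_enable_recovery
 * allows it) so that a recovery can be triggered.
 */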
void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
{
	struct qedf_ctx *qedf = dev;

	QEDF_ERR(&(qedf->dbg_ctx),
		 "Hardware error handler scheduled, event=%d.\n",
		 err_type);

	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Already in recovery, not scheduling board disable work.\n");
		return;
	}

	switch (err_type) {
	case QED_HW_ERR_FAN_FAIL:
		schedule_delayed_work(&qedf->board_disable_work, 0);
		break;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_DMAE_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		/* Prevent HW attentions from being reasserted */
		qed_ops->common->attn_clr_enable(qedf->cdev, true);
		break;
	case QED_HW_ERR_RAMROD_FAIL:
		/* Prevent HW attentions from being reasserted */
		qed_ops->common->attn_clr_enable(qedf->cdev, true);

		if (qedf_enable_recovery && qedf->devlink)
			qed_ops->common->report_fatal_error(qedf->devlink,
				err_type);
		break;
	default:
		break;
	}
}

/*
 * Protocol TLV handler
 */
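/*
 * Fill the qed_mfw_tlv_fcoe structure requested by the management firmware
 * with FCoE protocol state and statistics (priority, timeouts, NPIV usage,
 * frame/byte counters and error counts), pulling most values from the
 * refreshed fc_host statistics.
 */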
void qedf_get_protocol_tlv_data(void *dev, void *data)
{
	struct qedf_ctx *qedf = dev;
	struct qed_mfw_tlv_fcoe *fcoe = data;
	struct fc_lport *lport;
	struct Scsi_Host *host;
	struct fc_host_attrs *fc_host;
	struct fc_host_statistics *hst;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is null.\n");
		return;
	}

	if (test_bit(QEDF_PROBING, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
		return;
	}

	lport = qedf->lport;
	host = lport->host;
	fc_host = shost_to_fc_host(host);

	/* Force a refresh of the fc_host stats including offload stats */
	hst = qedf_fc_get_host_stats(host);

	fcoe->qos_pri_set = true;
	fcoe->qos_pri = 3; /* Hard coded to 3 in driver */

	fcoe->ra_tov_set = true;
	fcoe->ra_tov = lport->r_a_tov;

	fcoe->ed_tov_set = true;
	fcoe->ed_tov = lport->e_d_tov;

	fcoe->npiv_state_set = true;
	fcoe->npiv_state = 1; /* NPIV always enabled */

	fcoe->num_npiv_ids_set = true;
	fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;

	/* Certain attributes we only want to set if we've selected an FCF */
	if (qedf->ctlr.sel_fcf) {
		fcoe->switch_name_set = true;
		u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
	}

	fcoe->port_state_set = true;
	/* For qedf we're either link down or fabric attach */
	if (lport->link_up)
		fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
	else
		fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;

	fcoe->link_failures_set = true;
	fcoe->link_failures = (u16)hst->link_failure_count;

	fcoe->fcoe_txq_depth_set = true;
	fcoe->fcoe_rxq_depth_set = true;
	fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
	fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;

	fcoe->fcoe_rx_frames_set = true;
	fcoe->fcoe_rx_frames = hst->rx_frames;

	fcoe->fcoe_tx_frames_set = true;
	fcoe->fcoe_tx_frames = hst->tx_frames;

	fcoe->fcoe_rx_bytes_set = true;
	fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;

	fcoe->fcoe_tx_bytes_set = true;
	fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;

	fcoe->crc_count_set = true;
	fcoe->crc_count = hst->invalid_crc_count;

	fcoe->tx_abts_set = true;
	fcoe->tx_abts = hst->fcp_packet_aborts;

	fcoe->tx_lun_rst_set = true;
	fcoe->tx_lun_rst = qedf->lun_resets;

	fcoe->abort_task_sets_set = true;
	fcoe->abort_task_sets = qedf->packet_aborts;

	fcoe->scsi_busy_set = true;
	fcoe->scsi_busy = qedf->busy;

	fcoe->scsi_tsk_full_set = true;
	fcoe->scsi_tsk_full = qedf->task_set_fulls;
}

/* Deferred work function to perform soft context reset on STAG change */
void qedf_stag_change_work(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, stag_work.work);

	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
			   dev_name(&qedf->pdev->dev), __func__, __LINE__,
			   qedf->dbg_ctx.host_no);
	qedf_ctx_soft_reset(qedf->lport);
}
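
/* On shutdown do a full, non-recovery teardown of the function. */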
static void qedf_shutdown(struct pci_dev *pdev)
{
	__qedf_remove(pdev, QEDF_MODE_NORMAL);
}

static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct qedf_ctx *qedf;

	if (!pdev) {
		QEDF_ERR(NULL, "pdev is NULL.\n");
		return -ENODEV;
	}

	qedf = pci_get_drvdata(pdev);

	QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);

	return -EPERM;
}

/*
 * Recovery handler code
 */
static void qedf_schedule_recovery_handler(void *dev)
{
	struct qedf_ctx *qedf = dev;

	QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
	schedule_delayed_work(&qedf->recovery_work, 0);
}
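
/*
 * Recovery work: after the MFW recovery prolog has quiesced PCI traffic,
 * the function is torn down and probed again in QEDF_MODE_RECOVERY, which
 * reuses the existing lport/Scsi_Host rather than allocating new ones.
 */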
static void qedf_recovery_handler(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, recovery_work.work);

	if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
		return;

	/*
	 * Call common_ops->recovery_prolog to allow the MFW to quiesce
	 * any PCI transactions.
	 */
	qed_ops->common->recovery_prolog(qedf->cdev);

	QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
	__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);

	/*
	 * Reset link and dcbx to down state since we will not get a link down
	 * event from the MFW but calling __qedf_remove will essentially be a
	 * link down event.
	 */
	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
	atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
	__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
	clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
	QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
}

/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qedf_ctx *qedf;

	if (!dev) {
		QEDF_INFO(NULL, QEDF_LOG_EVT,
			  "dev is NULL so ignoring get_generic_tlv_data request.\n");
		return;
	}
	qedf = (struct qedf_ctx *)dev;

	memset(data, 0, sizeof(struct qed_generic_tlvs));
	ether_addr_copy(data->mac[0], qedf->mac);
}

/*
 * Module Init/Remove
 */
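/*
 * Module load: set up the io_work slab cache, grab the qed FCoE ops,
 * register the FC transport templates and the global I/O workqueue, then
 * register the PCI driver.  Failures unwind in reverse order via the err
 * labels.
 */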
static int __init qedf_init(void)
{
	int ret;

	/* If debug=1 passed, set the default log mask */
	if (qedf_debug == QEDF_LOG_DEFAULT)
		qedf_debug = QEDF_DEFAULT_LOG_MASK;

	/*
	 * Check that default prio for FIP/FCoE traffic is between 0..7 if a
	 * value has been set
	 */
	if (qedf_default_prio > -1)
		if (qedf_default_prio > 7) {
			qedf_default_prio = QEDF_DEFAULT_PRIO;
			QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n",
				 QEDF_DEFAULT_PRIO);
		}

	/* Print driver banner */
	QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
		  QEDF_VERSION);

	/* Create kmem_cache for qedf_io_work structs */
	qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
	    sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
	if (qedf_io_work_cache == NULL) {
		QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
		goto err1;
	}
	QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
	    qedf_io_work_cache);

	qed_ops = qed_get_fcoe_ops();
	if (!qed_ops) {
		QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
		goto err1;
	}

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_init("qedf");
#endif

	qedf_fc_transport_template =
	    fc_attach_transport(&qedf_fc_transport_fn);
	if (!qedf_fc_transport_template) {
		QEDF_ERR(NULL, "Could not register with FC transport\n");
		goto err2;
	}

	qedf_fc_vport_transport_template =
	    fc_attach_transport(&qedf_fc_vport_transport_fn);
	if (!qedf_fc_vport_transport_template) {
		QEDF_ERR(NULL, "Could not register vport template with FC "
			 "transport\n");
		goto err3;
	}

	qedf_io_wq = create_workqueue("qedf_io_wq");
	if (!qedf_io_wq) {
		QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
		goto err4;
	}

	qedf_cb_ops.get_login_failures = qedf_get_login_failures;

	ret = pci_register_driver(&qedf_pci_driver);
	if (ret) {
		QEDF_ERR(NULL, "Failed to register driver\n");
		goto err5;
	}

	return 0;

err5:
	destroy_workqueue(qedf_io_wq);
err4:
	fc_release_transport(qedf_fc_vport_transport_template);
err3:
	fc_release_transport(qedf_fc_transport_template);
err2:
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_exit();
#endif
	qed_put_fcoe_ops();
err1:
	return -EINVAL;
}

static void __exit qedf_cleanup(void)
{
	pci_unregister_driver(&qedf_pci_driver);

	destroy_workqueue(qedf_io_wq);

	fc_release_transport(qedf_fc_vport_transport_template);
	fc_release_transport(qedf_fc_transport_template);
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_exit();
#endif
	qed_put_fcoe_ops();

	kmem_cache_destroy(qedf_io_work_cache);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDF_VERSION);
module_init(qedf_init);
module_exit(qedf_cleanup);